diff --git a/addons.xml b/addons.xml
new file mode 100644
index 00000000..85878a5e
--- /dev/null
+++ b/addons.xml
@@ -0,0 +1,66 @@
+
+
+
+
+
+
+
+video
+
+
+Sumario en Español
+Descripción en Español
+English summary
+English description
+all
+GNU GPL v3
+foro
+web
+my@email.com
+source
+
+
+
+
+
+
+
+
+
+
+
+
+video
+
+
+Sumario en Español
+Descripción en Español
+English summary
+English description
+all
+GNU GPL v3
+foro
+web
+my@email.com
+source
+
+
+
+
+
+
+
+
+
+https://raw.github.com/alfa-addon/addon/master/addons.xml
+https://raw.github.com/alfa-addon/addon/master/addons.xml.md5
+https://raw.github.com/alfa-addon/addon/master
+
+
+Repositorio para Alfa-Addon
+
+The owners and submitters to this repository do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.
+all
+
+
+
diff --git a/addons.xml.md5 b/addons.xml.md5
new file mode 100644
index 00000000..a3c67bcb
--- /dev/null
+++ b/addons.xml.md5
@@ -0,0 +1 @@
+61883b19a5b4d4232b6cfde14e7827d2
\ No newline at end of file
diff --git a/addons_xml_generator.py b/addons_xml_generator.py
new file mode 100644
index 00000000..453c1e84
--- /dev/null
+++ b/addons_xml_generator.py
@@ -0,0 +1,73 @@
+""" addons.xml generator """
+
+import os
+import md5
+
+
+class Generator:
+    """
+    Generates a new addons.xml file from each addons addon.xml file
+    and a new addons.xml.md5 hash file. Must be run from the root of
+    the checked-out repo. Only handles single depth folder structure.
+    """
+    def __init__( self ):
+        # generate files
+        self._generate_addons_file()
+        self._generate_md5_file()
+        # notify user
+        print "Finished updating addons xml and md5 files"
+
+    def _generate_addons_file( self ):
+        # addon list
+        addons = os.listdir( "." )
+        # final addons text
+        addons_xml = u"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<addons>\n"
+        # loop thru and add each addons addon.xml file
+        for addon in addons:
+            try:
+                # skip any file or .svn folder
+                if ( not os.path.isdir( addon ) or addon == ".svn" ): continue
+                # create path
+                _path = os.path.join( addon, "addon.xml" )
+                # split lines for stripping
+                xml_lines = open( _path, "r" ).read().splitlines()
+                # new addon
+                addon_xml = ""
+                # loop thru cleaning each line
+                for line in xml_lines:
+                    # skip encoding format line
+                    if ( line.find( "<?xml" ) >= 0 ): continue
+                    # add line
+                    addon_xml += unicode( line.rstrip() + "\n", "UTF-8" )
+                # we succeeded so add to our final addons.xml text
+                addons_xml += addon_xml.rstrip() + "\n\n"
+            except Exception, e:
+                # missing or poorly formatted addon.xml
+                print "Excluding %s for %s" % ( _path, e, )
+        # clean and add closing tag
+        addons_xml = addons_xml.strip() + u"\n</addons>\n"
+        # save file
+        self._save_file( addons_xml.encode( "UTF-8" ), file="addons.xml" )
+
+    def _generate_md5_file( self ):
+        try:
+            # create a new md5 hash
+            m = md5.new( open( "addons.xml" ).read() ).hexdigest()
+            # save file
+            self._save_file( m, file="addons.xml.md5" )
+        except Exception, e:
+            # oops
+            print "An error occurred creating addons.xml.md5 file!\n%s" % ( e, )
+
+    def _save_file( self, data, file ):
+        try:
+            # write data to the file
+            open( file, "w" ).write( data )
+        except Exception, e:
+            # oops
+            print "An error occurred saving %s file!\n%s" % ( file, e, )
+
+
+if ( __name__ == "__main__" ):
+    # start
+    Generator()
\ No newline at end of file
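The script above still imports the Python 2-only md5 module (removed in Python 3) and relies on the print statement. A minimal sketch of the same checksum step written against hashlib instead, which runs under both Python 2 and 3 — the function name and layout are illustrative, not part of this patch:

import hashlib

def generate_md5_file(xml_path="addons.xml"):
    # Hash the raw bytes of addons.xml; Kodi compares this digest against
    # addons.xml.md5 before trusting a downloaded repository index.
    with open(xml_path, "rb") as f:
        digest = hashlib.md5(f.read()).hexdigest()
    with open(xml_path + ".md5", "w") as f:
        f.write(digest)
    return digest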
diff --git a/addons_zip_generator.py b/addons_zip_generator.py
new file mode 100644
index 00000000..fdf85828
--- /dev/null
+++ b/addons_zip_generator.py
@@ -0,0 +1,54 @@
+""" zip generator """
+
+import os
+import zipfile
+
+
+class Generator:
+    """
+    Original code from addons_xml_generator.py
+    """
+    def __init__( self ):
+        # generate files
+        self._generate_addons_file()
+        # notify user
+        print "Finished creating zip file"
+
+    def _generate_addons_file( self ):
+        # addon list
+        addons = os.listdir( "." )
+        # loop thru and zip each addon folder
+        for addon in addons:
+            try:
+                # skip any file or .svn folder
+                if ( not os.path.isdir( addon ) or addon == ".svn" ): continue
+                # create path
+                _path = os.path.join( addon, "addon.xml" )
+                # split lines for stripping
+                xml_lines = open( _path, "r" ).read().splitlines()
+                # grab the version attribute from the <addon> tag line
+                for line in xml_lines:
+                    if line.find( "<addon" ) >= 0:
+                        version = line[line.find('version="') + 9:]
+                        version = version[:version.find('"')]
+                        break
+                # the zip is written inside the addon folder as <addon>-<version>.zip
+                filenamezip = '.\\' + addon + '\\' + addon + '-' + version
+                print addon
+                zf = zipfile.ZipFile(filenamezip + ".zip", "w")
+                for dirname, subdirs, files in os.walk(addon):
+                    zf.write(dirname)
+                    for filename in files:
+                        if '.zip' not in filename:
+                            zf.write(os.path.join(dirname, filename))
+                zf.close()
+            except Exception, e:
+                # missing or poorly formatted addon.xml
+                print "Excluding %s for %s" % ( _path, e, )
+
+
+if ( __name__ == "__main__" ):
+    # start
+    Generator()
\ No newline at end of file
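Like the XML generator, this is Python 2-only and Windows-specific in its path handling. A hedged sketch of the same packaging step — zip_addon is an illustrative name, not an API from this repo — that compresses entries and keeps the addon folder as the top-level entry in the archive, which is the layout Kodi expects when installing from a repository:

import os
import zipfile

def zip_addon(addon, version):
    # e.g. zip_addon("plugin.video.alfa", "0.0.1") writes
    # plugin.video.alfa/plugin.video.alfa-0.0.1.zip
    zip_path = os.path.join(addon, "%s-%s.zip" % (addon, version))
    zf = zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED)
    for dirname, subdirs, files in os.walk(addon):
        for filename in files:
            if filename.endswith(".zip"):
                continue  # skip previously generated zips (and this one)
            path = os.path.join(dirname, filename)
            # paths stay relative to the repo root, so the addon folder
            # itself becomes the root entry inside the archive
            zf.write(path, path)
    zf.close()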
diff --git a/kodi/channels/allcalidad.json b/kodi/channels/allcalidad.json
old mode 100755
new mode 100644
index c05eca8e..0804f221
--- a/kodi/channels/allcalidad.json
+++ b/kodi/channels/allcalidad.json
@@ -22,25 +22,25 @@
       "id": "include_in_global_search",
       "type": "bool",
       "label": "Incluir en busqueda global",
-      "default": "true",
-      "enabled": "true",
-      "visible": "true"
+      "default": true,
+      "enabled": true,
+      "visible": true
     },
     {
       "id": "include_in_newest_peliculas",
       "type": "bool",
       "label": "Incluir en Novedades - Peliculas",
-      "default": "true",
-      "enabled": "true",
-      "visible": "true"
+      "default": true,
+      "enabled": true,
+      "visible": true
     },
     {
       "id": "include_in_newest_infantiles",
       "type": "bool",
       "label": "Incluir en Novedades - Infantiles",
-      "default": "true",
-      "enabled": "true",
-      "visible": "true"
+      "default": true,
+      "enabled": true,
+      "visible": true
     }
   ]
 }
\ No newline at end of file
diff --git a/plugin.video.alfa/__init__.py b/plugin.video.alfa/__init__.py
new file mode 100755
index 00000000..4c48b5ac
--- /dev/null
+++ b/plugin.video.alfa/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml
new file mode 100755
index 00000000..9ac1bfac
--- /dev/null
+++ b/plugin.video.alfa/addon.xml
@@ -0,0 +1,27 @@
+
+
+
+
+
+
+
+video
+
+
+Sumario en Español
+Descripción en Español
+English summary
+English description
+all
+GNU GPL v3
+foro
+web
+my@email.com
+source
+
+
+
+
+
+
+
diff --git a/plugin.video.alfa/changelog-0.0.1.txt b/plugin.video.alfa/changelog-0.0.1.txt
new file mode 100644
index 00000000..67039dcf
--- /dev/null
+++ b/plugin.video.alfa/changelog-0.0.1.txt
@@ -0,0 +1,11 @@
+Version 0.0.1
+- Switched xml to json and adapted the internal conversion; removed the validation code.
+- Removed conversion code for versions prior to the previous addon, which is no longer needed.
+- Commented out the update and help code.
+- Removed code for boxee, xbox and pre-xbmc12 versions, plus unused options such as pyload or jdownloader.
+- Renamed Spanish file names to English to make things easier for non-Spanish speakers.
+- New icons and new local paths. # pending: a system to choose another icon pack.
+- Reset "version" values to 1 in channels and servers.
+- "Biblioteca" is now "videoteca" (the library is now the video library).
+- Changed the system that configures the Kodi library and integrates it with the addon data.
+- Prepared the code to allow more torrent clients without modifying platformtools; a "clients" node is added inside /servers/torrent.json.
diff --git a/plugin.video.alfa/channels/__init__.py b/plugin.video.alfa/channels/__init__.py
new file mode 100755
index 00000000..d290e2cd
--- /dev/null
+++ b/plugin.video.alfa/channels/__init__.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+
+# Appends the main plugin dir to the PYTHONPATH if an internal package cannot be imported.
+# Examples: In Plex Media Server all modules are under "Code.*" package, and in Enigma2 under "Plugins.Extensions.*"
+try:
+    # from core import logger
+    import core
+except:
+    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
diff --git a/plugin.video.alfa/channels/allcalidad.json b/plugin.video.alfa/channels/allcalidad.json
new file mode 100644
index 00000000..0804f221
--- /dev/null
+++ b/plugin.video.alfa/channels/allcalidad.json
@@ -0,0 +1,46 @@
+{
+  "id": "allcalidad",
+  "name": "Allcalidad",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "https://s22.postimg.org/irnlwuizh/allcalidad1.png",
+  "bannermenu": "https://s22.postimg.org/9y1athlep/allcalidad2.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "14/07/2017",
+      "description": "Primera version"
+    }
+  ],
+  "categories": [
+    "movie",
+    "latino"
+  ],
+  "settings": [
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en busqueda global",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    },
+    {
+      "id": "include_in_newest_peliculas",
+      "type": "bool",
+      "label": "Incluir en Novedades - Peliculas",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    },
+    {
+      "id": "include_in_newest_infantiles",
+      "type": "bool",
+      "label": "Incluir en Novedades - Infantiles",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    }
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/allcalidad.py b/plugin.video.alfa/channels/allcalidad.py
new file mode 100755
index 00000000..5c6ec6a0
--- /dev/null
+++ b/plugin.video.alfa/channels/allcalidad.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+
+from core import config
+from core import httptools
+from core import logger
+from core import scrapertools
+from core import servertools
+from core.item import Item
+
+
+host = "http://allcalidad.com/"
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+    itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host))
+    itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero" ))
+    itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<"))
+    itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "favorites" ))
+    itemlist.append(Item(channel = item.channel, title = ""))
+    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s="))
+    return itemlist
+
+def newest(categoria):
+    logger.info()
+    itemlist = []
+    item = Item()
+    try:
+        if categoria == 'peliculas':
+            item.url = host
+        elif categoria == 'infantiles':
+            item.url = host + 'category/animacion/'
+        itemlist = peliculas(item)
+        if "Pagina" in itemlist[-1].title:
+            itemlist.pop()
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+
+    return itemlist
+
+
+def search(item, texto):
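+    # Global-search entry point: Alfa calls search(item, texto) with the query
+    # typed by the user whenever "include_in_global_search" is enabled in the
+    # channel JSON; it must return a list of Items (empty if nothing matches).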
+ logger.info() + texto = texto.replace(" ","+") + item.url = item.url + texto + item.extra = "busca" + if texto!='': + return peliculas(item) + else: + return [] + + +def generos_years(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '(?s)%s(.*?)' %item.extra + bloque = scrapertools.find_single_match(data, patron) + patron = 'href="([^"]+)' + patron += '">([^<]+)' + matches = scrapertools.find_multiple_matches(bloque, patron) + for url, titulo in matches: + itemlist.append(Item(channel = item.channel, + action = "peliculas", + title = titulo, + url = url + )) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '(?s)short_overlay.*?([^<]+)') + year = scrapertools.find_single_match(varios, 'Año.*?kinopoisk">([^<]+)') + year = scrapertools.find_single_match(year, '[0-9]{4}') + mtitulo = titulo + " (" + idioma + ") (" + year + ")" + new_item = Item(channel = item.channel, + action = "findvideos", + title = mtitulo, + fulltitle = titulo, + thumbnail = thumbnail, + url = url, + contentTitle = titulo, + contentType="movie" + ) + if year: + new_item.infoLabels['year'] = int(year) + itemlist.append(new_item) + url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)') + if url_pagina != "": + pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)") + itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina)) + return itemlist + + +def findvideos(item): + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '(?s)fmi(.*?)thead' + bloque = scrapertools.find_single_match(data, patron) + match = scrapertools.find_multiple_matches(bloque, '(?is)iframe .*?src="([^"]+)') + for url in match: + server = servertools.get_server_from_url(url) + titulo = "Ver en: " + server + if "youtube" in server: + if "embed" in url: + url = "http://www.youtube.com/watch?v=" + scrapertools.find_single_match(url, 'embed/(.*)') + titulo = "[COLOR = yellow]Ver trailer: " + server + "[/COLOR]" + elif "directo" in server: + continue + itemlist.append( + Item(channel = item.channel, + action = "play", + title = titulo, + fulltitle = item.fulltitle, + thumbnail = item.thumbnail, + server = server, + url = url + )) + if itemlist: + itemlist.append(Item(channel = item.channel)) + itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", + text_color="magenta")) + # Opción "Añadir esta película a la biblioteca de XBMC" + if item.extra != "library": + if config.get_library_support(): + itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", text_color="green", + filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail, + infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle, + extra="library")) + return itemlist \ No newline at end of file diff --git a/plugin.video.alfa/channels/allpeliculas.json b/plugin.video.alfa/channels/allpeliculas.json new file mode 100755 index 00000000..f0078ef2 --- /dev/null +++ b/plugin.video.alfa/channels/allpeliculas.json @@ -0,0 +1,77 @@ +{ + "id": "allpeliculas", + "name": "Allpeliculas", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "24/06/2017", + "description": "Url mal escritas" + }, + { + "date": "10/06/2017", + "description": "Reparado búsqueda de videos" + }, + { + "date": "15/03/2017", + "description": 
"limpieza código" + }, + { + "date": "16/02/2017", + "description": "Añadidas nuevas opciones y servidores" + }, + { + "date": "19/03/2016", + "description": "Añadido soporte para la videoteca y reparada busqueda global." + } + ], + "thumbnail": "http://i.imgur.com/aWCDWtn.png", + "banner": "allpeliculas.png", + "categories": [ + "movie", + "latino", + "vos", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/allpeliculas.py b/plugin.video.alfa/channels/allpeliculas.py new file mode 100755 index 00000000..3dd0fff2 --- /dev/null +++ b/plugin.video.alfa/channels/allpeliculas.py @@ -0,0 +1,485 @@ +# -*- coding: utf-8 -*- + +import string + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +__modo_grafico__ = config.get_setting('modo_grafico', "allpeliculas") +__perfil__ = int(config.get_setting('perfil', "allpeliculas")) + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] + +IDIOMAS = {"Castellano": "CAST", "Latino": "LAT", "Subtitulado": "VOSE", "Ingles": "VO"} +SERVERS = {"26": "powvideo", "45": "okru", "75": "openload", "12": "netutv", "65": "thevideos", + "67": "spruto", "71": "stormo", "73": "idowatch", "48": "okru", "55": "openload", + "20": "nowvideo", "84": "fastplay", "96": "raptu", "94": "tusfiles"} + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png", + url="http://allpeliculas.co/Movies/fullView/1/0/&ajax=1")) + itemlist.append(item.clone(title="Series", action="lista", fanart="http://i.imgur.com/9loVksV.png", extra="tv", + url="http://allpeliculas.co/Movies/fullView/1/86/?ajax=1&withoutFilter=1", )) + itemlist.append(item.clone(title="Géneros", action="subindice", fanart="http://i.imgur.com/ymazCWq.jpg")) + itemlist.append(item.clone(title="Índices", action="indices", fanart="http://i.imgur.com/c3HS8kj.png")) + itemlist.append(item.clone(title="", action="")) + itemlist.append(item.clone(title="Buscar...", action="search")) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + if texto != "": + texto = texto.replace(" ", "+") + item.url = "http://allpeliculas.co/Search/advancedSearch?searchType=movie&movieName=" + texto + "&ajax=1" + try: + return busqueda(item) + 
except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == "peliculas": + item.url = "http://allpeliculas.co/Movies/fullView/1/0/&ajax=1" + item.action = "lista" + itemlist = lista(item) + + if itemlist[-1].action == "lista": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def busqueda(item): + logger.info() + itemlist = [] + item.infoLabels = {} + item.text_color = color2 + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + data = scrapertools.decodeHtmlentities(data) + + patron = '(.*?)/.*?' \ + ' (.*?)

.*?Género: (.*?)

' + matches = scrapertools.find_multiple_matches(data, patron) + for thumbnail, vote, url, title, year, genre in matches: + url = "http://allpeliculas.co" + url.replace("#", "") + "&ajax=1" + thumbnail = thumbnail.replace("/105/", "/400/").replace("/141/", "/600/").replace(" ", "%20") + titulo = title + " (" + year + ")" + item.infoLabels['year'] = year + item.infoLabels['genre'] = genre + item.infoLabels['rating'] = vote + if "Series" not in genre: + itemlist.append(item.clone(action="findvideos", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail, + context=["buscar_trailer"], contentTitle=title, contentType="movie")) + else: + itemlist.append(item.clone(action="temporadas", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail, + context=["buscar_trailer"], contentTitle=title, contentType="tvshow")) + + # Paginacion + next_page = scrapertools.find_single_match(data, 'class="pagination-active".*?href="([^"]+)"') + if next_page != "": + url = next_page.replace("#", "") + "&ajax=1" + itemlist.append(item.clone(action="lista", title=">> Siguiente", url=url, text_color=color3)) + + return itemlist + + +def indices(item): + logger.info() + itemlist = [] + item.text_color = color1 + + itemlist.append(item.clone(title="Alfabético", action="subindice")) + itemlist.append(item.clone(title="Por idioma", action="subindice")) + itemlist.append(item.clone(title="Por valoración", action="lista", + url="http://allpeliculas.co/Movies/fullView/1/0/rating:imdb|date:1900-3000|" + "alphabet:all|?ajax=1&withoutFilter=1")) + itemlist.append(item.clone(title="Por año", action="subindice")) + itemlist.append(item.clone(title="Por calidad", action="subindice")) + + return itemlist + + +def lista(item): + logger.info() + itemlist = [] + item.infoLabels = {} + item.text_color = color2 + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + data = scrapertools.decodeHtmlentities(data) + + bloque = scrapertools.find_single_match(data, '
([^<]+)<\/span>|
)' \ + '.*?
(.*?).*?Year.*?">(.*?).*?' \ + '(?:Género|Genre).*?(.*?).*?Language.*?(.*?).*?' \ + '
(.*?)<.*?
(.*?)<.*?' \ + '
(.*?)<' + + if bloque == "": + bloque = data[:] + matches = scrapertools.find_multiple_matches(bloque, patron) + for thumbnail, url, trailer, vote, year, genre, idioma, sinopsis, calidad, title in matches: + url = url.replace("#", "") + "&ajax=1" + thumbnail = thumbnail.replace("/157/", "/400/").replace("/236/", "/600/").replace(" ", "%20") + idioma = idioma.replace(" ", "").split(",") + idioma.sort() + titleidioma = "[" + "/".join(idioma) + "]" + + titulo = title + " " + titleidioma + " [" + calidad + "]" + item.infoLabels['plot'] = sinopsis + item.infoLabels['year'] = year + item.infoLabels['genre'] = genre + item.infoLabels['rating'] = vote + item.infoLabels['trailer'] = trailer.replace("youtu.be/", "http://www.youtube.com/watch?v=") + if item.extra != "tv" or "Series" not in genre: + itemlist.append(item.clone(action="findvideos", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail, + context=["buscar_trailer"], contentTitle=title, contentType="movie")) + else: + itemlist.append(item.clone(action="temporadas", title=titulo, fulltitle=title, url=url, thumbnail=thumbnail, + context=["buscar_trailer"], contentTitle=title, show=title, + contentType="tvshow")) + + try: + from core import tmdb + # Obtenemos los datos basicos de todas las peliculas mediante multihilos + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + # Paginacion + next_page = scrapertools.find_single_match(data, 'class="pagination-active".*?href="([^"]+)"') + if next_page != "": + url = next_page.replace("#", "") + "&ajax=1" + itemlist.append(item.clone(action="lista", title=">> Siguiente", url=url, text_color=color3)) + + return itemlist + + +def subindice(item): + logger.info() + itemlist = [] + + url_base = "http://allpeliculas.co/Movies/fullView/1/0/date:1900-3000|alphabet:all|?ajax=1&withoutFilter=1" + indice_genero, indice_alfa, indice_idioma, indice_year, indice_calidad = dict_indices() + if "Géneros" in item.title: + for key, value in indice_genero.items(): + url = url_base.replace("/0/", "/" + key + "/") + itemlist.append(item.clone(action="lista", title=value, url=url)) + itemlist.sort(key=lambda item: item.title) + + elif "Alfabético" in item.title: + for i in range(len(indice_alfa)): + url = url_base.replace(":all", ":" + indice_alfa[i]) + itemlist.append(item.clone(action="lista", title=indice_alfa[i], url=url)) + + elif "Por idioma" in item.title: + for key, value in indice_idioma.items(): + url = url_base.replace("3000|", "3000|language:" + key) + itemlist.append(item.clone(action="lista", title=value, url=url)) + itemlist.sort(key=lambda item: item.title) + + elif "Por año" in item.title: + for i in range(len(indice_year)): + year = indice_year[i] + url = url_base.replace("1900-3000", year + "-" + year) + itemlist.append(item.clone(action="lista", title=year, url=url)) + + elif "Por calidad" in item.title: + for key, value in indice_calidad.items(): + url = "http://allpeliculas.co/Search/advancedSearch?searchType=movie&movieName=&movieDirector=&movieGenre" \ + "=&movieActor=&movieYear=&language=&movieTypeId=" + key + "&ajax=1" + itemlist.append(item.clone(action="busqueda", title=value, url=url)) + itemlist.sort(key=lambda item: item.title) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + item.text_color = color3 + + # Rellena diccionarios idioma y calidad + idiomas_videos, calidad_videos = dict_videos() + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + data = 
scrapertools.decodeHtmlentities(data) + + if item.extra != "library": + try: + from core import tmdb + tmdb.set_infoLabels(item, __modo_grafico__) + except: + pass + + # Enlaces Online + patron = '([^<]+)') + item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=") + + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta", context="")) + if item.extra != "library": + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca", + action="add_pelicula_to_library", url=item.url, text_color="green", + infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle, + extra="library")) + + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + try: + from core import tmdb + tmdb.set_infoLabels_item(item, __modo_grafico__) + except: + pass + + matches = scrapertools.find_multiple_matches(data, '([^<]+)') + item.infoLabels['trailer'] = trailer_url.replace("youtu.be/", "http://www.youtube.com/watch?v=") + + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta", context="")) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + # Rellena diccionarios idioma y calidad + idiomas_videos, calidad_videos = dict_videos() + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + data = scrapertools.decodeHtmlentities(data) + + patron = '
  • ]+season="' + str(item.infoLabels['season']) + '"[^>]+>([^<]+)
  • ' + matches = scrapertools.find_multiple_matches(data, patron) + capitulos = [] + for title in matches: + if not title in capitulos: + episode = int(title.split(" ")[1]) + capitulos.append(title) + itemlist.append( + item.clone(action="findvideostv", title=title, contentEpisodeNumber=episode, contentType="episode")) + + itemlist.sort(key=lambda item: item.contentEpisodeNumber) + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + for item in itemlist: + if item.infoLabels["episodio_titulo"]: + item.title = "%dx%02d: %s" % ( + item.contentSeason, item.contentEpisodeNumber, item.infoLabels["episodio_titulo"]) + else: + item.title = "%dx%02d: %s" % (item.contentSeason, item.contentEpisodeNumber, item.title) + + return itemlist + + +def findvideostv(item): + logger.info() + itemlist = [] + + # Rellena diccionarios idioma y calidad + idiomas_videos, calidad_videos = dict_videos() + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + data = scrapertools.decodeHtmlentities(data) + + patron = '') + matches = scrapertools.find_multiple_matches(bloque_idioma, '') + for key1, key2 in matches: + idiomas_videos[key1] = unicode(key2, "utf8").capitalize().encode("utf8") + bloque_calidad = scrapertools.find_single_match(data, '') + matches = scrapertools.find_multiple_matches(bloque_genero, '') + for key1, key2 in matches: + if key2 != "Series": + if key2 == "Mystery": + key2 = "Misterio" + indice_genero[key1] = key2 + bloque_year = scrapertools.find_single_match(data, '') + matches = scrapertools.find_multiple_matches(bloque_calidad, '') + for key1, key2 in matches: + indice_calidad[key1] = key2 + + return indice_genero, indice_alfa, indice_idioma, indice_year, indice_calidad diff --git a/plugin.video.alfa/channels/alltorrent.json b/plugin.video.alfa/channels/alltorrent.json new file mode 100755 index 00000000..18564066 --- /dev/null +++ b/plugin.video.alfa/channels/alltorrent.json @@ -0,0 +1,37 @@ +{ + "id": "alltorrent", + "name": "Alltorrent", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://imgur.com/sLaXHvp.png", + "version": 1, + "changes": [ + { + "date": "26/04/2017", + "description": "Release" + } + ], + "categories": [ + "torrent", + "movie" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/alltorrent.py b/plugin.video.alfa/channels/alltorrent.py new file mode 100755 index 00000000..8f662aca --- /dev/null +++ b/plugin.video.alfa/channels/alltorrent.py @@ -0,0 +1,395 @@ +# -*- coding: utf-8 -*- + +import os +import re +import unicodedata +from threading import Thread + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +__modo_grafico__ = config.get_setting('modo_grafico', "ver-pelis") + + +# Para la busqueda en bing evitando baneos + + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + 
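+    # The handler tweaks below (ignore robots.txt, drop the Referer, follow
+    # redirects) are what lets the scraper query Bing repeatedly without being
+    # blocked, per the "evitando baneos" note above.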
br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) + # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + itemlist = [] + i = 0 + global i + itemlist.append(item.clone(title="[COLOR springgreen][B]Todas Las Películas[/B][/COLOR]", action="scraper", + url="http://alltorrent.net/", thumbnail="http://imgur.com/XLqPZoF.png", + fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie")) + itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 1080p[/COLOR]", action="scraper", + url="http://alltorrent.net/rezolucia/1080p/", thumbnail="http://imgur.com/XLqPZoF.png", + fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie")) + itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 720p[/COLOR]", action="scraper", + url="http://alltorrent.net/rezolucia/720p/", thumbnail="http://imgur.com/XLqPZoF.png", + fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie")) + itemlist.append(item.clone(title="[COLOR springgreen] Incluyen Hdrip[/COLOR]", action="scraper", + url="http://alltorrent.net/rezolucia/hdrip/", thumbnail="http://imgur.com/XLqPZoF.png", + fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie")) + itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 3D[/COLOR]", action="scraper", + url="http://alltorrent.net/rezolucia/3d/", thumbnail="http://imgur.com/XLqPZoF.png", + fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR floralwhite][B]Buscar[/B][/COLOR]", action="search", + thumbnail="http://imgur.com/5EBwccS.png", fanart="http://imgur.com/v3ChkZu.jpg", + contentType="movie", extra="titulo")) + itemlist.append(itemlist[-1].clone(title="[COLOR oldlace] Por Título[/COLOR]", action="search", + thumbnail="http://imgur.com/5EBwccS.png", fanart="http://imgur.com/v3ChkZu.jpg", + contentType="movie", extra="titulo")) + itemlist.append(itemlist[-1].clone(title="[COLOR oldlace] Por Año[/COLOR]", action="search", + thumbnail="http://imgur.com/5EBwccS.png", fanart="http://imgur.com/v3ChkZu.jpg", + contentType="movie", extra="año")) + itemlist.append(itemlist[-1].clone(title="[COLOR oldlace] Por Rating Imdb[/COLOR]", action="search", + thumbnail="http://imgur.com/5EBwccS.png", fanart="http://imgur.com/v3ChkZu.jpg", + contentType="movie", extra="rating")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + if item.extra == "titulo": + item.url = "http://alltorrent.net/?s=" + texto + + elif 
item.extra == "año": + item.url = "http://alltorrent.net/weli/" + texto + else: + item.url = "http://alltorrent.net/imdb/" + texto + if texto != '': + return scraper(item) + + +def scraper(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = scrapertools.find_multiple_matches(data, + '
    ([^"]+) ') + + for url, thumb, title, year in patron: + title = re.sub(r"\(\d+\)", "", title) + + title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if + unicodedata.category(c) != 'Mn')).encode("ascii", "ignore") + titulo = "[COLOR lime]" + title + "[/COLOR]" + title = re.sub(r"!|\/.*", "", title).strip() + + new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title, + contentTitle=title, contentType="movie", library=True) + new_item.infoLabels['year'] = year + itemlist.append(new_item) + + ## Paginación + next = scrapertools.find_single_match(data, '
  • Next Page') + if len(next) > 0: + url = next + + itemlist.append(item.clone(title="[COLOR olivedrab][B]Siguiente >>[/B][/COLOR]", action="scraper", url=url, + thumbnail="http://imgur.com/TExhOJE.png")) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + if not "Siguiente >>" in item.title: + if "0." in str(item.infoLabels['rating']): + item.infoLabels['rating'] = "[COLOR olive]Sin puntuación[/COLOR]" + else: + item.infoLabels['rating'] = "[COLOR yellow]" + str(item.infoLabels['rating']) + "[/COLOR]" + item.title = item.title + " " + str(item.infoLabels['rating']) + except: + pass + + for item_tmdb in itemlist: + logger.info(str(item_tmdb.infoLabels['tmdb_id'])) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + th = Thread(target=get_art(item)) + th.setDaemon(True) + th.start() + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + enlaces = scrapertools.find_multiple_matches(data, + 'id="modal-quality-\w+">(.*?).*?class="quality-size">(.*?)

    .*?href="([^"]+)"') + for calidad, size, url in enlaces: + title = "[COLOR palegreen][B]Torrent[/B][/COLOR]" + " " + "[COLOR chartreuse]" + calidad + "[/COLOR]" + "[COLOR teal] ( [/COLOR]" + "[COLOR forestgreen]" + size + "[/COLOR]" + "[COLOR teal] )[/COLOR]" + itemlist.append( + Item(channel=item.channel, title=title, url=url, action="play", server="torrent", fanart=item.fanart, + thumbnail=item.thumbnail, extra=item.extra, InfoLabels=item.infoLabels, folder=False)) + dd = scrapertools.find_single_match(data, 'button-green-download-big".*?href="([^"]+)">') + if dd: + if item.library: + itemlist.append( + Item(channel=item.channel, title="[COLOR floralwhite][B]Online[/B][/COLOR]", url=dd, action="dd_y_o", + thumbnail="http://imgur.com/mRmBIV4.png", fanart=item.extra.split("|")[0], + contentType=item.contentType, extra=item.extra, folder=True)) + else: + videolist = servertools.find_video_items(data=str(dd)) + for video in videolist: + icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", + "server_" + video.server + ".png") + if not os.path.exists(icon_server): + icon_server = "" + itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, + title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]", + thumbnail=icon_server, fanart=item.extra.split("|")[1], action="play", + folder=False)) + if item.library and config.get_videolibrary_support() and itemlist: + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], + 'title': item.infoLabels['title']} + itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", + action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, + text_color="0xFFe5ffcc", + thumbnail='http://imgur.com/DNCBjUB.png', extra="library")) + + return itemlist + + +def dd_y_o(item): + itemlist = [] + logger.info() + videolist = servertools.find_video_items(data=item.url) + for video in videolist: + icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", + "server_" + video.server + ".png") + if not os.path.exists(icon_server): + icon_server = "" + itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, + title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]", thumbnail=icon_server, + fanart=item.extra.split("|")[1], action="play", folder=False)) + return itemlist + + +def fanartv(item, id_tvdb, id, images={}): + headers = [['Content-Type', 'application/json']] + from core import jsontools + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % id + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb + try: + data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) + if data and not "error message" in data: + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + images[key] = value + else: + images = [] + + except: + images = [] + return images + + +def get_art(item): + logger.info() + id = item.infoLabels['tmdb_id'] + check_fanart = item.infoLabels['fanart'] + if item.contentType != "movie": + tipo_ps = "tv" + else: + tipo_ps = "movie" + if not id: + year = item.extra + otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps) + id = otmdb.result.get("id") + + if id == None: + otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps) + id = otmdb.result.get("id") + if id == None: + if item.contentType == "movie": 
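+                # The TMDb lookups above returned no id, so fall back to
+                # scraping Bing for the IMDb id (a site:imdb.com query issued
+                # through the mechanize browser() helper defined earlier).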
+ urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + item.fulltitle.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, + '
  • (.*?)h="ID.*?.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + item.fulltitle.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, '
  • (.*?)h="ID.*?') + try: + imdb_id = scrapertools.get_match(subdata_imdb, '(.*?)h="ID.*?.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", + data) + subdata_imdb = scrapertools.find_single_match(data, + '
  • (.*?)h="ID.*?') + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '= 4: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + else: + item.extra = imagenes[3] + "|" + imagenes[3] + elif len(imagenes) == 3: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + elif imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + item.extra = imagenes[1] + "|" + imagenes[1] + elif len(imagenes) == 2: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + else: + item.extra = imagenes[1] + "|" + imagenes[0] + elif len(imagenes) == 1: + item.extra = imagenes + "|" + imagenes + else: + item.extra = item.fanart + "|" + item.fanart + + images_fanarttv = fanartv(item, id_tvdb, id) + if images_fanarttv: + if item.contentType == "movie": + if images_fanarttv.get("moviedisc"): + item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url") + elif images_fanarttv.get("hdmovielogo"): + item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url") + elif images_fanarttv.get("moviethumb"): + item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url") + elif images_fanarttv.get("moviebanner"): + item.thumbnail_ = images_fanarttv.get("moviebanner")[0].get("url") + else: + item.thumbnail = item.thumbnail + else: + if images_fanarttv.get("hdtvlogo"): + item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url") + elif images_fanarttv.get("clearlogo"): + item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url") + + if images_fanarttv.get("tvbanner"): + item.extra = item.extra + "|" + images_fanarttv.get("tvbanner")[0].get("url") + elif images_fanarttv.get("tvthumb"): + item.extra = item.extra + "|" + images_fanarttv.get("tvthumb")[0].get("url") + else: + item.extra = item.extra + "|" + item.thumbnail + else: + item.extra = item.extra + "|" + item.thumbnail diff --git a/plugin.video.alfa/channels/animeflv.json b/plugin.video.alfa/channels/animeflv.json new file mode 100755 index 00000000..01ed328b --- /dev/null +++ b/plugin.video.alfa/channels/animeflv.json @@ -0,0 +1,49 @@ +{ + "id": "animeflv", + "name": "AnimeFLV", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "animeflv.png", + "banner": "animeflv.png", + "version": 1, + "changes": [ + { + "date": "18/05/2017", + "description": "fix ultimos animes, episodios" + }, + { + "date": "06/04/2017", + "description": "fix ultimos episodios" + }, + { + "date": "01/03/2017", + "description": "fix nueva web" + }, + { + "date": "09/07/2016", + "description": "Arreglo viewmode" + } + ], + "categories": [ + "anime" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + 
"default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_anime", + "type": "bool", + "label": "Incluir en Novedades - Episodios de anime", + "default": true, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/animeflv.py b/plugin.video.alfa/channels/animeflv.py new file mode 100755 index 00000000..91e97197 --- /dev/null +++ b/plugin.video.alfa/channels/animeflv.py @@ -0,0 +1,330 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from channels import renumbertools +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item + +HOST = "https://animeflv.net/" + + +def mainlist(item): + logger.info() + + itemlist = list() + + itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST)) + itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST)) + itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "browse?order=title")) + + itemlist.append(Item(channel=item.channel, title="Buscar por:")) + itemlist.append(Item(channel=item.channel, action="search", title=" Título")) + itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "browse", + extra="genre")) + itemlist.append(Item(channel=item.channel, action="search_section", title=" Tipo", url=HOST + "browse", + extra="type")) + itemlist.append(Item(channel=item.channel, action="search_section", title=" Año", url=HOST + "browse", + extra="year")) + itemlist.append(Item(channel=item.channel, action="search_section", title=" Estado", url=HOST + "browse", + extra="status")) + + itemlist = renumbertools.show_option(item.channel, itemlist) + + return itemlist + + +def search(item, texto): + logger.info() + itemlist = [] + item.url = urlparse.urljoin(HOST, "api/animes/search") + texto = texto.replace(" ", "+") + post = "value=%s" % texto + data = httptools.downloadpage(item.url, post=post).data + + try: + dict_data = jsontools.load(data) + + for e in dict_data: + if e["id"] != e["last_id"]: + _id = e["last_id"] + else: + _id = e["id"] + + url = "%sanime/%s/%s" % (HOST, _id, e["slug"]) + title = e["title"] + thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"]) + new_item = item.clone(action="episodios", title=title, url=url, thumbnail=thumbnail) + + if e["type"] != "movie": + new_item.show = title + new_item.context = renumbertools.context(item) + else: + new_item.contentType = "movie" + new_item.contentTitle = title + + itemlist.append(new_item) + + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + return itemlist + + +def search_section(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + patron = 'id="%s_select"[^>]+>(.*?)' % item.extra + data = scrapertools.find_single_match(data, patron) + + matches = re.compile('', re.DOTALL).findall(data) + + for _id, title in matches: + url = "%s?%s=%s&order=title" % (item.url, item.extra, _id) + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, + context=renumbertools.context(item))) + + return itemlist + + +def newest(categoria): + itemlist = [] + + if categoria == 'anime': + itemlist = novedades_episodios(Item(url=HOST)) + + return itemlist + + +def novedades_episodios(item): + logger.info() + + data = 
httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + data = scrapertools.find_single_match(data, '

    Últimos episodios

    .+?
      ]+>.+?(.*?)' + '(.*?)', re.DOTALL).findall(data) + itemlist = [] + + for url, thumbnail, str_episode, show in matches: + + try: + episode = int(str_episode.replace("Episodio ", "")) + except ValueError: + season = 1 + episode = 1 + else: + season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode) + + title = "%s: %sx%s" % (show, season, str(episode).zfill(2)) + url = urlparse.urljoin(HOST, url) + thumbnail = urlparse.urljoin(HOST, thumbnail) + + new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail, + fulltitle=title) + + itemlist.append(new_item) + + return itemlist + + +def novedades_anime(item): + logger.info() + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + data = scrapertools.find_single_match(data, '
        (.*?).+?(.*?).+?' + '(?:

        (.*?)

        .+?)?', re.DOTALL).findall(data) + itemlist = [] + + for url, thumbnail, _type, title, plot in matches: + + url = urlparse.urljoin(HOST, url) + thumbnail = urlparse.urljoin(HOST, thumbnail) + + new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, + fulltitle=title, plot=plot) + if _type != "Película": + new_item.show = title + new_item.context = renumbertools.context(item) + else: + new_item.contentType = "movie" + new_item.contentTitle = title + + itemlist.append(new_item) + + return itemlist + + +def listado(item): + logger.info() + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + url_pagination = scrapertools.find_single_match(data, '
      • .*?
      • ') + + data = scrapertools.find_multiple_matches(data, '
          .+?(.*?).+?(.*?)' + '.*?

          (.*?)

          ', re.DOTALL).findall(data) + + itemlist = [] + + for url, thumbnail, _type, title, plot in matches: + + url = urlparse.urljoin(HOST, url) + thumbnail = urlparse.urljoin(HOST, thumbnail) + + new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, + fulltitle=title, plot=plot) + + if _type == "Anime": + new_item.show = title + new_item.context = renumbertools.context(item) + else: + new_item.contentType = "movie" + new_item.contentTitle = title + + itemlist.append(new_item) + + if url_pagination: + url = urlparse.urljoin(HOST, url_pagination) + title = ">> Pagina Siguiente" + + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + + # fix para renumbertools + item.show = scrapertools.find_single_match(data, '

          (.*?)

          ') + + if item.plot == "": + item.plot = scrapertools.find_single_match(data, 'Description[^>]+>

          (.*?)

          ') + + matches = re.compile('href="([^"]+)">
          ' + '

          (.*?)

          ', re.DOTALL).findall(data) + + if matches: + for url, thumb, title in matches: + title = title.strip() + url = urlparse.urljoin(item.url, url) + # thumbnail = item.thumbnail + + try: + episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$")) + except ValueError: + season = 1 + episode = 1 + else: + season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode) + + title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2)) + + itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title, + fanart=item.thumbnail, contentType="episode")) + else: + # no hay thumbnail + matches = re.compile('
          ]+>(.*?)<', re.DOTALL).findall(data) + + for url, title in matches: + title = title.strip() + url = urlparse.urljoin(item.url, url) + thumb = item.thumbnail + + try: + episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$")) + except ValueError: + season = 1 + episode = 1 + else: + season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode) + + title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2)) + + itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title, + fanart=item.thumbnail, contentType="episode")) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'\d+?
        • )(.+?)(?:)' +REGEX_THUMB = r'src="(http://media.animeflv\.me/uploads/thumbs/[^"]+?)"' +REGEX_PLOT = r'Línea de historia:

          (.*?)' +REGEX_URL = r'href="(http://animeflv\.me/Anime/[^"]+)">' +REGEX_SERIE = r'{0}.+?{1}([^<]+?)

          (.+?)

          '.format(REGEX_THUMB, REGEX_URL) +REGEX_EPISODE = r'href="(http://animeflv\.me/Ver/[^"]+?)">(?:)?(.+?)(\d+/\d+/\d+)' +REGEX_GENERO = r'([^<]+)' + + +def get_url_contents(url): + html = httptools.downloadpage(url, headers=CHANNEL_DEFAULT_HEADERS).data + # Elimina los espacios antes y despues de aperturas y cierres de etiquetas + html = re.sub(r'>\s+<', '><', html) + html = re.sub(r'>\s+', '>', html) + html = re.sub(r'\s+<', '<', html) + + return html + + +def get_cookie_value(): + """ + Obtiene las cookies de cloudflare + """ + + cookie_file = path.join(config.get_data_path(), 'cookies.dat') + cookie_data = filetools.read(cookie_file) + + cfduid = scrapertools.find_single_match( + cookie_data, r"animeflv.*?__cfduid\s+([A-Za-z0-9\+\=]+)") + cfduid = "__cfduid=" + cfduid + ";" + cf_clearance = scrapertools.find_single_match( + cookie_data, r"animeflv.*?cf_clearance\s+([A-Za-z0-9\+\=\-]+)") + cf_clearance = " cf_clearance=" + cf_clearance + cookies_value = cfduid + cf_clearance + + return cookies_value + + +header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.me&Cookie=" + \ + get_cookie_value() + + +def __find_next_page(html): + """ + Busca el enlace a la pagina siguiente + """ + + return scrapertools.find_single_match(html, REGEX_NEXT_PAGE) + + +def __extract_info_from_serie(html): + """ + Extrae la información de una serie o pelicula desde su página + Util para cuando una busqueda devuelve un solo resultado y animeflv.me + redirecciona a la página de este. + """ + + title = scrapertools.find_single_match(html, REGEX_TITLE) + title = clean_title(title) + url = scrapertools.find_single_match(html, REGEX_URL) + thumbnail = scrapertools.find_single_match( + html, REGEX_THUMB) + header_string + plot = scrapertools.find_single_match(html, REGEX_PLOT) + + return [title, url, thumbnail, plot] + + +def __sort_by_quality(items): + """ + Ordena los items por calidad en orden decreciente + """ + + def func(item): + return int(scrapertools.find_single_match(item.title, r'\[(.+?)\]')) + + return sorted(items, key=func, reverse=True) + + +def clean_title(title): + """ + Elimina el año del nombre de las series o peliculas + """ + year_pattern = r'\([\d -]+?\)' + + return re.sub(year_pattern, '', title).strip() + + +def __find_series(html): + """ + Busca series en un listado, ejemplo: resultados de busqueda, categorias, etc + """ + series = [] + + # Limitamos la busqueda al listado de series + list_start = html.find('') + list_end = html.find('
          ', list_start) + + list_html = html[list_start:list_end] + + for serie in re.finditer(REGEX_SERIE, list_html, re.S): + thumbnail, url, title, plot = serie.groups() + title = clean_title(title) + thumbnail = thumbnail + header_string + plot = scrapertools.htmlclean(plot) + + series.append([title, url, thumbnail, plot]) + + return series + + +def mainlist(item): + logger.info() + + itemlist = list() + + itemlist.append(Item(channel=item.channel, action="letras", + title="Por orden alfabético")) + itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", + url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime"))) + itemlist.append(Item(channel=item.channel, action="series", title="Por popularidad", + url=urlparse.urljoin(CHANNEL_HOST, "/ListadeAnime/MasVisto"))) + itemlist.append(Item(channel=item.channel, action="series", title="Novedades", + url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/Nuevo"))) + itemlist.append(Item(channel=item.channel, action="series", title="Últimos", + url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/LatestUpdate"))) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", + url=urlparse.urljoin(CHANNEL_HOST, "Buscar?s="))) + + itemlist = renumbertools.show_option(item.channel, itemlist) + + return itemlist + + +def letras(item): + logger.info() + + base_url = 'http://animeflv.me/ListadeAnime?c=' + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="series", title="#", + url=base_url + "#", viewmode="movies_with_plot")) + + # Itera sobre las posiciones de las letras en la tabla ascii + # 65 = A, 90 = Z + for i in xrange(65, 91): + letter = chr(i) + + logger.debug("title=[{0}], url=[{1}], thumbnail=[]".format( + letter, base_url + letter)) + + itemlist.append(Item(channel=item.channel, action="series", title=letter, + url=base_url + letter, viewmode="movies_with_plot")) + + return itemlist + + +def generos(item): + logger.info() + + itemlist = [] + + html = get_url_contents(item.url) + + generos = re.findall(REGEX_GENERO, html) + + for url, genero in generos: + logger.debug( + "title=[{0}], url=[{1}], thumbnail=[]".format(genero, url)) + + itemlist.append(Item(channel=item.channel, action="series", title=genero, url=url, + plot='', viewmode="movies_with_plot")) + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "%20") + item.url = "{0}{1}".format(item.url, texto) + + html = get_url_contents(item.url) + + try: + # Se encontro un solo resultado y se redicciono a la página de la serie + if html.find('Ver') >= 0: + series = [__extract_info_from_serie(html)] + # Se obtuvo una lista de resultados + else: + series = __find_series(html) + + items = [] + for serie in series: + title, url, thumbnail, plot = serie + + logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format( + title, url, thumbnail)) + + items.append(Item(channel=item.channel, action="episodios", title=title, + url=url, thumbnail=thumbnail, plot=plot, + show=title, viewmode="movies_with_plot", context=renumbertools.context(item))) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + return items + + +def series(item): + logger.info() + + page_html = get_url_contents(item.url) + + series = __find_series(page_html) + + items = [] + for serie in series: + title, url, thumbnail, plot = serie + + logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format( + title, url, thumbnail)) + + items.append(Item(channel=item.channel, action="episodios", 
title=title, url=url, + thumbnail=thumbnail, plot=plot, show=title, viewmode="movies_with_plot", + context=renumbertools.context(item))) + + url_next_page = __find_next_page(page_html) + + if url_next_page: + items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", + url=url_next_page, thumbnail="", plot="", folder=True, + viewmode="movies_with_plot")) + + return items + + +def episodios(item): + logger.info() + + itemlist = [] + + html_serie = get_url_contents(item.url) + + info_serie = __extract_info_from_serie(html_serie) + plot = info_serie[3] if info_serie else '' + + episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL) + + es_pelicula = False + for url, title, date in episodes: + episode = scrapertools.find_single_match(title, r'Episodio (\d+)') + + # El enlace pertenece a un episodio + if episode: + season = 1 + episode = int(episode) + season, episode = renumbertools.numbered_for_tratk( + item.channel, item.show, season, episode) + + title = "{0}x{1:02d} {2} ({3})".format( + season, episode, "Episodio " + str(episode), date) + # El enlace pertenece a una pelicula + else: + title = "{0} ({1})".format(title, date) + item.url = url + es_pelicula = True + + logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format( + title, url, item.thumbnail)) + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, + thumbnail=item.thumbnail, plot=plot, show=item.show, + fulltitle="{0} {1}".format(item.show, title), + viewmode="movies_with_plot", folder=True)) + + # El sistema soporta la videoteca y se encontro por lo menos un episodio + # o pelicula + if config.get_videolibrary_support() and len(itemlist) > 0: + if es_pelicula: + item_title = "Añadir película a la videoteca" + item_action = "add_pelicula_to_library" + item_extra = "" + else: + item_title = "Añadir serie a la videoteca" + item_action = "add_serie_to_library" + item_extra = "episodios" + + itemlist.append(Item(channel=item.channel, title=item_title, url=item.url, + action=item_action, extra=item_extra, show=item.show)) + + if not es_pelicula: + itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios", + url=item.url, action="download_all_episodes", extra="episodios", + show=item.show)) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + + page_html = get_url_contents(item.url) + + regex_api = r'http://player\.animeflv\.me/[^\"]+' + iframe_url = scrapertools.find_single_match(page_html, regex_api) + + iframe_html = get_url_contents(iframe_url) + + regex_video_list = r'var part = \[([^\]]+)' + + videos_html = scrapertools.find_single_match(iframe_html, regex_video_list) + videos = re.findall('"([^"]+)"', videos_html, re.DOTALL) + + qualities = ["360", "480", "720", "1080"] + + for quality_id, video_url in enumerate(videos): + itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show), + title="Ver en calidad [{0}]".format(qualities[quality_id]), plot=item.plot, + folder=True, fulltitle=item.title, viewmode="movies_with_plot")) + + return __sort_by_quality(itemlist) diff --git a/plugin.video.alfa/channels/animeflv_ru.json b/plugin.video.alfa/channels/animeflv_ru.json new file mode 100755 index 00000000..1bdaea73 --- /dev/null +++ b/plugin.video.alfa/channels/animeflv_ru.json @@ -0,0 +1,47 @@ +{ + "id": "animeflv_ru", + "name": "AnimeFLV.RU", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/5nRR9qq.png", + "banner": 
"animeflv_ru.png", + "version": 1, + "compatible": { + "python": "2.7.9", + "addon_version": "4.2.1" + }, + "changes": { + "change": [ + { + "date": "06/04/2017", + "description": "fix" + }, + { + "date": "01/03/2017", + "description": "fix nueva web" + } + ] + }, + "categories": [ + "anime" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_anime", + "type": "bool", + "label": "Incluir en Novedades - Episodios de anime", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/animeflv_ru.py b/plugin.video.alfa/channels/animeflv_ru.py new file mode 100755 index 00000000..9dd5a71a --- /dev/null +++ b/plugin.video.alfa/channels/animeflv_ru.py @@ -0,0 +1,270 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from channels import renumbertools +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item + +HOST = "https://animeflv.ru/" + + +def mainlist(item): + logger.info() + + itemlist = list() + + itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST)) + itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST)) + itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "animes/nombre/lista")) + + itemlist.append(Item(channel=item.channel, title="Buscar por:")) + itemlist.append(Item(channel=item.channel, action="search", title=" Título")) + itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "animes", + extra="genre")) + + itemlist = renumbertools.show_option(item.channel, itemlist) + + return itemlist + + +def clean_title(title): + year_pattern = r'\([\d -]+?\)' + + return re.sub(year_pattern, '', title).strip() + + +def search(item, texto): + logger.info() + itemlist = [] + item.url = urlparse.urljoin(HOST, "search_suggest") + texto = texto.replace(" ", "+") + post = "value=%s" % texto + data = httptools.downloadpage(item.url, post=post).data + + try: + dict_data = jsontools.load(data) + + for e in dict_data: + title = clean_title(scrapertools.htmlclean(e["name"])) + url = e["url"] + plot = e["description"] + thumbnail = HOST + e["thumb"] + new_item = item.clone(action="episodios", title=title, url=url, plot=plot, thumbnail=thumbnail) + + if "Pelicula" in e["genre"]: + new_item.contentType = "movie" + new_item.contentTitle = title + else: + new_item.show = title + new_item.context = renumbertools.context(item) + + itemlist.append(new_item) + + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + return itemlist + + +def search_section(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + + patron = 'id="%s_filter"[^>]+><div class="inner">(.*?)</div></div>' % item.extra + data = scrapertools.find_single_match(data, patron) + matches = re.compile('<a href="([^"]+)"[^>]+>(.*?)</a>', re.DOTALL).findall(data) + + for url, title in matches: + url = "%s/nombre/lista" % url + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, + context=renumbertools.context(item))) + + return itemlist + + +def newest(categoria): + itemlist = [] + + if categoria 
== 'anime': + itemlist = novedades_episodios(Item(url=HOST)) + + return itemlist + + +def novedades_episodios(item): + logger.info() + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + data = scrapertools.find_single_match(data, '<ul class="ListEpisodios[^>]+>(.*?)</ul>') + + matches = re.compile('href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>' + '<strong class="Title">(.*?)</strong>', re.DOTALL).findall(data) + itemlist = [] + + for url, thumbnail, str_episode, show in matches: + + try: + episode = int(str_episode.replace("Ep. ", "")) + except ValueError: + season = 1 + episode = 1 + else: + season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode) + + title = "%s: %sx%s" % (show, season, str(episode).zfill(2)) + url = urlparse.urljoin(HOST, url) + thumbnail = urlparse.urljoin(HOST, thumbnail) + + new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail, + fulltitle=title) + + itemlist.append(new_item) + + return itemlist + + +def novedades_anime(item): + logger.info() + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>') + + matches = re.compile('<img src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data) + itemlist = [] + + for thumbnail, url, title in matches: + url = urlparse.urljoin(HOST, url) + thumbnail = urlparse.urljoin(HOST, thumbnail) + title = clean_title(title) + + new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, + fulltitle=title) + + new_item.show = title + new_item.context = renumbertools.context(item) + + itemlist.append(new_item) + + return itemlist + + +def listado(item): + logger.info() + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + # logger.debug("datito %s" % data) + + url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">') + + data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination') + + matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?' 
+ '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>', + re.DOTALL).findall(data) + + itemlist = [] + + for thumbnail, url, title, genres, plot in matches: + + title = clean_title(title) + url = urlparse.urljoin(HOST, url) + thumbnail = urlparse.urljoin(HOST, thumbnail) + + new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, + fulltitle=title, plot=plot) + + if "Pelicula Anime" in genres: + new_item.contentType = "movie" + new_item.contentTitle = title + else: + new_item.show = title + new_item.context = renumbertools.context(item) + + itemlist.append(new_item) + + if url_pagination: + url = urlparse.urljoin(HOST, url_pagination) + title = ">> Pagina Siguiente" + + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + + if item.plot == "": + item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>') + + data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>') + matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data) + + for url, title in matches: + title = title.strip() + url = urlparse.urljoin(item.url, url) + thumbnail = item.thumbnail + + try: + episode = int(scrapertools.find_single_match(title, "Episodio (\d+)")) + except ValueError: + season = 1 + episode = 1 + else: + season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode) + + title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2)) + + itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail, fulltitle=title, + fanart=thumbnail, contentType="episode")) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + + _id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/') + post = "embed_id=%s" % _id + data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data + dict_data = jsontools.load(data) + + headers = dict() + headers["Referer"] = item.url + data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data + dict_data = jsontools.load(data) + + list_videos = dict_data["playlist"][0]["sources"] + + if isinstance(list_videos, list): + for video in list_videos: + itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show), + title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title, + thumbnail=item.thumbnail)) + + else: + for video in list_videos.values(): + itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show), + title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title, + thumbnail=item.thumbnail)) + + return itemlist diff --git a/plugin.video.alfa/channels/animeid.json b/plugin.video.alfa/channels/animeid.json new file mode 100755 index 00000000..bdad185b --- /dev/null +++ b/plugin.video.alfa/channels/animeid.json @@ -0,0 +1,45 @@ +{ + "id": "animeid", + "name": "Animeid", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "animeid.png", + "banner": "animeid.png", + "version": 1, + "changes": [ + { + "date": "17/05/2017", + "description": "Fix novedades y replace en findvideos" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + 
}, + { + "date": "04/01/16", + "description": "Arreglado problema en findvideos" + } + ], + "categories": [ + "anime" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_anime", + "type": "bool", + "label": "Incluir en Novedades - Episodios de anime", + "default": true, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/animeid.py b/plugin.video.alfa/channels/animeid.py new file mode 100755 index 00000000..9e678e04 --- /dev/null +++ b/plugin.video.alfa/channels/animeid.py @@ -0,0 +1,355 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +CHANNEL_HOST = "http://animeid.tv/" + + +def mainlist(item): + logger.info() + + itemlist = list() + itemlist.append( + Item(channel=item.channel, action="novedades_series", title="Últimas series", url="http://www.animeid.tv/")) + itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", + url="http://www.animeid.tv/", viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="generos", title="Listado por genero", url="http://www.animeid.tv/")) + itemlist.append( + Item(channel=item.channel, action="letras", title="Listado alfabetico", url="http://www.animeid.tv/")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...")) + + return itemlist + + +def newest(categoria): + itemlist = [] + item = Item() + try: + if categoria == 'anime': + item.url = "http://animeid.tv/" + itemlist = novedades_episodios(item) + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + return itemlist + + +# todo ARREGLAR +def search(item, texto): + logger.info() + itemlist = [] + + if item.url == "": + item.url = "http://www.animeid.tv/ajax/search?q=" + texto = texto.replace(" ", "+") + item.url = item.url + texto + try: + headers = [] + headers.append( + ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0"]) + headers.append(["Referer", "http://www.animeid.tv/"]) + headers.append(["X-Requested-With", "XMLHttpRequest"]) + data = scrapertools.cache_page(item.url, headers=headers) + data = data.replace("\\", "") + logger.debug("data=" + data) + + patron = '{"id":"([^"]+)","text":"([^"]+)","date":"[^"]*","image":"([^"]+)","link":"([^"]+)"}' + matches = re.compile(patron, re.DOTALL).findall(data) + + for id, scrapedtitle, scrapedthumbnail, scrapedurl in matches: + title = scrapedtitle + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = scrapedthumbnail + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title, viewmode="movie_with_plot")) + + return itemlist + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def novedades_series(item): + logger.info() + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + data = scrapertools.get_match(data, 
'<section class="series">(.*?)</section>') + patronvideos = '<li><a href="([^"]+)"><span class="tipo\d+">([^<]+)</span><strong>([^<]+)</strong>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + itemlist = [] + + for url, tipo, title in matches: + scrapedtitle = title + " (" + tipo + ")" + scrapedurl = urlparse.urljoin(item.url, url) + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, show=title, viewmode="movie_with_plot")) + + return itemlist + + +def novedades_episodios(item): + logger.info() + + # Descarga la pagina + # <article> <a href="/ver/uchuu-kyoudai-35"> <header>Uchuu Kyoudai #35</header> <figure><img src="http://static.animeid.com/art/uchuu-kyoudai/normal/b4934a1d.jpg" class="cover" alt="Uchuu Kyoudai" width="250" height="140" /></figure><div class="mask"></div> <aside><span class="p"><strong>Reproducciones: </strong>306</span> <span class="f"><strong>Favoritos: </strong>0</span></aside> </a> <p>Una noche en el año 2006, cuando eran jovenes, los dos hermanos Mutta (el mayor) y Hibito (el menor) vieron un OVNI que hiba en dirección hacia la luna. Esa misma noche decidieron que ellos se convertirian en astronautas y irian al espacio exterior. En el año 2050, Hibito se ha convertido en astronauta y que ademas está incluido en una misión que irá a la luna. En cambio Mutta siguió una carrera mas tradicional, y terminó trabajando en una compañia de fabricación de automoviles. Sin embargo, Mutta termina arruinando su carrera por ciertos problemas que tiene con su jefe. Ahora bien, no sólo perdió su trabajo si no que fue incluido en la lista negra de la industria laboral. 
Pueda ser que esta sea su unica oportunidad que tenga Mutta de volver a perseguir su sueño de la infancia y convertirse en astronauta, al igual que su perqueño hermano Hibito.</p> </article> + # <img pagespeed_high_res_src=" + data = httptools.downloadpage(item.url).data + data = scrapertools.get_match(data, '<section class="lastcap">(.*?)</section>') + + patronvideos = '<a href="([^"]+)">[^<]+<header>([^<]+)</header>[^<]+<figure><img[^>]+src="([^"]+)"[\s\S]+?<p>(.+?)</p>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + itemlist = [] + + for url, title, thumbnail, plot in matches: + scrapedtitle = scrapertools.entityunescape(title) + scrapedurl = urlparse.urljoin(item.url, url) + scrapedthumbnail = thumbnail + scrapedplot = plot + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + episodio = scrapertools.get_match(scrapedtitle, '\s+#(.*?)$') + contentTitle = scrapedtitle.replace('#' + episodio, '') + + itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, + hasContentDetails=True, contentSeason=1, contentTitle=contentTitle)) + + return itemlist + + +def generos(item): + logger.info() + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + data = scrapertools.get_match(data, '<div class="generos">(.*?)</div>') + patronvideos = '<li> <a href="([^"]+)">([^<]+)</a>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + itemlist = [] + + for url, title in matches: + scrapedtitle = title + scrapedurl = urlparse.urljoin(item.url, url) + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + plot=scrapedplot, show=title, viewmode="movie_with_plot")) + + return itemlist + + +def letras(item): + logger.info() + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + data = scrapertools.get_match(data, '<ul id="letras">(.*?)</ul>') + patronvideos = '<li> <a href="([^"]+)">([^<]+)</a>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + itemlist = [] + + for url, title in matches: + scrapedtitle = title + scrapedurl = urlparse.urljoin(item.url, url) + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + plot=scrapedplot, show=title, viewmode="movie_with_plot")) + + return itemlist + + +def series(item): + logger.info() + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + logger.debug("datito %s" % data) + + ''' + <article class="item"> + <a href="/aoi-sekai-no-chuushin-de"> + <header>Aoi Sekai no Chuushin de</header> + <figure> + <img src="http://static.animeid.com/art/aoi-sekai-no-chuushin-de/cover/0077cb45.jpg" width="116" + height="164" /> + </figure> + <div class="mask"></div> + </a> + <p> + El Reino de Segua ha ido perdiendo la guerra contra el Imperio de Ninterdo pero la situación ha cambiado + con la aparición de un chico llamado Gear. 
Todos los personajes son parodias de protas de videojuegos de
+    Nintendo y Sega respectivamente, como lo son Sonic the Hedgehog, Super Mario Bros., The Legend of Zelda,
+    etc.
+    </p>
+    </article>
+    '''
+    patron = '<article class="item"[^<]+'
+    patron += '<a href="([^"]+)"[^<]+<header>([^<]+)</header[^<]+'
+    patron += '<figure><img[\sa-z_]+src="([^"]+)"[^<]+</figure><div class="mask"></div></a>[^<]+<p>(.*?)<'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    itemlist = []
+
+    for url, title, thumbnail, plot in matches:
+        scrapedtitle = title
+        scrapedurl = urlparse.urljoin(item.url, url)
+        scrapedthumbnail = thumbnail
+        scrapedplot = plot
+        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
+
+        itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
+                             thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle,
+                             viewmode="movie_with_plot"))
+
+    itemlist = sorted(itemlist, key=lambda it: it.title)
+
+    try:
+        page_url = scrapertools.get_match(data, '<li><a href="([^"]+)">></a></li>')
+        itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente",
+                             url=urlparse.urljoin(item.url, page_url), viewmode="movie_with_plot", thumbnail="",
+                             plot=""))
+    except:
+        pass
+
+    return itemlist
+
+
+def episodios(item, final=True):
+    logger.info()
+
+    # Download the page
+    body = httptools.downloadpage(item.url).data
+
+    # Default values in case the page lacks the description/image metadata;
+    # without them the loop below could raise a NameError
+    scrapedplot = ""
+    scrapedthumbnail = ""
+
+    try:
+        scrapedplot = scrapertools.get_match(body, '<meta name="description" content="([^"]+)"')
+    except:
+        pass
+
+    try:
+        scrapedthumbnail = scrapertools.get_match(body, '<link rel="image_src" href="([^"]+)"')
+    except:
+        pass
+
+    data = scrapertools.get_match(body, '<ul id="listado">(.*?)</ul>')
+    patron = '<li><a href="([^"]+)">(.*?)</a></li>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    itemlist = []
+
+    for url, title in matches:
+        scrapedtitle = scrapertools.htmlclean(title)
+
+        try:
+            episodio = scrapertools.get_match(scrapedtitle, "Capítulo\s+(\d+)")
+            titulo_limpio = re.compile("Capítulo\s+(\d+)\s+", re.DOTALL).sub("", scrapedtitle)
+            if len(episodio) == 1:
+                scrapedtitle = "1x0" + episodio + " - " + titulo_limpio
+            else:
+                scrapedtitle = "1x" + episodio + " - " + titulo_limpio
+        except:
+            pass
+
+        scrapedurl = urlparse.urljoin(item.url, url)
+        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
+
+        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
+                             thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show))
+
+    try:
+        next_page = scrapertools.get_match(body, '<a href="([^"]+)">\>\;</a>')
+        next_page = urlparse.urljoin(item.url, next_page)
+        item2 = Item(channel=item.channel, action="episodios", title=item.title, url=next_page,
+                     thumbnail=item.thumbnail, plot=item.plot, show=item.show, viewmode="movie_with_plot")
+        itemlist.extend(episodios(item2, final=False))
+    except:
+        import traceback
+        logger.error(traceback.format_exc())
+
+    if final and config.get_videolibrary_support():
+        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
+                             action="add_serie_to_library", extra="episodios", show=item.show))
+        itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
+                             action="download_all_episodes", extra="episodios", show=item.show))
+
+    return itemlist
+
+
+def findvideos(item):
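+    # Collects the direct .mp4 parts from the "partes" block, de-duplicates
+    # them, lets servertools detect any embedded third-party servers, and
+    # finally appends previous/next chapter navigation items.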
+    logger.info()
+
+    data = httptools.downloadpage(item.url).data
+    itemlist = []
+
+    url_anterior = scrapertools.find_single_match(data, '<li class="b"><a href="([^"]+)">« Capítulo anterior')
+    url_siguiente = scrapertools.find_single_match(data, '<li class="b"><a href="([^"]+)">Siguiente capítulo »')
+
+    data = scrapertools.find_single_match(data, '<ul id="partes">(.*?)</ul>')
+    data = data.replace("\\/", "/")
+    data = data.replace("%3A", ":")
+    data = data.replace("%2F", "/")
+    logger.info("data=" + data)
+
+    # http%3A%2F%2Fwww.animeid.moe%2Fstream%2F41TLmCj7_3q4BQLnfsban7%2F1440956023.mp4
+    # http://www.animeid.moe/stream/41TLmCj7_3q4BQLnfsban7/1440956023.mp4
+    # http://www.animeid.tv/stream/oiW0uG7yqBrg5TVM5Cm34n/1385370686.mp4
+    patron = '(http://www.animeid.tv/stream/[^/]+/\d+.[a-z0-9]+)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    encontrados = set()
+    for url in matches:
+        if url not in encontrados:
+            itemlist.append(
+                Item(channel=item.channel, action="play", title="[directo]", server="directo", url=url, thumbnail="",
+                     plot="", show=item.show, folder=False))
+            encontrados.add(url)
+
+    from core import servertools
+    itemlist.extend(servertools.find_video_items(data=data))
+    for videoitem in itemlist:
+        videoitem.channel = item.channel
+        videoitem.action = "play"
+        videoitem.folder = False
+        videoitem.title = "[" + videoitem.server + "]"
+
+    # str.strip() strips a set of characters, not a substring, so build the
+    # readable titles by removing the "/v/" prefix and ".html" suffix instead
+    if url_anterior:
+        title_anterior = url_anterior.replace("/v/", "").replace(".html", "").replace('-', ' ')
+        itemlist.append(Item(channel=item.channel, action="findvideos", title="Anterior: " + title_anterior,
+                             url=CHANNEL_HOST + url_anterior, thumbnail=item.thumbnail, plot=item.plot, show=item.show,
+                             fanart=item.thumbnail, folder=True))
+
+    if url_siguiente:
+        title_siguiente = url_siguiente.replace("/v/", "").replace(".html", "").replace('-', ' ')
+        itemlist.append(Item(channel=item.channel, action="findvideos", title="Siguiente: " + title_siguiente,
+                             url=CHANNEL_HOST + url_siguiente, thumbnail=item.thumbnail, plot=item.plot, show=item.show,
+                             fanart=item.thumbnail, folder=True))
+    return itemlist
diff --git a/plugin.video.alfa/channels/animeshd.json b/plugin.video.alfa/channels/animeshd.json
new file mode 100755
index 00000000..fdf04e49
--- /dev/null
+++ b/plugin.video.alfa/channels/animeshd.json
@@ -0,0 +1,28 @@
+{
+    "id": "animeshd",
+    "name": "AnimesHD",
+    "active": true,
+    "adult": false,
+    "language": "es",
+    "thumbnail": "https://s21.postimg.org/b43i3ljav/animeshd.png",
+    "banner": "https://s4.postimg.org/lulxulmql/animeshd-banner.png",
+    "version": 1,
+    "changes": [
+        {
+            "date": "03/06/2017",
+            "description": "limpieza de codigo"
+        },
+        {
+            "date": "25/05/2017",
+            "description": "cambios esteticos"
+        },
+        {
+            "date": "19/05/2017",
+            "description": "First release"
+        }
+    ],
+    "categories": [
+        "latino",
+        "anime"
+    ]
+}
diff --git a/plugin.video.alfa/channels/animeshd.py b/plugin.video.alfa/channels/animeshd.py
new file mode 100755
index 00000000..f185938c
--- /dev/null
+++ b/plugin.video.alfa/channels/animeshd.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+
+from core import httptools
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
+           "Drama": "https://s16.postimg.org/94sia332d/drama.png",
+           "Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
+           "Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
+           "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
+           "Ciencia ficción":
"https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png", + "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png", + "Hentai": "https://s29.postimg.org/aamrngu2f/hentai.png", + "Magia": "https://s9.postimg.org/nhkfzqffj/magia.png", + "Psicológico": "https://s13.postimg.org/m9ghzr86f/psicologico.png", + "Sobrenatural": "https://s9.postimg.org/6hxbvd4ov/sobrenatural.png", + "Torneo": "https://s2.postimg.org/ajoxkk9ih/torneo.png", + "Thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png", + "Otros": "https://s30.postimg.org/uj5tslenl/otros.png"} + +host = "http://www.animeshd.tv" + +headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Ultimas", + action="lista", + thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + url=host + '/ultimos' + )) + + itemlist.append(item.clone(title="Todas", + action="lista", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + url=host + '/buscar?t=todos&q=' + )) + + itemlist.append(item.clone(title="Generos", + action="generos", + url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png' + )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url=host + '/buscar?t=todos&q=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + return itemlist + + +def get_source(url): + logger.info() + data = httptools.downloadpage(url).data + data = re.sub(r'\n|\r|\t| |<br>|\s{2,}|"|\(|\)', "", data) + return data + + +def lista(item): + logger.info() + + itemlist = [] + + post = '' + if item.extra in ['episodios']: + post = {'tipo': 'episodios', '_token': 'rAqVX74O9HVHFFigST3M9lMa5VL7seIO7fT8PBkl'} + post = urllib.urlencode(post) + data = get_source(item.url) + patron = 'class=anime><div class=cover style=background-image: url(.*?)>.*?<a href=(.*?)><h2>(.*?)<\/h2><\/a><\/div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + url = scrapedurl + thumbnail = host + scrapedthumbnail + title = scrapedtitle + itemlist.append(item.clone(action='episodios', + title=title, + url=url, + thumbnail=thumbnail, + contentSerieName=title + )) + + # Paginacion + next_page = scrapertools.find_single_match(data, + '<li class=active><span>.*?<\/span><\/li><li><a href=(.*?)>.*?<\/a><\/li>') + next_page_url = scrapertools.decodeHtmlentities(next_page) + if next_page_url != "": + itemlist.append(Item(channel=item.channel, + action="lista", + title=">> Página siguiente", + url=next_page_url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png' + )) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + try: + if texto != '': + return lista(item) + else: + return [] + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def generos(item): + logger.info() + itemlist = [] + + data = get_source(item.url) + patron = '<li class=><a 
href=http:\/\/www\.animeshd\.tv\/genero\/(.*?)>(.*?)<\/a><\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.decodeHtmlentities(scrapedtitle) + if title == 'Recuentos de la vida': + title = 'Otros' + genero = scrapertools.decodeHtmlentities(scrapedurl) + thumbnail = '' + if title in tgenero: + thumbnail = tgenero[title] + + url = 'http://www.animeshd.tv/genero/%s' % genero + itemlist.append(item.clone(action='lista', title=title, url=url, thumbnail=thumbnail)) + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = get_source(item.url) + patron = '<li id=epi-.*? class=list-group-item ><a href=(.*?) class=badge.*?width=25 title=(.*?)> <\/span>(.*?)<\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedlang, scrapedtitle in matches: + language = scrapedlang + title = scrapedtitle + ' (%s)' % language + url = scrapedurl + itemlist.append(item.clone(title=title, url=url, action='findvideos', language=language)) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = get_source(item.url) + patron = '<iframe.*?src=(.*?) frameborder=0' + matches = re.compile(patron, re.DOTALL).findall(data) + + for video_url in matches: + data = get_source(video_url) + data = data.replace("'", '') + patron = 'file:(.*?),label:(.*?),type' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedquality in matches: + url = scrapedurl + quality = scrapedquality + title = item.contentSerieName + ' (%s)' % quality + itemlist.append(item.clone(action='play', title=title, url=url, quality=quality)) + + return itemlist diff --git a/plugin.video.alfa/channels/anitoonstv.json b/plugin.video.alfa/channels/anitoonstv.json new file mode 100755 index 00000000..eded6911 --- /dev/null +++ b/plugin.video.alfa/channels/anitoonstv.json @@ -0,0 +1,24 @@ +{ + "id": "anitoonstv", + "name": "AniToons TV", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/9Zu5NBc.png", + "banner": "http://i.imgur.com/JQSXCaB.png", + "version": 1, + "changes": [ + { + "date": "13/06/2017", + "description": "Arreglado problema en nombre de servidores" + }, + { + "date": "02/06/2017", + "description": "Primera Versión" + } + ], + "categories": [ + "tvshow", + "latino" + ] +} diff --git a/plugin.video.alfa/channels/anitoonstv.py b/plugin.video.alfa/channels/anitoonstv.py new file mode 100755 index 00000000..3fbfdd73 --- /dev/null +++ b/plugin.video.alfa/channels/anitoonstv.py @@ -0,0 +1,173 @@ +# -*- coding: utf-8 -*- + +import re + +from channels import renumbertools +from channelselector import get_thumb +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +host = "http://www.anitoonstv.com" + + +def mainlist(item): + logger.info() + thumb_series = get_thumb("thumb_channels_tvshow.png") + + itemlist = list() + + itemlist.append(Item(channel=item.channel, action="lista", title="Anime", url=host, + thumbnail=thumb_series)) + itemlist.append(Item(channel=item.channel, action="lista", title="Series Animadas", url=host, + thumbnail=thumb_series)) + itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host, + thumbnail=thumb_series)) + itemlist.append(Item(channel=item.channel, action="lista", title="Pokemon", url=host, + thumbnail=thumb_series)) + 
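+    # renumbertools.show_option appends the episode-renumbering configuration
+    # entry to the menu, so season/episode numbers can be remapped per show.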
    itemlist = renumbertools.show_option(item.channel, itemlist)
+    return itemlist
+
+
+def lista(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+    if 'Novedades' in item.title:
+        patron_cat = '<div class="activos"><h3>(.+?)<\/h2><\/a><\/div>'
+        patron = '<a href="(.+?)"><h2><span>(.+?)<\/span>'
+    else:
+        patron_cat = '<li><a href=.+?>'
+        patron_cat += str(item.title)
+        patron_cat += '<\/a><div>(.+?)<\/div><\/li>'
+        patron = "<a href='(.+?)'>(.+?)<\/a>"
+    data = scrapertools.find_single_match(data, patron_cat)
+
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for link, name in matches:
+        if "Novedades" in item.title:
+            url = link
+            title = name.capitalize()
+        else:
+            url = host + link
+            title = name
+        # Derive the show name used for renumbering: drop subtitles after ':'
+        # and parenthesised suffixes; "Super" and "&" titles are special-cased
+        if ":" in title:
+            cad = title.split(":")
+            show = cad[0]
+        else:
+            if "(" in title:
+                cad = title.split("(")
+                if "Super" in title:
+                    show = cad[1]
+                    show = show.replace(")", "")
+                else:
+                    show = cad[0]
+            else:
+                show = title
+        if "&" in show:
+            cad = title.split("xy")
+            show = cad[0]
+
+        itemlist.append(
+            item.clone(title=title, url=url, plot=show, action="episodios", show=show,
+                       context=renumbertools.context(item)))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
+    data = scrapertools.find_single_match(data, patron)
+    patron_caps = "<a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
+    matches = scrapertools.find_multiple_matches(data, patron_caps)
+    show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
+    scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
+    scrapedplot = scrapertools.find_single_match(data, '<span>Descripcion.+?<\/span>(.+?)<br>')
+    temp = 0
+    for link, cap, name in matches:
+        # A new "Capitulo 1" marks the start of the next season
+        if int(cap) == 1:
+            temp = temp + 1
+        if int(cap) < 10:
+            cap = "0" + cap
+        season = temp
+        episode = int(cap)
+        season, episode = renumbertools.numbered_for_tratk(
+            item.channel, item.show, season, episode)
+        date = name
+        title = "{0}x{1:02d} {2} ({3})".format(
+            season, episode, "Episodio " + str(episode), date)
+        url = host + "/" + link
+        # Skip episodes marked as unavailable on the site
+        if "NO DISPONIBLE" not in name:
+            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, thumbnail=scrapedthumbnail,
+                                 plot=scrapedplot, url=url, show=show))
+
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
+                             action="add_serie_to_library", extra="episodios", show=show))
+
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+    data_vid = scrapertools.find_single_match(data1, '<div class="videos">(.+?)<\/div><div .+?>')
+    # name = scrapertools.find_single_match(data,'<span>Titulo.+?<\/span>([^<]+)<br>')
+    scrapedplot = scrapertools.find_single_match(data, '<br><span>Descrip.+?<\/span>([^<]+)<br>')
+    scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
+    itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+?
src="(.+?)"') + for server, quality, url in itemla: + if "Calidad Alta" in quality: + quality = quality.replace("Calidad Alta", "HQ") + server = server.lower() + server = server.strip() + if "ok" in server: + server = 'okru' + itemlist.append( + item.clone(url=url, action="play", server=server, contentQuality=quality, thumbnail=scrapedthumbnail, + plot=scrapedplot, title="Enlace encontrado en %s: [%s ]" % (server.capitalize(), quality))) + return itemlist + + +def play(item): + logger.info() + + itemlist = [] + + # Buscamos video por servidor ... + + devuelve = servertools.findvideosbyserver(item.url, item.server) + + if not devuelve: + # ...sino lo encontramos buscamos en todos los servidores disponibles + + devuelve = servertools.findvideos(item.url, skip=True) + + if devuelve: + # logger.debug(devuelve) + itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2], + + url=devuelve[0][1], thumbnail=item.thumbnail, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/areadocumental.json b/plugin.video.alfa/channels/areadocumental.json new file mode 100755 index 00000000..c9ca5448 --- /dev/null +++ b/plugin.video.alfa/channels/areadocumental.json @@ -0,0 +1,59 @@ +{ + "id": "areadocumental", + "name": "Area-Documental", + "language": "es", + "adult": false, + "active": true, + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "16/02/2017", + "description": "Canal reparado ya que no funcionaban los enlaces" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "banner": "areadocumental.png", + "thumbnail": "areadocumental.png", + "categories": [ + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_documentales", + "type": "bool", + "label": "Incluir en Novedades - Documentales", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} diff --git a/plugin.video.alfa/channels/areadocumental.py b/plugin.video.alfa/channels/areadocumental.py new file mode 100755 index 00000000..e45e72b2 --- /dev/null +++ b/plugin.video.alfa/channels/areadocumental.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- + +import urllib + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +host = "http://www.area-documental.com" +__perfil__ = int(config.get_setting('perfil', "areadocumental")) + +# Fijar perfil de color +perfil = [['', '', ''], + ['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + itemlist.append(item.clone(title="Novedades", action="entradas", + url="http://www.area-documental.com/resultados-reciente.php?buscar=&genero=", + fanart="http://i.imgur.com/Q7fsFI6.png")) + itemlist.append(item.clone(title="Destacados", action="entradas", + url="http://www.area-documental.com/resultados-destacados.php?buscar=&genero=", + fanart="http://i.imgur.com/Q7fsFI6.png")) + 
itemlist.append(item.clone(title="Categorías", action="cat", url="http://www.area-documental.com/index.php", + fanart="http://i.imgur.com/Q7fsFI6.png")) + itemlist.append(item.clone(title="Ordenados por...", action="indice", fanart="http://i.imgur.com/Q7fsFI6.png")) + + itemlist.append(item.clone(title="Buscar...", action="search")) + itemlist.append(item.clone(title="Configurar canal", action="configuracion", text_color="gold")) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + item.url = "http://www.area-documental.com/resultados.php?buscar=%s&genero=&x=0&y=0" % texto + item.action = "entradas" + try: + itemlist = entradas(item) + return itemlist + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == "documentales": + item.url = "http://www.area-documental.com/resultados-reciente.php?buscar=&genero=" + item.action = "entradas" + itemlist = entradas(item) + + if itemlist[-1].action == "entradas": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def indice(item): + logger.info() + itemlist = [] + itemlist.append(item.clone(title="Título", action="entradas", + url="http://www.area-documental.com/resultados-titulo.php?buscar=&genero=")) + itemlist.append(item.clone(title="Año", action="entradas", + url="http://www.area-documental.com/resultados-anio.php?buscar=&genero=")) + return itemlist + + +def cat(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, '<ul class="menu">(.*?)</nav>') + matches = scrapertools.find_multiple_matches(bloque, "<li>.*?<a href='([^']+)'.*?>(.*?)</a>") + for scrapedurl, scrapedtitle in matches: + scrapedurl = host + "/" + scrapedurl + if not "span" in scrapedtitle: + scrapedtitle = "[COLOR gold] **" + scrapedtitle + "**[/COLOR]" + itemlist.append(item.clone(action="entradas", title=scrapedtitle, url=scrapedurl)) + else: + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + itemlist.append(item.clone(action="entradas", title=scrapedtitle, url=scrapedurl)) + + return itemlist + + +def entradas(item): + logger.info() + itemlist = [] + item.text_color = color2 + + data = httptools.downloadpage(item.url).data + data = scrapertools.unescape(data) + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"> ></a>') + if next_page != "": + data2 = scrapertools.unescape(httptools.downloadpage(host + next_page).data) + data += data2 + else: + data2 = "" + data = data.replace("\n", "").replace("\t", "") + + patron = '<div id="peliculas">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?' 
\ + 'target="_blank">(.*?)</a>(.*?)<p>(.*?)</p>' \ + '.*?</strong>: (.*?)<strong>.*?</strong>(.*?)</div>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot, genero, extra in matches: + infolab = {'plot': scrapedplot, 'genre': genero} + scrapedurl = host + "/" + scrapedurl + scrapedthumbnail = host + urllib.quote(scrapedthumbnail) + title = scrapedtitle + if "full_hd" in extra: + scrapedtitle += " [COLOR gold][3D][/COLOR]" + elif "720" in extra: + scrapedtitle += " [COLOR gold][720p][/COLOR]" + else: + scrapedtitle += " [COLOR gold][SD][/COLOR]" + + year = year.replace("\xc2\xa0", "").replace(" ", "") + if not year.isspace() and year != "": + infolab['year'] = int(year) + scrapedtitle += " (" + year + ")" + itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=title, + url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels=infolab)) + + next_page = scrapertools.find_single_match(data2, '<a href="([^"]+)"> ></a>') + if next_page: + itemlist.append(item.clone(action="entradas", title=">> Página Siguiente", url=host + next_page, + text_color=color3)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + + subs = scrapertools.find_multiple_matches(data, 'file: "(/webvtt[^"]+)".*?label: "([^"]+)"') + patron = 'file:\s*"(http://[^/]*/Videos/[^"]+)",\s*label:\s*"([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for url, quality in matches: + url += "|User-Agent=%s&Referer=%s" \ + % ("Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0", item.url) + for url_sub, label in subs: + url_sub = host + urllib.quote(url_sub) + title = "Ver video en [[COLOR %s]%s[/COLOR]] Sub %s" % (color3, quality, label) + itemlist.append(item.clone(action="play", server="directo", title=title, + url=url, subtitle=url_sub, extra=item.url, calidad=quality)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + try: + from core import filetools + ficherosubtitulo = filetools.join(config.get_data_path(), 'subtitulo_areadocu.srt') + if filetools.exists(ficherosubtitulo): + try: + filetools.remove(ficherosubtitulo) + except IOError: + logger.error("Error al eliminar el archivo " + ficherosubtitulo) + raise + + data = httptools.downloadpage(item.subtitle, headers={'Referer': item.extra}).data + filetools.write(ficherosubtitulo, data) + subtitle = ficherosubtitulo + except: + subtitle = "" + logger.error("Error al descargar el subtítulo") + + extension = item.url.rsplit("|", 1)[0][-4:] + itemlist.append(['%s %s [directo]' % (extension, item.calidad), item.url, 0, subtitle]) + # itemlist.append(item.clone(subtitle=subtitle)) + + return itemlist diff --git a/plugin.video.alfa/channels/autoplay.py b/plugin.video.alfa/channels/autoplay.py new file mode 100755 index 00000000..d4b32f5b --- /dev/null +++ b/plugin.video.alfa/channels/autoplay.py @@ -0,0 +1,581 @@ +# -*- coding: utf-8 -*- + +import os + +from core import channeltools +from core import config +from core import jsontools +from core import logger +from core.item import Item +from platformcode import platformtools + +__channel__ = "autoplay" + +autoplay_node = {} + + +def context(): + ''' + Agrega la opcion Configurar AutoPlay al menu contextual + + :return: + ''' + + _context = "" + + if config.is_xbmc(): + _context = [{"title": "Configurar AutoPlay", + "action": "autoplay_config", + "channel": "autoplay"}] + return _context + + 
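+# Illustrative usage sketch (not part of this module): a channel that wants
+# AutoPlay calls init() once when it is opened, offers the context option via
+# show_option(), and hands its playable items to start(). The server and
+# quality names below are placeholders.
+#
+#     from channels import autoplay
+#
+#     def mainlist(item):
+#         autoplay.init(item.channel, ['openload', 'streamcloud'], ['HD', 'SD'])
+#         itemlist = [...]
+#         autoplay.show_option(item.channel, itemlist)
+#         return itemlist
+#
+#     def findvideos(item):
+#         itemlist = [...]  # items with action='play', server and quality set
+#         return autoplay.start(itemlist, item)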
+context = context() + + +def show_option(channel, itemlist, text_color='yellow', thumbnail=None, fanart=None): + ''' + Agrega la opcion Configurar AutoPlay en la lista recibida + + :param channel: str + :param itemlist: list (lista donde se desea integrar la opcion de configurar AutoPlay) + :param text_color: str (color para el texto de la opcion Configurar Autoplay) + :param thumbnail: str (direccion donde se encuentra el thumbnail para la opcion Configurar Autoplay) + :return: + ''' + logger.info() + if thumbnail == None: + thumbnail = 'https://s7.postimg.org/65ooga04b/Auto_Play.png' + if fanart == None: + fanart = 'https://s7.postimg.org/65ooga04b/Auto_Play.png' + + plot_autoplay = 'AutoPlay permite auto reproducir los enlaces directamente, basándose en la configuracion de tus ' \ + 'servidores y calidades preferidas. ' + itemlist.append( + Item(channel=__channel__, + title="Configurar AutoPlay", + action="autoplay_config", + text_color=text_color, + thumbnail=thumbnail, + fanart=fanart, + plot=plot_autoplay, + from_channel=channel + )) + return itemlist + + +def start(itemlist, item): + ''' + Metodo principal desde donde se reproduce automaticamente los enlaces + - En caso la opcion de personalizar activa utilizara las opciones definidas por el usuario. + - En caso contrario intentara reproducir cualquier enlace que cuente con el idioma preferido. + + :param itemlist: list (lista de items listos para reproducir, o sea con action='play') + :param item: item (el item principal del canal) + :return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio + ''' + logger.info() + global autoplay_node + + if not config.is_xbmc(): + platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi') + return itemlist + else: + if not autoplay_node: + # Obtiene el nodo AUTOPLAY desde el json + autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') + + # Agrega servidores y calidades que no estaban listados a autoplay_node + new_options = check_value(item.channel, itemlist) + + # Obtiene el nodo del canal desde autoplay_node + channel_node = autoplay_node.get(item.channel, {}) + # Obtiene los ajustes des autoplay para este canal + settings_node = channel_node.get('settings', {}) + + if settings_node['active']: + url_list_valid = [] + autoplay_list = [] + favorite_servers = [] + favorite_quality = [] + + # Guarda el valor actual de "Accion al seleccionar vídeo:" en preferencias + user_config_setting = config.get_setting("default_action") + # Habilita la accion "Ver en calidad alta" (si el servidor devuelve más de una calidad p.e. 
gdrive) + if user_config_setting != 2: + config.set_setting("default_action", 2) + + # Informa que AutoPlay esta activo + platformtools.dialog_notification('AutoPlay Activo', '', sound=False) + + # Prioridades a la hora de ordenar itemlist: + # 0: Servidores y calidades + # 1: Calidades y servidores + # 2: Solo servidores + # 3: Solo calidades + # 4: No ordenar + if settings_node['custom_servers'] and settings_node['custom_quality']: + priority = settings_node['priority'] # 0: Servidores y calidades o 1: Calidades y servidores + elif settings_node['custom_servers']: + priority = 2 # Solo servidores + elif settings_node['custom_quality']: + priority = 3 # Solo calidades + else: + priority = 4 # No ordenar + + # Obtiene las listas servidores, calidades disponibles desde el nodo del json de AutoPlay + server_list = channel_node.get('servers', []) + quality_list = channel_node.get('quality', []) + + # Se guardan los textos de cada servidor y calidad en listas p.e. favorite_servers = ['openload', + # 'streamcloud'] + for num in range(1, 4): + favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]]) + favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]]) + + # Se filtran los enlaces de itemlist y que se correspondan con los valores de autoplay + for item in itemlist: + autoplay_elem = dict() + + # Comprobamos q se trata de un item de video + if 'server' not in item: + continue + + # Agrega la opcion configurar AutoPlay al menu contextual + if 'context' not in item: + item.context = list() + if not filter(lambda x: x['action'] == 'autoplay_config', context): + item.context.append({"title": "Configurar AutoPlay", + "action": "autoplay_config", + "channel": "autoplay", + "from_channel": item.channel}) + + # Si no tiene calidad definida le asigna calidad 'default' + if item.quality == '': + item.quality = 'default' + + # Se crea la lista para configuracion personalizada + if priority < 2: # 0: Servidores y calidades o 1: Calidades y servidores + + # si el servidor y la calidad no se encuentran en las listas de favoritos o la url esta repetida, + # descartamos el item + if item.server not in favorite_servers or item.quality not in favorite_quality \ + or item.url in url_list_valid: + continue + autoplay_elem["indice_server"] = favorite_servers.index(item.server) + autoplay_elem["indice_quality"] = favorite_quality.index(item.quality) + + elif priority == 2: # Solo servidores + + # si el servidor no se encuentra en la lista de favoritos o la url esta repetida, + # descartamos el item + if item.server not in favorite_servers or item.url in url_list_valid: + continue + autoplay_elem["indice_server"] = favorite_servers.index(item.server) + + elif priority == 3: # Solo calidades + + # si la calidad no se encuentra en la lista de favoritos o la url esta repetida, + # descartamos el item + if item.quality not in favorite_quality or item.url in url_list_valid: + continue + autoplay_elem["indice_quality"] = favorite_quality.index(item.quality) + + else: # No ordenar + + # si la url esta repetida, descartamos el item + if item.url in url_list_valid: + continue + + # Si el item llega hasta aqui lo añadimos al listado de urls validas y a autoplay_list + url_list_valid.append(item.url) + autoplay_elem['videoitem'] = item + # autoplay_elem['server'] = item.server + # autoplay_elem['quality'] = item.quality + autoplay_list.append(autoplay_elem) + + # Ordenamos segun la prioridad + if priority == 0: # Servidores y calidades + autoplay_list.sort(key=lambda 
orden: (orden['indice_server'], orden['indice_quality'])) + + elif priority == 1: # Calidades y servidores + autoplay_list.sort(key=lambda orden: (orden['indice_quality'], orden['indice_server'])) + + elif priority == 2: # Solo servidores + autoplay_list.sort(key=lambda orden: orden['indice_server']) + + elif priority == 3: # Solo calidades + autoplay_list.sort(key=lambda orden: orden['indice_quality']) + + # Si hay elementos en la lista de autoplay se intenta reproducir cada elemento, hasta encontrar uno + # funcional o fallen todos + if autoplay_list: + played = False + max_intentos = 5 + max_intentos_servers = {} + + # Si se esta reproduciendo algo detiene la reproduccion + if platformtools.is_playing(): + platformtools.stop_video() + + for autoplay_elem in autoplay_list: + if not platformtools.is_playing() and not played: + videoitem = autoplay_elem['videoitem'] + + if videoitem.server not in max_intentos_servers: + max_intentos_servers[videoitem.server] = max_intentos + + # Si se han alcanzado el numero maximo de intentos de este servidor saltamos al siguiente + if max_intentos_servers[videoitem.server] == 0: + continue + + lang = " " + if hasattr(videoitem, 'language') and videoitem.language != "": + lang = " '%s' " % videoitem.language + + platformtools.dialog_notification("AutoPlay", "%s%s%s" % ( + videoitem.server.upper(), lang, videoitem.quality.upper()), sound=False) + # TODO videoitem.server es el id del server, pero podria no ser el nombre!!! + + # Intenta reproducir los enlaces + # Si el canal tiene metodo play propio lo utiliza + channel = __import__('channels.%s' % item.channel, None, None, ["channels.%s" % item.channel]) + if hasattr(channel, 'play'): + resolved_item = getattr(channel, 'play')(videoitem) + if len(resolved_item) > 0: + if isinstance(resolved_item[0], list): + videoitem.video_urls = resolved_item + else: + videoitem = resolved_item[0] + + # si no directamente reproduce + platformtools.play_video(videoitem) + + try: + if platformtools.is_playing(): + played = True + break + except: # TODO evitar el informe de que el conector fallo o el video no se encuentra + logger.debug(str(len(autoplay_list))) + + # Si hemos llegado hasta aqui es por q no se ha podido reproducir + max_intentos_servers[videoitem.server] -= 1 + + # Si se han alcanzado el numero maximo de intentos de este servidor + # preguntar si queremos seguir probando o lo ignoramos + if max_intentos_servers[videoitem.server] == 0: + text = "Parece que los enlaces de %s no estan funcionando." % videoitem.server.upper() + if not platformtools.dialog_yesno("AutoPlay", text, + "¿Desea ignorar todos los enlaces de este servidor?"): + max_intentos_servers[videoitem.server] = max_intentos + + else: + platformtools.dialog_notification('AutoPlay No Fue Posible', 'No Hubo Coincidencias') + if new_options: + platformtools.dialog_notification("AutoPlay", "Nueva Calidad/Servidor disponible en la " + "configuracion", sound=False) + + # Restaura si es necesario el valor previo de "Accion al seleccionar vídeo:" en preferencias + if user_config_setting != 2: + config.set_setting("default_action", user_config_setting) + + # devuelve la lista de enlaces para la eleccion manual + return itemlist + + +def init(channel, list_servers, list_quality): + ''' + Comprueba la existencia de canal en el archivo de configuracion de Autoplay y si no existe lo añade. + Es necesario llamar a esta funcion al entrar a cualquier canal que incluya la funcion Autoplay. 
+ + :param channel: (str) id del canal + :param list_servers: (list) lista inicial de servidores validos para el canal. No es necesario incluirlos todos, + ya que la lista de servidores validos se ira actualizando dinamicamente. + :param list_quality: (list) lista inicial de calidades validas para el canal. No es necesario incluirlas todas, + ya que la lista de calidades validas se ira actualizando dinamicamente. + :return: (bool) True si la inicializacion ha sido correcta. + ''' + logger.info() + change = False + result = True + + if not config.is_xbmc(): + platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi') + result = False + else: + autoplay_path = os.path.join(config.get_data_path(), "settings_channels", 'autoplay_data.json') + if os.path.exists(autoplay_path): + autoplay_node = jsontools.get_node_from_file('autoplay', "AUTOPLAY") + else: + change = True + autoplay_node = {"AUTOPLAY": {}} + + if channel not in autoplay_node: + change = True + + # Se comprueba que no haya calidades ni servidores duplicados + list_servers = list(set(list_servers)) + list_quality = list(set(list_quality)) + + # Creamos el nodo del canal y lo añadimos + channel_node = {"servers": list_servers, + "quality": list_quality, + "settings": { + "active": False, + "custom_servers": False, + "custom_quality": False, + "priority": 0}} + for n in range(1, 4): + s = c = 0 + if len(list_servers) >= n: + s = n - 1 + if len(list_quality) >= n: + c = n - 1 + + channel_node["settings"]["server_%s" % n] = s + channel_node["settings"]["quality_%s" % n] = c + + autoplay_node[channel] = channel_node + + if change: + result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY') + + if result: + heading = "AutoPlay Disponible" + msj = "Seleccione '<Configurar AutoPlay>' para activarlo." + icon = 0 + else: + heading = "Error al iniciar AutoPlay" + msj = "Consulte su log para obtener mas información." 
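+        # --- Illustrative aside (not part of the addon code) ------------------------
+        # Shape of the node that init() writes to autoplay_data.json, for a
+        # hypothetical channel 'foo' registered with two servers and one quality
+        # (names invented for the example). Note that server_2 becomes 1 because a
+        # second server exists, while quality_2/quality_3 fall back to 0 since the
+        # quality list has fewer than three entries:
+        #
+        #     {"foo": {"servers": ["openload", "streamcloud"],
+        #              "quality": ["default"],
+        #              "settings": {"active": False, "custom_servers": False,
+        #                           "custom_quality": False, "priority": 0,
+        #                           "server_1": 0, "server_2": 1, "server_3": 0,
+        #                           "quality_1": 0, "quality_2": 0, "quality_3": 0}}}
+        # -----------------------------------------------------------------------------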
+ icon = 1 + + platformtools.dialog_notification(heading, msj, icon, sound=False) + + return result + + +def check_value(channel, itemlist): + ''' comprueba la existencia de un valor en la lista de servidores o calidades + si no existiera los agrega a la lista en el json + + :param channel: str + :param values: list (una de servidores o calidades) + :param value_type: str (server o quality) + :return: list + ''' + logger.info() + global autoplay_node + change = False + + if not autoplay_node: + # Obtiene el nodo AUTOPLAY desde el json + autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') + + channel_node = autoplay_node.get(channel) + + server_list = channel_node.get('servers') + if not server_list: + server_list = channel_node['servers'] = list() + + quality_list = channel_node.get('quality') + if not quality_list: + quality_list = channel_node['quality'] = list() + + for item in itemlist: + if item.server not in server_list: + server_list.append(item.server) + change = True + if item.quality not in quality_list: + quality_list.append(item.quality) + change = True + + if change: + change, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY') + + return change + + +def autoplay_config(item): + logger.info() + global autoplay_node + dict_values = {} + list_controls = [] + channel_parameters = channeltools.get_channel_parameters(item.from_channel) + channel_name = channel_parameters['title'] + + if not autoplay_node: + # Obtiene el nodo AUTOPLAY desde el json + autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') + + channel_node = autoplay_node.get(item.from_channel, {}) + settings_node = channel_node.get('settings', {}) + + allow_option = True + + active_settings = {"id": "active", "label": "AutoPlay (activar/desactivar la auto-reproduccion)", + "color": "0xffffff99", "type": "bool", "default": False, "enabled": allow_option, + "visible": allow_option} + list_controls.append(active_settings) + dict_values['active'] = settings_node.get('active', False) + + # Idioma + status_language = config.get_setting("filter_languages", item.from_channel) + if not status_language: + status_language = 0 + + set_language = {"id": "language", "label": "Idioma para AutoPlay (Opcional)", "color": "0xffffff99", + "type": "list", "default": 0, "enabled": "eq(-1,true)", "visible": True, + "lvalues": get_languages(item.from_channel)} + + list_controls.append(set_language) + dict_values['language'] = status_language + + separador = {"id": "label", "label": " " + "_________________________________________________________________________________________", + "type": "label", "enabled": True, "visible": True} + list_controls.append(separador) + + # Seccion servidores Preferidos + server_list = channel_node.get("servers", []) + if not server_list: + enabled = False + server_list = ["No disponible"] + else: + enabled = "eq(-3,true)" + + custom_servers_settings = {"id": "custom_servers", "label": " Servidores Preferidos", "color": "0xff66ffcc", + "type": "bool", "default": False, "enabled": enabled, "visible": True} + list_controls.append(custom_servers_settings) + if dict_values['active'] and enabled: + dict_values['custom_servers'] = settings_node.get('custom_servers', False) + else: + dict_values['custom_servers'] = False + + for num in range(1, 4): + pos1 = num + 3 + default = num - 1 + if default > len(server_list) - 1: + default = 0 + set_servers = {"id": "server_%s" % num, "label": u" \u2665 Servidor Favorito %s" % num, + "color": "0xfffcab14", "type": "list", 
"default": default, + "enabled": "eq(-%s,true)+eq(-%s,true)" % (pos1, num), "visible": True, + "lvalues": server_list} + list_controls.append(set_servers) + + dict_values["server_%s" % num] = settings_node.get("server_%s" % num, 0) + if settings_node.get("server_%s" % num, 0) > len(server_list) - 1: + dict_values["server_%s" % num] = 0 + + # Seccion Calidades Preferidas + quality_list = channel_node.get("quality", []) + if not quality_list: + enabled = False + quality_list = ["No disponible"] + else: + enabled = "eq(-7,true)" + + custom_quality_settings = {"id": "custom_quality", "label": " Calidades Preferidas", "color": "0xff66ffcc", + "type": "bool", "default": False, "enabled": enabled, "visible": True} + list_controls.append(custom_quality_settings) + if dict_values['active'] and enabled: + dict_values['custom_quality'] = settings_node.get('custom_quality', False) + else: + dict_values['custom_quality'] = False + + for num in range(1, 4): + pos1 = num + 7 + default = num - 1 + if default > len(quality_list) - 1: + default = 0 + + set_quality = {"id": "quality_%s" % num, "label": u" \u2665 Calidad Favorita %s" % num, + "color": "0xfff442d9", "type": "list", "default": default, + "enabled": "eq(-%s,true)+eq(-%s,true)" % (pos1, num), "visible": True, + "lvalues": quality_list} + list_controls.append(set_quality) + dict_values["quality_%s" % num] = settings_node.get("quality_%s" % num, 0) + if settings_node.get("quality_%s" % num, 0) > len(quality_list) - 1: + dict_values["quality_%s" % num] = 0 + + # Seccion Prioridades + priority_list = ["Servidor y Calidad", "Calidad y Servidor"] + set_priority = {"id": "priority", "label": " Prioridad (Indica el orden para Auto-Reproducir)", + "color": "0xffffff99", "type": "list", "default": 0, + "enabled": True, "visible": "eq(-4,true)+eq(-8,true)+eq(-11,true)", "lvalues": priority_list} + list_controls.append(set_priority) + dict_values["priority"] = settings_node.get("priority", 0) + + # Abrir cuadro de dialogo + platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, callback='save', + item=item, caption='%s - AutoPlay' % channel_name) + + +def save(item, dict_data_saved): + ''' + Guarda los datos de la ventana de configuracion + + :param item: item + :param dict_data_saved: dict + :return: + ''' + logger.info() + global autoplay_node + + if not autoplay_node: + # Obtiene el nodo AUTOPLAY desde el json + autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') + + channel_node = autoplay_node.get(item.from_channel) + config.set_setting("filter_languages", dict_data_saved.pop("language"), item.from_channel) + channel_node['settings'] = dict_data_saved + + result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY') + + return result + + +def get_languages(channel): + ''' + Obtiene los idiomas desde el json del canal + + :param channel: str + :return: list + ''' + logger.info() + list_language = ['No filtrar'] + list_controls, dict_settings = channeltools.get_channel_controls_settings(channel) + for control in list_controls: + if control["id"] == 'filter_languages': + list_language = control["lvalues"] + + return list_language + + +def is_active(): + ''' + Devuelve un booleano q indica si esta activo o no autoplay en el canal desde el que se llama + + :return: True si esta activo autoplay para el canal desde el que se llama, False en caso contrario. 
+ ''' + logger.info() + global autoplay_node + + if not config.is_xbmc(): + return False + + if not autoplay_node: + # Obtiene el nodo AUTOPLAY desde el json + autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY') + + # Obtine el canal desde el q se hace la llamada + import inspect + module = inspect.getmodule(inspect.currentframe().f_back) + canal = module.__name__.split('.')[1] + logger.debug(canal) + + # Obtiene el nodo del canal desde autoplay_node + channel_node = autoplay_node.get(canal, {}) + # Obtiene los ajustes des autoplay para este canal + settings_node = channel_node.get('settings', {}) + + return settings_node.get('active', False) diff --git a/plugin.video.alfa/channels/bajui.json b/plugin.video.alfa/channels/bajui.json new file mode 100755 index 00000000..05969545 --- /dev/null +++ b/plugin.video.alfa/channels/bajui.json @@ -0,0 +1,37 @@ +{ + "id": "bajui", + "name": "Bajui", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "bajui.png", + "banner": "bajui.png", + "fanart": "bajui.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "movie", + "tvshow", + "documentary", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/bajui.py b/plugin.video.alfa/channels/bajui.py new file mode 100755 index 00000000..6d3cf32a --- /dev/null +++ b/plugin.video.alfa/channels/bajui.py @@ -0,0 +1,270 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas", + url="http://www.bajui.com/descargas/categoria/2/peliculas", + fanart=item.fanart)) + itemlist.append(Item(channel=item.channel, title="Series", action="menuseries", + fanart=item.fanart)) + itemlist.append(Item(channel=item.channel, title="Documentales", action="menudocumentales", + fanart=item.fanart)) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", + fanart=item.fanart)) + return itemlist + + +def menupeliculas(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Películas - Novedades", action="peliculas", url=item.url, + fanart=item.fanart, viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre", + fanart=item.fanart, viewmode="movie_with_plot")) + + # <ul class="submenu2 subcategorias"><li ><a href="/descargas/subcategoria/4/br-scr-dvdscr">BR-Scr / DVDScr</a></li><li ><a href="/descargas/subcategoria/6/dvdr-full">DVDR - Full</a></li><li ><a href="/descargas/subcategoria/1/dvdrip-vhsrip">DVDRip / VHSRip</a></li><li ><a href="/descargas/subcategoria/3/hd">HD</a></li><li ><a href="/descargas/subcategoria/2/hdrip-bdrip">HDRip / BDRip</a></li><li ><a href="/descargas/subcategoria/35/latino">Latino</a></li><li ><a href="/descargas/subcategoria/5/ts-scr-cam">TS-Scr / CAM</a></li><li ><a href="/descargas/subcategoria/7/vos">VOS</a></li></ul> + data = scrapertools.cache_page(item.url) + data = scrapertools.get_match(data, '<ul 
class="submenu2 subcategorias">(.*?)</ul>') + patron = '<a href="([^"]+)">([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + for url, title in matches: + scrapedurl = urlparse.urljoin(item.url, url) + itemlist.append(Item(channel=item.channel, title="Películas en " + title, action="peliculas", url=scrapedurl, + fanart=item.fanart, viewmode="movie_with_plot")) + + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", fanart=item.fanart)) + return itemlist + + +def menuseries(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas", + url="http://www.bajui.com/descargas/categoria/3/series", + fanart=item.fanart, viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas", + url="http://www.bajui.com/descargas/categoria/3/series/orden:nombre", + fanart=item.fanart, viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas", + url="http://www.bajui.com/descargas/subcategoria/11/hd/orden:nombre", + fanart=item.fanart, viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", + fanart=item.fanart)) + return itemlist + + +def menudocumentales(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas", + url="http://www.bajui.com/descargas/categoria/7/docus-y-tv", + fanart=item.fanart, viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas", + url="http://www.bajui.com/descargas/categoria/7/docus-y-tv/orden:nombre", + fanart=item.fanart, viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="", + fanart=item.fanart)) + return itemlist + + +# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro +def search(item, texto, categoria=""): + logger.info(item.url + " search " + texto) + itemlist = [] + url = item.url + texto = texto.replace(" ", "+") + logger.info("categoria: " + categoria + " url: " + url) + try: + item.url = "http://www.bajui.com/descargas/busqueda/%s" + item.url = item.url % texto + itemlist.extend(peliculas(item)) + return itemlist + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def peliculas(item, paginacion=True): + logger.info() + url = item.url + + # Descarga la página + data = scrapertools.cache_page(url) + patron = '<li id="ficha-\d+" class="ficha2[^<]+' + patron += '<div class="detalles-ficha"[^<]+' + patron += '<span class="nombre-det">Ficha\: ([^<]+)</span>[^<]+' + patron += '<span class="categoria-det">[^<]+</span>[^<]+' + patron += '<span class="descrip-det">(.*?)</span>[^<]+' + patron += '</div>.*?<a href="([^"]+)"[^<]+' + patron += '<img src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for title, plot, url, thumbnail in matches: + scrapedtitle = title + scrapedplot = clean_plot(plot) + scrapedurl = urlparse.urljoin(item.url, url) + scrapedthumbnail = urlparse.urljoin("http://www.bajui.com/", thumbnail.replace("_m.jpg", "_g.jpg")) + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + # Añade al listado de XBMC + 
itemlist.append( + Item(channel=item.channel, action="enlaces", title=scrapedtitle, fulltitle=title, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedtitle, context="4|5", + fanart=item.fanart, viewmode="movie_with_plot")) + + # Extrae el paginador + patron = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \»\;</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + if len(matches) > 0: + scrapedurl = urlparse.urljoin("http://www.bajui.com/", matches[0]) + pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl, + fanart=item.fanart, viewmode="movie_with_plot") + if not paginacion: + itemlist.extend(peliculas(pagitem)) + else: + itemlist.append(pagitem) + + return itemlist + + +def clean_plot(scrapedplot): + scrapedplot = scrapedplot.replace("\n", "").replace("\r", "") + scrapedplot = re.compile("TÍTULO ORIGINAL[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("AÑO[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Año[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("DURACIÓN[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Duración[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("PAIS[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("PAÍS[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Pais[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("País[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("DIRECTOR[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("DIRECCIÓN[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Dirección[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("REPARTO[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Reparto[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Interpretación[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("GUIÓN[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Guión[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("MÚSICA[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Música[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("FOTOGRAFÍA[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Fotografía[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("PRODUCTORA[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Producción[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Montaje[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Vestuario[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("GÉNERO[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("GENERO[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Genero[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Género[^<]+<br />", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("PREMIOS[^<]+<br />", re.DOTALL).sub("", scrapedplot) + + scrapedplot = re.compile("SINOPSIS", re.DOTALL).sub("", scrapedplot) + scrapedplot = re.compile("Sinopsis", re.DOTALL).sub("", scrapedplot) + scrapedplot = scrapertools.htmlclean(scrapedplot) + return 
scrapedplot + + +def enlaces(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + + try: + item.plot = scrapertools.get_match(data, '<span class="ficha-descrip">(.*?)</span>') + item.plot = clean_plot(item.plot) + except: + pass + + try: + item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"') + item.thumbnail = urlparse.urljoin("http://www.bajui.com/", item.thumbnail) + except: + pass + + ''' + <div id="enlaces-34769"><img id="enlaces-cargando-34769" src="/images/cargando.gif" style="display:none;"/></div> + </li><li id="box-enlace-330690" class="box-enlace"> + <div class="box-enlace-cabecera"> + <div class="datos-usuario"><img class="avatar" src="images/avatars/116305_p.jpg" />Enlaces de: + <a class="nombre-usuario" href="/usuario/jerobien">jerobien</a> </div> + <div class="datos-act">Actualizado: Hace 8 minutos</div> + <div class="datos-boton-mostrar"><a id="boton-mostrar-330690" class="boton" href="javascript:mostrar_enlaces(330690,'b01de63028139fdd348d');">Mostrar enlaces</a></div> + <div class="datos-servidores"><div class="datos-servidores-cell"><img src="/images/servidores/ul.to.png" title="uploaded.com" border="0" alt="uploaded.com" /><img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/cloudzer.png" title="clz.to" border="0" alt="clz.to" /></div></div> + </div> + ''' + + patron = '<div class="box-enlace-cabecera"[^<]+' + patron += '<div class="datos-usuario"><img class="avatar" src="([^"]+)" />Enlaces[^<]+' + patron += '<a class="nombre-usuario" href="[^"]+">([^<]+)</a[^<]+</div>[^<]+' + patron += '<div class="datos-act">Actualizado. ([^<]+)</div>.*?' 
+ patron += '<div class="datos-boton-mostrar"><a id="boton-mostrar-\d+" class="boton" href="javascript.mostrar_enlaces\((\d+)\,\'([^\']+)\'[^>]+>Mostrar enlaces</a></div>[^<]+' + patron += '<div class="datos-servidores"><div class="datos-servidores-cell">(.*?)</div></div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + logger.debug("matches=" + repr(matches)) + + for thumbnail, usuario, fecha, id, id2, servidores in matches: + # <img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/muchshare.png" title="muchshare.net" border="0" alt="muchshare.net" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/shareflare.png" title="shareflare.net" border="0" alt="shareflare.net" /><img src="/images/servidores/otros.gif" title="Otros servidores" border="0" alt="Otros" /> + patronservidores = '<img src="[^"]+" title="([^"]+)"' + matches2 = re.compile(patronservidores, re.DOTALL).findall(servidores) + lista_servidores = "" + for servidor in matches2: + lista_servidores = lista_servidores + servidor + ", " + lista_servidores = lista_servidores[:-2] + + scrapedthumbnail = item.thumbnail + # http://www.bajui.com/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861 + scrapedurl = "http://www.bajui.com/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2 + scrapedplot = item.plot + scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")" + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=item.title, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, context="4|5", + fanart=item.fanart)) + + return itemlist + + +def findvideos(item): + logger.info() + + data = scrapertools.cache_page(item.url) + itemlist = servertools.find_video_items(data=data) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.plot = item.plot + videoitem.thumbnail = item.thumbnail + videoitem.fulltitle = item.fulltitle + + try: + parsed_url = urlparse.urlparse(videoitem.url) + fichero = parsed_url.path + partes = fichero.split("/") + titulo = partes[len(partes) - 1] + videoitem.title = titulo + " - [" + videoitem.server + "]" + except: + videoitem.title = item.title + + return itemlist diff --git a/plugin.video.alfa/channels/beeg.json b/plugin.video.alfa/channels/beeg.json new file mode 100755 index 00000000..b90b31ae --- /dev/null +++ b/plugin.video.alfa/channels/beeg.json @@ -0,0 +1,37 @@ +{ + "id": "beeg", + "name": "Beeg", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "beeg.png", + "banner": "beeg.png", + "version": 1, + "changes": [ + { + "date": "03/06/2017", + "description": "reliminado encoding y soporte multiples calidades" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/beeg.py b/plugin.video.alfa/channels/beeg.py new file mode 100755 index 00000000..f032da02 --- /dev/null +++ b/plugin.video.alfa/channels/beeg.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- + +import re +import urllib + +from core import jsontools as json +from core import logger +from core import scrapertools +from core.item import Item + +url_api = "" +beeg_salt = "" + + +def get_api_url(): + global url_api + global beeg_salt + data = scrapertools.downloadpage("http://beeg.com") + version = re.compile('<script src="//static.beeg.com/cpl/([\d]+).js"').findall(data)[0] + js_url = "http:" + re.compile('<script src="(//static.beeg.com/cpl/[\d]+.js)"').findall(data)[0] + url_api = "https://api2.beeg.com/api/v6/" + version + data = scrapertools.downloadpage(js_url) + beeg_salt = re.compile('beeg_salt="([^"]+)"').findall(data)[0] + + +def decode(key): + a = beeg_salt + e = unicode(urllib.unquote(key), "utf8") + t = len(a) + o = "" + for n in range(len(e)): + r = ord(e[n:n + 1]) + i = n % t + s = ord(a[i:i + 1]) % 21 + o += chr(r - s) + + n = [] + for x in range(len(o), 0, -3): + if x >= 3: + n.append(o[(x - 3):x]) + else: + n.append(o[0:x]) + + return "".join(n) + + +get_api_url() + + +def mainlist(item): + logger.info() + get_api_url() + itemlist = [] + itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=url_api + "/index/main/0/pc", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias", + url=url_api + "/index/main/0/pc")) + itemlist.append( + Item(channel=item.channel, action="search", title="Buscar", url=url_api + "/index/search/0/pc?query=%s")) + return itemlist + + +def videos(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + JSONData = json.load(data) + + for Video in JSONData["videos"]: + thumbnail = "http://img.beeg.com/236x177/" + Video["id"] + ".jpg" + url = url_api + "/video/" + Video["id"] + title = Video["title"] + itemlist.append( + Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot="", show="", + folder=True)) + + # Paginador + Actual = int(scrapertools.get_match(item.url, url_api + '/index/[^/]+/([0-9]+)/pc')) + if JSONData["pages"] - 1 > Actual: + scrapedurl = item.url.replace("/" + str(Actual) + "/", "/" + str(Actual + 1) + "/") + itemlist.append( + Item(channel=item.channel, action="videos", title="Página Siguiente", url=scrapedurl, thumbnail="", + folder=True, viewmode="movie")) + + return itemlist + + +def listcategorias(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + JSONData = json.load(data) + + for Tag in JSONData["tags"]["popular"]: + url = url_api + "/index/tag/0/pc?tag=" + Tag + title = Tag + title = title[:1].upper() + title[1:] + itemlist.append( + Item(channel=item.channel, action="videos", title=title, url=url, folder=True, viewmode="movie")) + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = item.url % (texto) + try: + return videos(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + 
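+        # --- Illustrative aside (not part of the addon code) -------------------
+        # decode() earlier in this file reverses the two transforms applied to
+        # beeg video keys: every character was shifted up by
+        # ord(beeg_salt[i % len(beeg_salt)]) % 21, and the plaintext is stored as
+        # reversed 3-character chunks. Worked example for the chunk step:
+        # "123456789" is re-joined as "789" + "456" + "123" == "789456123".
+        # With a hypothetical salt "aaa", the shift at every position would be
+        # ord("a") % 21 == 97 % 21 == 13, so decode() subtracts 13 per character.
+        # -------------------------------------------------------------------------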
return [] + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.downloadpage(item.url) + + JSONData = json.load(data) + for key in JSONData: + videourl = re.compile("([0-9]+p)", re.DOTALL).findall(key) + if videourl: + videourl = videourl[0] + if not JSONData[videourl] == None: + url = JSONData[videourl] + url = url.replace("{DATA_MARKERS}", "data=pc.ES") + viedokey = re.compile("key=(.*?)%2Cend=", re.DOTALL).findall(url)[0] + + url = url.replace(viedokey, decode(viedokey)) + if not url.startswith("https:"): url = "https:" + url + title = videourl + itemlist.append(["%s %s [directo]" % (title, url[-4:]), url]) + + itemlist.sort(key=lambda item: item[0]) + return itemlist diff --git a/plugin.video.alfa/channels/bityouth.json b/plugin.video.alfa/channels/bityouth.json new file mode 100755 index 00000000..401d7de6 --- /dev/null +++ b/plugin.video.alfa/channels/bityouth.json @@ -0,0 +1,35 @@ +{ + "id": "bityouth", + "name": "Bityouth", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://s6.postimg.org/6ash180up/bityoulogo.png", + "banner": "bityouth.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/bityouth.py b/plugin.video.alfa/channels/bityouth.py new file mode 100755 index 00000000..054612d8 --- /dev/null +++ b/plugin.video.alfa/channels/bityouth.py @@ -0,0 +1,1762 @@ +# -*- coding: utf-8 -*- + +import os +import re +import urllib +import urllib2 +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + +try: + import xbmc + import xbmcgui +except: + pass + +host = "http://bityouth.com/" + + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) 
+ br.addheaders = [('User-agent', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=D=4210979&AF=NOFORM; domain=.bing.com; expires=Wednesday, 09-Nov-06 23:12:40 GMT; MUIDB=36F71C46589F6EAD0BE714175C9F68FC; domain=www.bing.com; expires=15 de enero de 2018 08:43:26 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + # if "z{a:1}" in response: + if not ".ftrH,.ftrHd,.ftrD>" in response: + print "proooxyy" + r = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url) + response = r.read() + return response + ###def proxy(url): + '''from lib import requests + proxies = {"http": "http://anonymouse.org/cgi-bin/anon-www.cgi/"+url} + print "zorro" + print proxies + rsp = requests.get(url, proxies=proxies,stream=True) + print rsp.raw._fp.fp._sock.getpeername() + print rsp.content + response = rsp.content + return response''' + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Generos[/B][/COLOR]", action="generos", + url="http://bityouth.com", thumbnail="http://s6.postimg.org/ybey4gxu9/bityougenerosthum3.png", + fanart="http://s18.postimg.org/l4judlx09/bityougenerosfan.jpg")) + itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Puntuacion[/B][/COLOR]", action="scraper", + url="http://bityouth.com/more_elements/0/?o=pd", + thumbnail="http://s6.postimg.org/n1qtn9i6p/bityoupuntothum4.png", + fanart="http://s6.postimg.org/qrh9oof9t/bityoupuntofan.jpg")) + itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Novedades[/B][/COLOR]", action="scraper", + url="http://bityouth.com/more_elements/0/?o=", + thumbnail="http://s6.postimg.org/bry3sbd5d/bityounovedathum2.png", + fanart="http://s6.postimg.org/ys4r4naz5/bityounovedadfan.jpg")) + import xbmc + if xbmc.Player().isPlaying(): + xbmc.executebuiltin('xbmc.PlayMedia(Stop)') + SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Series[/B][/COLOR]", action="scraper", + url="http://bityouth.com/more_elements/0/genero/serie_de_tv?o=", + thumbnail="http://s6.postimg.org/59j1km53l/bityouseriesthum.png", + fanart="http://s6.postimg.org/45yx8nkgh/bityouseriesfan3.jpg")) + if xbmc.Player().isPlaying(): + xbmc.executebuiltin('xbmc.PlayMedia(Stop)') + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = 
os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + try: + os.remove(SEARCHDESTFILE) + print "Custom search.txt borrado" + except: + print "No hay search.txt" + + try: + os.remove(TRAILERDESTFILE) + print "Custom Trailer.txt borrado" + except: + print "No hay Trailer.txt" + itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Buscar...[/B][/COLOR]", action="search", url="", + thumbnail="http://s6.postimg.org/48isvho41/bityousearchthum.png", + fanart="http://s6.postimg.org/ic5hcegk1/bityousearchfan.jpg", plot="search")) + + return itemlist + + +def search(item, texto): + logger.info() + + itemlist = [] + + if item.url == "": + item.url = "http://bityouth.com/busqueda/" + + item.url = item.url + texto + item.url = item.url.replace(" ", "%20") + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<div class="title">.*?title="([^<]+)" ' + patron += 'href="([^"]+)".*?' + patron += '<h2 itemprop="name">([^<]+)</h2>.*?' + patron += '<img itemprop="image" src="([^"]+)".*?' + patron += '<a href="/year/(\d+)".*?' + patron += '<div id="sinopsys">(.*?)</div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + itemlist.append(Item(channel=item.channel, title="[COLOR gold][B]Sin resultados...[/B][/COLOR]", + thumbnail="http://s6.postimg.org/jp5jx97ip/bityoucancel.png", + fanart="http://s6.postimg.org/vfjhen0b5/bityounieve.jpg", folder=False)) + + for scrapedrate, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches: + title_fan = scrapedtitle.strip() + if " /10" in scrapedrate: + scrapedrate = scrapedrate.replace(" /10", " [COLOR red]Sin Puntuacion[/COLOR] ") + scrapedrate = scrapedrate.replace("Valoracion", "") + trailer = scrapedtitle + " " + scrapedyear + " trailer" + trailer = urllib.quote(trailer) + scrapedtitle = scrapedtitle.replace(scrapedtitle, "[COLOR white]" + scrapedtitle + "[/COLOR]") + scrapedrate = scrapedrate.replace(scrapedrate, "[COLOR gold][B]" + scrapedrate + "[/B][/COLOR]") + scrapedrate = scrapedrate.replace("Valoracion", "[COLOR skyblue]Valoracion[/COLOR]") + if not "serie_de_tv" in item.url: + scrapedtitle = scrapedtitle.replace("(Serie de TV)", "[COLOR royalblue](Serie de TV)[/COLOR]") + else: + scrapedtitle = scrapedtitle.replace("(Serie de TV)", "") + + scrapedtitle = scrapedtitle.replace("torrent", "") + scrapedtitle = scrapedtitle.replace("torrent", "") + title = scrapedtitle + "--" + scrapedrate + url = urlparse.urljoin(host, scrapedurl) + thumbnail = urlparse.urljoin(host, scrapedthumbnail) + + if "Miniserie de TV" in scrapedplot: + extra = "series" + else: + extra = "" + if "_serie_de_tv" in scrapedurl or "Miniserie de TV" in scrapedplot: + import xbmc + SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") + 
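+            # --- Illustrative aside (not part of the addon code) ----------------
+            # The recurring pattern in this channel: fetch a helper file into
+            # Kodi's keymaps folder, then ask Kodi to reload its keymaps. A
+            # minimal standalone sketch; the URL and filename are placeholders:
+            #
+            #     import os, urllib, xbmc
+            #     dest = os.path.join(xbmc.translatePath('special://userdata/keymaps'),
+            #                         "example.xml")
+            #     urllib.urlretrieve("http://example.com/example.xml", dest)
+            #     xbmc.executebuiltin('Action(reloadkeymaps)')
+            # ----------------------------------------------------------------------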
urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/search.txt", + SEARCHDESTFILE) + show = title_fan + "|" + scrapedyear + "|" + trailer + itemlist.append(Item(channel=item.channel, action="fanart", title=title, url=url, thumbnail=thumbnail, + fanart="http://s6.postimg.org/y1uehu24x/bityougeneralfan.jpg", plot=trailer, show=show, + extra=extra, folder=True)) + + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<li><a href="([^<]+)" title.*?Bityouth">([^<]+)</a></li>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + if "Acción" in scrapedtitle: + thumbnail = "http://s6.postimg.org/tbbxshsgh/bityouaccionthumb.png" + fanart = "http://s6.postimg.org/iagsnh07l/bityouaccion.jpg" + elif "Animación" in scrapedtitle: + thumbnail = "http://s6.postimg.org/4w3prftjl/bityouanimacionthum.png" + fanart = "http://s6.postimg.org/n06qc2r81/bityouanimacionfan.jpg" + elif "Aventuras" in scrapedtitle: + thumbnail = "http://s6.postimg.org/bdr7ootap/bityouadventurethum.png" + fanart = "http://s6.postimg.org/lzb30ozm9/bityouadventurefan.jpg" + elif "Bélica" in scrapedtitle: + thumbnail = "http://s6.postimg.org/5fdeegac1/bityouguerrathum.png" + fanart = "http://s6.postimg.org/acqyzkcb5/bityouguerrafan.jpg" + elif "Ciencia" in scrapedtitle: + thumbnail = "http://s6.postimg.org/cxwjn31ox/bityoucienciaficcionthum.png" + fanart = "http://s6.postimg.org/gszxpnkup/cienciaficcionbityoufan.jpg" + elif "Cine" in scrapedtitle: + thumbnail = "http://s6.postimg.org/y7orbo7dd/bityoucinenegrothum.png" + fanart = "http://s6.postimg.org/m4jfo3wb5/bityoucinenegrofan.jpg" + elif "Comedia" in scrapedtitle: + thumbnail = "http://s6.postimg.org/jea3qwzm9/bityouxomediathum.png" + fanart = "http://s6.postimg.org/v4o18asep/bityoucomediafan2.png" + elif "Docu" in scrapedtitle: + thumbnail = "http://s6.postimg.org/ifyc2dbo1/bityoudocuthumb.png" + fanart = "http://s6.postimg.org/xn9q8ze4x/bityoudocufan.jpg" + elif "Drama" in scrapedtitle: + thumbnail = "http://s6.postimg.org/5r41ip5jl/bityoudramathumb.png " + fanart = "http://s6.postimg.org/wawmku635/bityoudramafan.jpg" + elif "Fant" in scrapedtitle: + thumbnail = "http://s6.postimg.org/9sl4ocxu9/bityoufantasiathum.png" + fanart = "http://s6.postimg.org/xiakd1w7l/bityoufantasiafan.jpg" + elif "Infantil" in scrapedtitle: + thumbnail = "http://s6.postimg.org/j6e75o7rl/bityouinfathumb.png" + fanart = "http://s6.postimg.org/f4s22w95d/bityouanimacionfan.jpg" + elif "Intriga" in scrapedtitle: + thumbnail = "http://s22.postimg.org/vpmmbystd/bityouintrigthum.png" + fanart = "http://s27.postimg.org/zee2hh7xv/bityouintrigfan.jpg" + elif "Musical" in scrapedtitle: + thumbnail = "http://s8.postimg.org/u3wlw5eet/bityoumusithum.png" + fanart = "http://s17.postimg.org/l21xuwt5r/bityoumusifan.jpg" + elif "Romance" in scrapedtitle: + thumbnail = "http://s4.postimg.org/q6v7eq6e5/bityouromancethum.png" + fanart = "http://s9.postimg.org/3o4qd4dsf/bityouromancefan.jpg" + elif "Terror" in scrapedtitle: + thumbnail = "http://s9.postimg.org/yntipquvj/bityouterrorthum.png" + fanart = "http://s3.postimg.org/wwq3dnpgz/bityouterrorfan.jpg" + elif "Thr" in scrapedtitle: + thumbnail = "http://s17.postimg.org/eldin5an3/bityouthrithum.png" + fanart = "http://s2.postimg.org/fnqykvb9l/bityouthrifan.jpg" + elif "West" in scrapedtitle: + thumbnail = 
"http://s23.postimg.org/hjq6wjakb/bityouwesterthum.png" + fanart = "http://s7.postimg.org/wzrh42ltn/bityouwesterfan.jpg" + + scrapedtitle = scrapedtitle.replace("ó", "o") + scrapedtitle = scrapedtitle.replace("é", "e") + url = "http://bityouth.com/more_elements/0/genero/" + scrapedtitle + + itemlist.append( + Item(channel=item.channel, action="scraper", title=scrapedtitle, thumbnail=thumbnail, fanart=fanart, + url=url, folder=True)) + + return itemlist + + +def scraper(item): + logger.info() + itemlist = [] + import xbmc + if xbmc.Player().isPlaying(): + xbmc.executebuiltin('xbmc.PlayMedia(Stop)') + + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "App borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + try: + os.remove(TRAILERDESTFILE) + print "Trailer.txt borrado" + except: + print "No hay Trailer.txt" + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| |&", "", data) + + patron = '<div class="title">.*?title="([^<]+)" ' + patron += 'href="([^"]+)".*?' + patron += '<h2 itemprop="name">([^<]+)</h2>.*?' + patron += '<img itemprop="image" src="([^"]+)".*?' + patron += '<a href="/year/(\d+)".*?' 
+ patron += '<div id="sinopsys">(.*?)</div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedrate, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches: + title_fan = scrapedtitle.strip() + if " /10" in scrapedrate: + scrapedrate = scrapedrate.replace(" /10", " [COLOR red]Sin Puntuacion[/COLOR] ") + scrapedrate = scrapedrate.replace("Valoracion", "") + trailer = scrapedtitle + " " + scrapedyear + " trailer" + trailer = urllib.quote(trailer) + scrapedtitle = scrapedtitle.replace(scrapedtitle, "[COLOR white]" + scrapedtitle + "[/COLOR]") + scrapedrate = scrapedrate.replace(scrapedrate, "[COLOR gold][B]" + scrapedrate + "[/B][/COLOR]") + scrapedrate = scrapedrate.replace("Valoracion", "[COLOR skyblue]Valoracion[/COLOR]") + if not "serie_de_tv" in item.url: + scrapedtitle = scrapedtitle.replace("(Serie de TV)", "[COLOR royalblue](Serie de TV)[/COLOR]") + else: + scrapedtitle = scrapedtitle.replace("(Serie de TV)", "") + + scrapedtitle = scrapedtitle.replace("torrent", "") + + title = scrapedtitle + "--" + scrapedrate + url = urlparse.urljoin(host, scrapedurl) + thumbnail = urlparse.urljoin(host, scrapedthumbnail) + if "Miniserie de TV" in scrapedplot: + extra = "series" + else: + extra = "" + show = title_fan + "|" + scrapedyear + "|" + trailer + itemlist.append(Item(channel=item.channel, action="fanart", title=title, url=url, thumbnail=thumbnail, + fanart="http://s6.postimg.org/y1uehu24x/bityougeneralfan.jpg", plot=trailer, extra=extra, + show=show, folder=True)) + + # paginacion + data = scrapertools.cache_page(item.url) + if not "<div class=\"title\">" in data: + itemlist.append(Item(channel=item.channel, title="[COLOR gold][B]No hay mas paginas...[/B][/COLOR]", + thumbnail="http://s6.postimg.org/f4es4kyfl/bityou_Sorry.png", + fanart="http://s6.postimg.org/y1uehu24x/bityougeneralfan.jpg", folder=False)) + else: + + current_page_number = int(scrapertools.get_match(item.url, 'more_elements/(\d+)')) + item.url = re.sub(r"more_elements/\d+", "more_elements/{0}", item.url) + + next_page_number = current_page_number + 40 + next_page = item.url.format(next_page_number) + + title = "[COLOR skyblue]Pagina siguiente>>[/COLOR]" + + itemlist.append(Item(channel=item.channel, title=title, url=next_page, + fanart="http://s6.postimg.org/y1uehu24x/bityougeneralfan.jpg", + thumbnail="http://s6.postimg.org/kbzv91f0x/bityouflecha2.png", + action="scraper", folder=True)) + + return itemlist + + +def fanart(item): + # Vamos a sacar todos los fanarts y arts posibles + logger.info() + itemlist = [] + url = item.url + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| ", "", data) + year = item.show.split("|")[1] + title = item.show.split("|")[0] + trailer = item.show.split("|")[2] + print "joder" + print title + if title == "Érase una vez (Serie de TV)": + title = "Once upon in time" + + import xbmc + xbmc.executebuiltin('Action(reloadkeymaps)') + title = title.replace('á', 'a') + title = title.replace('Á', 'A') + title = title.replace('é', 'e') + title = title.replace('É', 'E') + title = title.replace('í', 'i') + title = title.replace('Í', 'i') + title = title.replace('ó', 'o') + title = title.replace('Ó', 'o') + title = title.replace('ú', 'u') + title = title.replace('Ú', 'U') + title = title.replace('ñ', 'n') + title = title.replace('Ñ', 'N') + if not "_serie_de_tv" in item.url and not item.extra == "series": + title = title.replace("(Serie de TV)", "") + title = title.replace("torrent", "") + + try: + try: + ###Busqueda en 
Tmdb la peli por titulo y año + title_tmdb = title.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') + plot = scrapertools.get_match(data, '"page":1.*?,"overview":"(.*?)",') + except: + if ":" in title or "(" in title: + title_tmdb = title.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') + plot = scrapertools.get_match(data, '"page":1.*?,"overview":"(.*?)",') + else: + title_tmdb = title.replace(" ", "%20") + title_tmdb = re.sub(r"(:.*)|\(.*?\)", "", title_tmdb) + url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') + plot = scrapertools.get_match(data, '"page":1.*?,"overview":"(.*?)",') + + + except: + ###Si no hay coincidencia realiza busqueda por bing del id Imdb + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + '''if "z{a:1}"in data: + data = proxy(urlbing_imdb)''' + try: + subdata_imdb = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') + subdata_imdb = re.sub("http://anonymouse.org/cgi-bin/anon-www.cgi/", "", subdata_imdb) + except: + pass + + try: + url_imdb = scrapertools.get_match(subdata_imdb, '<a href="([^"]+)"') + + except: + pass + try: + id_imdb = scrapertools.get_match(url_imdb, '.*?www.imdb.com/.*?/(.*?)/') + except: + pass + try: + ###Busca id Tmdb mediante el id de Imdb + urltmdb_remote = "https://api.themoviedb.org/3/find/" + id_imdb + "?external_source=imdb_id&api_key=2e2160006592024ba87ccdf78c28f49f&language=es&include_adult=false" + + data = scrapertools.cachePage(urltmdb_remote) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, '"movie_results".*?,"id":(\d+)') + plot = scrapertools.get_match(data, '"movie_results".*?,"overview":"(.*?)",') + except: + id = "" + plot = "" + + ###Llegados aqui ya tenemos(o no) el id(Tmdb);Busca fanart_1 + urltmdb_fan1 = "http://api.themoviedb.org/3/movie/" + id + "?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_fan1) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"adult".*?"backdrop_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + try: + ###Prueba poster de Tmdb + posterdb = scrapertools.get_match(data, '"adult".*?"poster_path":"(.*?)"') + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + except: + posterdb = item.thumbnail + + if len(matches) == 0: + ###Si no encuentra fanart_1 en Tmdb realiza busqueda directamente en Imdb + try: + + urlbing_imdb = "http://www.bing.com/search?q=imdb+movie+%s+%s" % (title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + '''if "z{a:1}"in data: + data = proxy(urlbing_imdb)''' + try: + subdata_imdb = scrapertools.get_match(data, '<li 
class="b_algo">(.*?)h="ID') + subdata_imdb = re.sub("http://anonymouse.org/cgi-bin/anon-www.cgi/", "", subdata_imdb) + except: + pass + try: + url_imdb = scrapertools.get_match(subdata_imdb, '<a href="([^"]+)"') + except: + url_imdb = data + data = scrapertools.cachePage(url_imdb) + + try: + poster_imdb = scrapertools.get_match(data, '<td rowspan="2" id="img_primary">.*?src="([^"]+)"') + poster_imdb = poster_imdb.replace("._.*?jpg", "._V1_SX640_SY720_.jpg") + + except: + poster_imdb = posterdb + + try: + url_photo = scrapertools.get_match(data, + '<div class="combined-see-more see-more">.*?<a href="([^"]+)"') + url_photos = "http://www.imdb.com" + url_photo + data = scrapertools.cachePage(url_photos) + try: + photo_imdb = scrapertools.get_match(data, '<div class="media_index_thumb_list".*?src="([^"]+)"') + photo_imdb = re.sub(r"._.*?jpg", "._V1_SX1280_SY720_.jpg", photo_imdb) + + except: + pass + + try: + photo_imdb2 = scrapertools.get_match(data, + '<div class="media_index_thumb_list".*?src=.*?src="([^"]+)"') + photo_imdb2 = re.sub(r"._.*?jpg", "._V1_SX1280_SY720_.jpg", photo_imdb2) + except: + pass + try: + photo_imdb3 = scrapertools.get_match(data, + '<div class="media_index_thumb_list".*?src=.*?src=.*?src="([^"]+)"') + photo_imdb3 = re.sub(r"._.*?jpg", "._V1_SX1280_SY720_.jpg", photo_imdb3) + except: + pass + try: + photo_imdb4 = scrapertools.get_match(data, + '<div class="media_index_thumb_list".*?src=.*?src=.*?src=.*?src="([^"]+)"') + photo_imdb4 = re.sub(r"._.*?jpg", "._V1_SX1280_SY720_.jpg", photo_imdb4) + except: + pass + + except: + pass + except: + pass + extra = "http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png" + + try: + fanart_1 = photo_imdb3 + except: + try: + fanart_1 = photo_imdb2 + except: + try: + fanart_1 = photo_imdb1 + except: + fanart_1 = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" + + try: + fanart_2 = photo_imdb4 + except: + try: + fanart_2 = photo_imdb2 + except: + try: + fanart_2 = photo_imdb + except: + fanart_2 = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" + try: + fanart_info = photo_imdb2 + except: + try: + fanart_info = photo_imdb + except: + fanart_info = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" + + try: + fanart_trailer = photo_imdb3 + except: + try: + fanart_trailer = photo_imdb2 + except: + try: + fanart_trailer = photo_imdb + except: + fanart_trailer = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" + + try: + category = photo_imdb3 + except: + try: + category = photo_imdb + except: + try: + category = photo_imdb3 + except: + category = "http://s6.postimg.org/mh3umjzkh/bityouthnofanventanuco.jpg" + try: + fanart = photo_imdb + except: + try: + fanart = photo_imdb2 + except: + try: + fanart = photo_imdb3 + except: + fanart = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" + try: + show = photo_imdb4 + except: + try: + show = photo_imdb2 + except: + try: + show = photo_imdb + except: + show = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" + + ###Encontrado fanart_1 en Tmdb + for fan in matches: + + fanart = "https://image.tmdb.org/t/p/original" + fan + fanart_1 = fanart + print "faan" + print fanart_1 + ###Busca fanart para info, fanart para trailer y fanart_2(finvideos) en Tmdb + urltmdb_images = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_images) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = 
re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = fanart_1 + fanart_trailer = fanart_1 + fanart_2 = fanart_1 + category = "" + for fanart_info, fanart_trailer, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_trailer = "https://image.tmdb.org/t/p/original" + fanart_trailer + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + category = "" + + if fanart_info == fanart: + ###Busca fanart_info en Imdb si coincide con fanart + try: + url_imdbphoto = "http://www.imdb.com/title/" + id_imdb + "/mediaindex" + photo_imdb = scrapertools.get_match(url_imdbphoto, + '<div class="media_index_thumb_list".*?src="([^"]+)"') + photo_imdb = photo_imdb.replace("@._V1_UY100_CR25,0,100,100_AL_.jpg", "@._V1_SX1280_SY720_.jpg") + fanart_info = photo_imdb + except: + fanart_info = fanart_2 + + # fanart_2 y arts + + url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=dffe90fba4d02c199ae7a9e71330c987" + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"hdmovielogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"moviedisc"' in data: + disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"') + if '"movieposter"' in data: + poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"') + if '"moviethumb"' in data: + thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"') + if '"moviebanner"' in data: + banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"') + + if len(matches) == 0: + extra = "http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png" + show = fanart_2 + if category == "": + category = fanart_1 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos_pelis", url=item.url, + thumbnail=posterdb, fanart=fanart, extra=extra, show=show, category=category, + folder=True)) + + for logo in matches: + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + if '"moviebackground"' in data: + extra = clear + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = clear + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos_pelis", url=item.url, + thumbnail=logo, fanart=fanart_1, extra=extra, show=show, category=category, folder=True)) + else: + extra = clear + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = clear + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos_pelis", url=item.url, + thumbnail=logo, fanart=fanart_1, extra=extra, show=show, category=category, folder=True)) + + if '"moviebackground"' in data: + + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = clear + + else: + extra = logo + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = logo + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos_pelis", url=item.url, + thumbnail=logo, fanart=fanart_1, extra=extra, show=show, category=category, folder=True)) + + if not '"hdmovieclearart"' in data and 
not '"moviebackground"' in data: + extra = logo + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = fanart_1 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos_pelis", url=item.url, + thumbnail=logo, fanart=fanart_1, category=category, extra=extra, show=show, + folder=True)) + + if "_serie_de_tv" in item.url or item.extra == "series": + # Establece destino customkey + import xbmc + SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") + item.title = item.title.replace("(Serie de TV)", "") + title = re.sub(r"\(.*?\)", "", title).strip() + title_tunes = (translate(title, "en")) + + ###Prepara customkeys y borra cuando vuelve + import xbmc + if not xbmc.Player().isPlaying() and not os.path.exists(TRAILERDESTFILE): + + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + + try: + import xbmc + ###Busca música serie y caraga customkey. En la vuelta evita busqueda si ya suena música + url_bing = "http://www.bing.com/search?q=%s+theme+song+site:televisiontunes.com" % title_tunes.replace( + ' ', '+') + # Llamamos al browser de mechanize. 
Se reitera en todas las busquedas bing + data = browser(url_bing) + '''if "z{a:1}"in data: + data = proxy(url_bing)''' + try: + subdata_tvt = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') + except: + pass + try: + url_tvt = scrapertools.get_match(subdata_tvt, '<a href="(.*?)"') + except: + url_tvt = "" + + if "-theme-songs.html" in url_tvt: + url_tvt = "" + if "http://m.televisiontunes" in url_tvt: + url_tvt = url_tvt.replace("http://m.televisiontunes", "http://televisiontunes") + + data = scrapertools.cachePage(url_tvt) + song = scrapertools.get_match(data, '<form name="song_name_form">.*?type="hidden" value="(.*?)"') + song = song.replace(" ", "%20") + + xbmc.executebuiltin('xbmc.PlayMedia(' + song + ')') + import xbmc, time + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/test.py", + TESTPYDESTFILE) + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/customkey.xml", + KEYMAPDESTFILE) + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/remote.xml", + REMOTEDESTFILE) + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/customapp.xml", + APPCOMMANDDESTFILE) + + xbmc.executebuiltin('Action(reloadkeymaps)') + + except: + pass + try: + os.remove(TRAILERDESTFILE) + print "Trailer.txt borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except: + print "No hay Trailer.txt" + xbmc.executebuiltin('Action(reloadkeymaps)') + if os.path.exists(SEARCHDESTFILE): + + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + os.remove(SEARCHDESTFILE) + print "search.txt borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + print "No hay customs" + xbmc.executebuiltin('Action(reloadkeymaps)') + + # Busqueda bing de Imdb serie id + url_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (title.replace(' ', '+'), year) + print url_imdb + data = browser(url_imdb) + '''if "z{a:1}"in data: + data = proxy(url_imdb)''' + print "perro" + print data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + try: + subdata_imdb = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') + print "ostia" + print subdata_imdb + except: + pass + print "joder" + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + print "siii?" 
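+            # The first organic bing result is an <a href="http://www.imdb.com/title/ttXXXXXXX/...">
+            # anchor; the group above captures that imdb title id, which feeds the
+            # TheTVDB GetSeriesByRemoteID lookup just below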
+            print imdb_id
+        except:
+            imdb_id = ""
+        ### Look up the tvdb id using the imdb id
+        urltvdb_remote = "http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=" + imdb_id + "&language=es"
+        data = scrapertools.cachePage(urltvdb_remote)
+        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+        patron = '<Data><Series><seriesid>([^<]+)</seriesid>.*?<Overview>(.*?)</Overview>'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+
+        if len(matches) == 0:
+            ### If there is no match, search tvdb directly
+            if ":" in title or "(" in title:
+                title = title.replace(" ", "%20")
+                url_tvdb = "http://thetvdb.com/api/GetSeries.php?seriesname=" + title + "&language=es"
+                data = scrapertools.cachePage(url_tvdb)
+                data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+                patron = '<Data><Series><seriesid>([^<]+)</seriesid>.*?<Overview>(.*?)</Overview>'
+                matches = re.compile(patron, re.DOTALL).findall(data)
+                if len(matches) == 0:
+                    title = re.sub(r"(:.*)|\(.*?\)", "", title)
+                    title = title.replace(" ", "%20")
+                    url_tvdb = "http://thetvdb.com/api/GetSeries.php?seriesname=" + title + "&language=es"
+                    data = scrapertools.cachePage(url_tvdb)
+                    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+                    patron = '<Data><Series><seriesid>([^<]+)</seriesid>.*?<Overview>(.*?)</Overview>'
+                    matches = re.compile(patron, re.DOTALL).findall(data)
+
+                    if len(matches) == 0:
+                        plot = ""
+                        postertvdb = item.thumbnail
+                        extra = "http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png"
+                        fanart_info = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg"
+                        fanart_trailer = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg"
+                        category = ""
+                        show = title + "|" + year + "|" + "http://s6.postimg.org/mh3umjzkh/bityouthnofanventanuco.jpg"
+                        itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos",
+                                             thumbnail=item.thumbnail,
+                                             fanart="http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg", extra=extra,
+                                             category=category, show=show, plot=plot, folder=True))
+
+            else:
+                title = title.replace(" ", "%20")
+                url_tvdb = "http://thetvdb.com/api/GetSeries.php?seriesname=" + title + "&language=es"
+                data = scrapertools.cachePage(url_tvdb)
+                data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+                patron = '<Data><Series><seriesid>([^<]+)</seriesid>.*?<Overview>(.*?)</Overview>'
+                matches = re.compile(patron, re.DOTALL).findall(data)
+                if len(matches) == 0:
+                    plot = ""
+                    postertvdb = item.thumbnail
+                    extra = "http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png"
+                    show = title + "|" + year + "|" + "http://s6.postimg.org/mh3umjzkh/bityouthnofanventanuco.jpg"
+                    fanart_info = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg"
+                    fanart_trailer = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg"
+                    category = ""
+                    itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos",
+                                         thumbnail=item.thumbnail,
+                                         fanart="http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg", extra=extra,
+                                         category=category, show=show, plot=plot, folder=True))
+        # fanart
+        for id, info in matches:
+            try:
+                info = (translate(info, "es"))
+            except:
+                pass
+
+            category = id
+            plot = info
+            id_serie = id
+
+            url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id_serie + "/banners.xml"
+
+            data = scrapertools.cachePage(url)
+            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+            patron = '<Banners><Banner>.*?<VignettePath>(.*?)</VignettePath>'
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            try:
+                postertvdb = scrapertools.get_match(data, '<Banners><Banner>.*?<BannerPath>posters/(.*?)</BannerPath>')
+                postertvdb = 
"http://thetvdb.com/banners/_cache/posters/" + postertvdb + except: + postertvdb = item.thumbnail + + if len(matches) == 0: + extra = "http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png" + show = title + "|" + year + "|" + "http://s6.postimg.org/mh3umjzkh/bityouthnofanventanuco.jpg" + fanart_info = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" + fanart_trailer = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=postertvdb, fanart="http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg", + category=category, extra=extra, show=show, folder=True)) + + for fan in matches: + fanart = "http://thetvdb.com/banners/" + fan + fanart_1 = fanart + patron = '<Banners><Banner>.*?<BannerPath>.*?</BannerPath>.*?</Banner><Banner>.*?<BannerPath>(.*?)</BannerPath>.*?</Banner><Banner>.*?<BannerPath>(.*?)</BannerPath>.*?</Banner><Banner>.*?<BannerPath>(.*?)</BannerPath>' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = fanart_1 + fanart_trailer = fanart_1 + fanart_2 = fanart_1 + show = title + "|" + year + "|" + fanart_1 + extra = postertvdb + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=postertvdb, fanart=fanart_1, category=category, extra=extra, + show=show, folder=True)) + for fanart_info, fanart_trailer, fanart_2 in matches: + fanart_info = "http://thetvdb.com/banners/" + fanart_info + fanart_trailer = "http://thetvdb.com/banners/" + fanart_trailer + fanart_2 = "http://thetvdb.com/banners/" + fanart_2 + # clearart, fanart_2 y logo + for id in matches: + url_fanartv = "http://webservice.fanart.tv/v3/tv/" + id_serie + "?api_key=dffe90fba4d02c199ae7a9e71330c987" + data = scrapertools.cachePage(url_fanartv) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"clearlogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"tvposter"' in data: + tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + if '"tvbanner"' in data: + tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') + if '"tvthumb"' in data: + tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') + if '"hdtvlogo"' in data: + hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') + if '"hdclearart"' in data: + hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') + if len(matches) == 0: + item.thumbnail = postertvdb + if '"hdtvlogo"' in data: + if "showbackground" in data: + + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + show = title + "|" + year + "|" + fanart_2 + else: + thumbnail = hdtvlogo + extra = thumbnail + show = title + "|" + year + "|" + fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart_1, category=category, + extra=extra, show=show, plot=item.plot, folder=True)) + + + else: + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + show = title + "|" + year + "|" + fanart_2 + else: + thumbnail = hdtvlogo + extra = thumbnail + show = title + "|" + year + "|" + fanart_2 + + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart_1, extra=extra, + show=show, category=category, plot=item.plot, folder=True)) + else: + extra = 
"http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png" + show = title + "|" + year + "|" + fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=item.thumbnail, fanart=fanart_1, extra=extra, + show=show, category=category, plot=item.plot, folder=True)) + for logo in matches: + if '"hdtvlogo"' in data: + thumbnail = hdtvlogo + elif not '"hdtvlogo"' in data: + if '"clearlogo"' in data: + thumbnail = logo + else: + thumbnail = item.thumbnail + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + if "showbackground" in data: + + extra = clear + show = title + "|" + year + "|" + fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart_1, extra=extra, show=show, + category=category, plot=item.plot, folder=True)) + else: + extra = clear + show = title + "|" + year + "|" + fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart_1, extra=extra, show=show, + category=category, plot=item.plot, folder=True)) + + if "showbackground" in data: + + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + extra = clear + show = title + "|" + year + "|" + fanart_2 + else: + extra = logo + show = title + "|" + year + "|" + fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart_1, extra=extra, show=show, + category=category, plot=item.plot, folder=True)) + + if not '"clearart"' in data and not '"showbackground"' in data: + if '"hdclearart"' in data: + extra = hdtvclear + show = title + "|" + year + "|" + fanart_2 + else: + extra = thumbnail + show = title + "|" + year + "|" + fanart_2 + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", + thumbnail=thumbnail, fanart=fanart_1, extra=extra, show=show, category=category, + plot=item.plot, folder=True)) + + title = "Info" + if not "_serie_de_tv" in item.url and not item.extra == "series": + thumbnail = posterdb + if "_serie_de_tv" in item.url or item.extra == "series": + if '"tvposter"' in data: + thumbnail = tvposter + else: + thumbnail = postertvdb + + if "tvbanner" in data: + category = tvbanner + else: + category = item.show.split("|")[2] + + title = title.replace(title, "[COLOR cyan]" + title + "[/COLOR]") + itemlist.append( + Item(channel=item.channel, action="info", title=title, url=item.url, thumbnail=thumbnail, fanart=fanart_info, + extra=extra, plot=plot, category=category, show=show, folder=False)) + ###trailer + + + title = "[COLOR gold]Trailer[/COLOR]" + + if "_serie_de_tv" in item.url or item.extra == "series": + if '"tvthumb"' in data: + thumbnail = tvthumb + else: + thumbnail = postertvdb + if '"tvbanner"' in data: + extra = tvbanner + elif '"tvthumb"' in data: + extra = tvthumb + else: + extra = item.thumbnail + else: + if '"moviethumb"' in data: + thumbnail = thumb + else: + thumbnail = posterdb + + if '"moviedisc"' in data: + extra = disc + else: + if '"moviethumb"' in data: + extra = thumb + + else: + extra = posterdb + + itemlist.append( + Item(channel=item.channel, action="trailer", title=title, url=item.url, thumbnail=thumbnail, plot=item.plot, + fanart=fanart_trailer, extra=extra, 
show=item.show, folder=True))
+
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    import xbmc
+    SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt")
+    TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py")
+    KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml")
+    REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml")
+    APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml")
+    if xbmc.Player().isPlaying():
+        if not os.path.exists(TESTPYDESTFILE):
+            import xbmc
+            urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/search.txt",
+                               SEARCHDESTFILE)
+            urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/test.py",
+                               TESTPYDESTFILE)
+            urllib.urlretrieve(
+                "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customkey.xml",
+                KEYMAPDESTFILE)
+            urllib.urlretrieve(
+                "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remote.xml",
+                REMOTEDESTFILE)
+            urllib.urlretrieve(
+                "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml",
+                APPCOMMANDDESTFILE)
+
+            xbmc.executebuiltin('Action(reloadkeymaps)')
+
+    if not xbmc.Player().isPlaying():
+        TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py")
+        KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml")
+        REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml")
+        APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml")
+        try:
+            os.remove(KEYMAPDESTFILE)
+            print "Custom Keyboard.xml borrado"
+            os.remove(TESTPYDESTFILE)
+            print "Testpy borrado"
+            os.remove(REMOTEDESTFILE)
+            print "Remote borrado"
+            os.remove(APPCOMMANDDESTFILE)
+            print "Appcommand borrado"
+            xbmc.executebuiltin('Action(reloadkeymaps)')
+        except Exception as inst:
+            xbmc.executebuiltin('Action(reloadkeymaps)')
+            print "No hay customs"
+
+    data = scrapertools.cache_page(item.url)
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+
+    patron = '<a class="btn btn-success" href="([^"]+)" role="button".*?'
+    patron += '<td><div style="width:125px.*?<td><small>([^<]+)</small>.*?'
+    patron += '<td><small>([^<]+)</small>.*?'
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    scrapertools.printMatches(matches)
+
+    if len(matches) == 0:
+        itemlist.append(Item(channel=item.channel,
+                             title="[COLOR gold][B]Lo sentimos el torrent aún no está disponible...[/B][/COLOR]",
+                             thumbnail="http://s6.postimg.org/f4es4kyfl/bityou_Sorry.png",
+                             fanart="http://s6.postimg.org/guxt62fyp/bityounovideo.jpg", folder=False))
+
+    for scrapedurl, scrapedcalidad, scrapedsize in matches:
+
+        scrapedurl = urlparse.urljoin(host, scrapedurl)
+        season = scrapedcalidad
+        season = re.sub(r"\n|\r|\t|\s{2}| |V.O.S|Cast|Temp.|Cap.\d+| ", "", season)
+        epi = scrapedcalidad
+        epi = re.sub(r"\n|\r|\t|\s{2}|V.O.S|Cast| |Temp.\d+|Cap.| ", "", epi)
+        title = scrapertools.get_match(item.title, '(.*?)--')
+        title_info = scrapertools.get_match(data, '<meta name="title" content="(.*?) 
-') + title_info = title_info.replace("(Serie de TV)", "") + title_info = title_info.replace("torrent", "") + title_info = title_info.replace(" ", "%20") + scrapedcalidad = scrapedcalidad.replace(scrapedcalidad, "[COLOR skyblue][B]" + scrapedcalidad + "[/B][/COLOR]") + scrapedsize = scrapedsize.replace(scrapedsize, "[COLOR gold][B]" + scrapedsize + "[/B][/COLOR]") + title = title.replace(title, + "[COLOR white][B]" + title + "[/B][/COLOR]") + "-(" + scrapedcalidad + "/" + scrapedsize + ")" + + if "bityouthsinopsis2.png" in item.extra: + item.extra = item.thumbnail + if "bityouthnofanventanuco.jpg" in item.show.split("|")[2]: + fanart = item.fanart + else: + fanart = item.show.split("|")[2] + + extra = season + "|" + title_info + "|" + epi + itemlist.append( + Item(channel=item.channel, title=title, action="episodios", url=scrapedurl, thumbnail=item.extra, + fanart=item.show.split("|")[2], extra=extra, show=item.show, category=item.category, folder=True)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + import xbmc + if not xbmc.Player().isPlaying(): + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + + season = item.extra.split("|")[0] + title = item.show.split("|")[0] + if title == "Invisibles": + title = "The whispers" + epi = item.extra.split("|")[2] + year = item.show.split("|")[1] + title_tag = "[COLOR yellow]Ver --[/COLOR]" + item.title = item.title.replace("amp", "") + title_clean = title_tag + item.title + if ":" in title: + try: + title = title.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id_tmdb = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') + except: + try: + title = re.sub(r"(:.*)", "", title) + title = title.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id_tmdb = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') + except: + thumbnail = item.thumbnail + fanart = item.fanart + id_tmdb = "" + else: + try: + title = title.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id_tmdb = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') + except: + thumbnail = item.thumbnail + fanart = item.fanart + id_tmdb = "" + ###Teniendo (o no) el id Tmdb busca 
imagen + urltmdb_images = "https://api.themoviedb.org/3/tv/" + id_tmdb + "?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_images) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + try: + backdrop = scrapertools.get_match(data, '"backdrop_path":"(.*?)"') + fanart_3 = "https://image.tmdb.org/t/p/original" + backdrop + fanart = fanart_3 + except: + fanart_3 = item.fanart + fanart = fanart_3 + ###Se hace también la busqueda de el thumb del episodio en Tmdb + urltmdb_epi = "https://api.themoviedb.org/3/tv/" + id_tmdb + "/season/" + season + "/episode/" + epi + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_epi) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + fanart = fanart_3 + itemlist.append(Item(channel=item.channel, title=title_clean, action="play", url=item.url, server="torrent", + thumbnail=thumbnail, fanart=fanart, folder=False)) + + for foto in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + foto + itemlist.append(Item(channel=item.channel, title=title_clean, action="play", url=item.url, server="torrent", + thumbnail=thumbnail, fanart=fanart, category=item.category, folder=False)) + ###thumb temporada### + urltmdb_temp = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/season/" + season + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_temp) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + for temp in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + temp + ####fanart info#### + urltmdb_faninfo = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_faninfo) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"backdrops".*?"file_path":".*?","height".*?"file_path":"(.*?)",' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart = item.fanart + for fanart_4 in matches: + fanart = "https://image.tmdb.org/t/p/original" + fanart_4 + + show = item.category + "|" + item.thumbnail + + title = "Info" + title = title.replace(title, "[COLOR skyblue]" + title + "[/COLOR]") + itemlist.append(Item(channel=item.channel, action="info_capitulos", title=title, url=item.url, thumbnail=thumbnail, + fanart=fanart, extra=item.extra, show=show, folder=False)) + + return itemlist + + +def findvideos_pelis(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<td><a class="btn btn-success" href="([^"]+)" role="button".*?' + patron += '<td><div style="width:125px.*?<td><small>([^<]+)</small>.*?' + patron += '<td><small>([^<]+)</small>.*?' 
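+    # Each match is a (download URL, quality, size) tuple scraped from the rows
+    # of the torrent table on the detail page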
+ + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + if len(matches) == 0: + itemlist.append(Item(channel=item.channel, + title="[COLOR gold][B]Lo sentimos el torrent aún no está disponible...[/B][/COLOR]", + thumbnail="http://s6.postimg.org/f4es4kyfl/bityou_Sorry.png", + fanart="http://s6.postimg.org/guxt62fyp/bityounovideo.jpg", folder=False)) + + for scrapedurl, scrapedcalidad, scrapedsize in matches: + + scrapedurl = urlparse.urljoin(host, scrapedurl) + + title = scrapertools.get_match(data, '<meta name="title" content="(.*?) -') + title = title.replace("(Serie de TV)", "") + title = title.replace("torrent", "") + title_info = scrapertools.get_match(data, '<meta name="title" content="(.*?) -') + title_info = title_info.replace("(Serie de TV)", "") + title_info = title_info.replace("torrent", "") + scrapedcalidad = scrapedcalidad.replace(scrapedcalidad, "[COLOR skyblue][B]" + scrapedcalidad + "[/B][/COLOR]") + scrapedsize = scrapedsize.replace(scrapedsize, "[COLOR gold][B]" + scrapedsize + "[/B][/COLOR]") + title = title.replace(title, + "[COLOR white][B]" + title + "[/B][/COLOR]") + "-(" + scrapedcalidad + "/" + scrapedsize + ")" + if "bityouthsinopsis2.png" in item.extra: + item.extra = item.thumbnail + + itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, fanart=item.show, thumbnail=item.extra, + action="play", folder=False)) + + return itemlist + + +def trailer(item): + logger.info() + itemlist = [] + import xbmc + xbmc.executebuiltin('Action(reloadkeymaps)') + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + if os.path.exists(TESTPYDESTFILE): + TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") + urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/trailer.txt", + TRAILERDESTFILE) + youtube_trailer = "https://www.youtube.com/results?search_query=" + item.show + "español" + + data = scrapertools.cache_page(youtube_trailer) + + patron = '<a href="/watch?(.*?)".*?' 
+ patron += 'title="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + if len(matches) == 0: + itemlist.append(Item(channel=item.channel, title="[COLOR gold][B]No hay Trailer[/B][/COLOR]", + thumbnail="http://s6.postimg.org/jp5jx97ip/bityoucancel.png", + fanart="http://s6.postimg.org/vfjhen0b5/bityounieve.jpg", folder=False)) + + for scrapedurl, scrapedtitle in matches: + scrapedurl = "https://www.youtube.com/watch" + scrapedurl + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) + scrapedtitle = scrapedtitle.replace(scrapedtitle, "[COLOR khaki][B]" + scrapedtitle + "[/B][/COLOR]") + itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, server="youtube", + fanart="http://s6.postimg.org/g4gxuw91r/bityoutrailerfn.jpg", thumbnail=item.extra, + action="play", folder=False)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + if item.server == "youtube": + itemlist.append(Item(channel=item.channel, title=item.plot, url=item.url, server="youtube", fanart=item.fanart, + thumbnail=item.thumbnail, action="play", folder=False)) + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<td><a class="btn btn-success" href="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl in matches: + itemlist.append(Item(channel=item.channel, title=item.title, server="torrent", url=scrapedurl, + fanart="http://s9.postimg.org/lmwhrdl7z/aquitfanart.jpg", thumbnail=item.thumbnail, + action="play", folder=True)) + + return itemlist + + +def info(item): + logger.info() + url = item.url + if "_serie_de_tv" in item.url: + import xbmc + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(APPCOMMANDDESTFILE) + except: + pass + + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}|Descarga el torrent.*?en Bityouth.", "", data) + title = scrapertools.get_match(data, '<meta name="title" content="(.*?) -') + title = title.upper() + title = title.replace(title, "[COLOR gold][B]" + title + "[/B][/COLOR]") + title = title.replace("TORRENT", "") + try: + try: + plot = scrapertools.get_match(data, '<div itemprop="description">(.*?)<a href="#enlaces">') + except: + plot = item.plot + + plot = plot.replace(plot, "[COLOR bisque][B]" + plot + "[/B][/COLOR]") + plot = plot.replace("</i>", "") + plot = plot.replace("</br>", "") + plot = plot.replace("<br/>", "") + plot = plot.replace("“", "") + plot = plot.replace("<b>", "") + plot = plot.replace("</b>", "") + plot = plot.replace(" ​​", "") + plot = scrapertools.decodeHtmlentities(plot) + plot = plot.replace(""", "") + except: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Esta serie no tiene informacion..." 
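+        # No synopsis could be scraped: fall back to the "no info" placeholder
+        # artwork so the TextBox1 dialog still has something to show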
+ plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") + photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + info = "" + quit = "Pulsa" + " [COLOR blue][B]INTRO [/B][/COLOR]" + "para quitar" + try: + scrapedinfo = scrapertools.get_match(data, '<div class="col-sm-5 col-md-5 col-lg-4">(.*?)Título Original:') + infoformat = re.compile('(.*?:).*?</strong>(.*?)<strong>', re.DOTALL).findall(scrapedinfo) + for info, info2 in infoformat: + scrapedinfo = scrapedinfo.replace(info2, "[COLOR bisque]" + info2 + "[/COLOR]") + scrapedinfo = scrapedinfo.replace(info, "[COLOR aqua][B]" + info + "[/B][/COLOR]") + info = scrapedinfo + info = re.sub( + r'<p class=".*?">|<strong>|</strong>|<a href="/year/.*?">| title=".*?"|alt=".*?"|>#2015|</a>|<span itemprop=".*?".*?>|<a.*?itemprop=".*?".*?">|</span>|<a href="/genero/.*?"|<a href=".*?"|itemprop="url">|"|</div><div class="col-sm-7 col-md-7 col-lg-8">|>,', + '', info) + info = info.replace("</p>", " ") + info = info.replace("#", ",") + info = info.replace(">", "") + except: + info = "[COLOR skyblue][B]Sin informacion adicional...[/B][/COLOR]" + if "_serie_de_tv" in item.url: + foto = item.show.split("|")[2] + + else: + foto = item.category + if item.show == item.thumbnail: + foto = "http://s6.postimg.org/mh3umjzkh/bityouthnofanventanuco.jpg" + photo = item.extra + quit = "Pulsa" + " [COLOR blue][B]INTRO [/B][/COLOR]" + "para quitar" + if "_serie_de_tv" in item.url: + NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") + REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") + APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") + urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/noback.xml", + NOBACKDESTFILE) + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/remotenoback.xml", + REMOTENOBACKDESTFILE) + urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/appnoback.xml", + APPNOBACKDESTFILE) + xbmc.executebuiltin('Action(reloadkeymaps)') + + ventana2 = TextBox1(title=title, plot=plot, info=info, thumbnail=photo, fanart=foto, quit=quit) + ventana2.doModal() + + +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 + + +class TextBox1(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getInfo = kwargs.get('info') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getQuit = kwargs.get('quit') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, + 'http://s6.postimg.org/58jknrvtd/backgroundventana5.png') + self.title = xbmcgui.ControlTextBox(140, 60, 1130, 50) + self.quit = xbmcgui.ControlTextBox(145, 90, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 140) + self.info = xbmcgui.ControlFadeLabel(120, 310, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(813, 43, 390, 100, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(120, 365, 1060, 250, self.getFanart) + + self.addControl(self.background) + self.addControl(self.title) + self.addControl(self.quit) + self.addControl(self.plot) + self.addControl(self.thumbnail) + self.addControl(self.fanart) + self.addControl(self.info) + + 
self.title.setText(self.getTitle) + self.quit.setText(self.getQuit) + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + print "Actualice a la ultima version de kodi para mejor info" + import xbmc + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + self.info.addLabel(self.getInfo) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_SELECT_ITEM or action == ACTION_GESTURE_SWIPE_LEFT: + import os + import xbmc + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") + REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") + APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + try: + os.remove(NOBACKDESTFILE) + os.remove(REMOTENOBACKDESTFILE) + os.remove(APPNOBACKDESTFILE) + if os.path.exists(TESTPYDESTFILE): + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/customapp.xml", + APPCOMMANDDESTFILE) + xbmc.executebuiltin('Action(reloadkeymaps)') + except: + xbmc.executebuiltin('Action(reloadkeymaps)') + self.close() + + +def info_capitulos(item): + logger.info() + import xbmc + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(APPCOMMANDDESTFILE) + except: + pass + item.category = item.show.split("|")[0] + item.thumbnail = item.show.split("|")[1] + + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.show.split("|")[0] + "/default/" + \ + item.extra.split("|")[0] + "/" + item.extra.split("|")[2] + "/es.xml" + data = scrapertools.cache_page(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?' + patron += '<Overview>(.*?)</Overview>.*?' + + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." 
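+        # TheTVDB returned no entry for this episode: use the "no info"
+        # placeholder art for the episode window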
+ plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + quit = "Pulsa" + " [COLOR greenyellow][B]INTRO [/B][/COLOR]" + "para quitar" + + else: + + for name_epi, info in matches: + if "<filename>episodes" in data: + foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') + fanart = "http://thetvdb.com/banners/" + foto + else: + fanart = item.show.split("|")[1] + + plot = info + plot = plot.replace(plot, "[COLOR burlywood][B]" + plot + "[/B][/COLOR]") + title = name_epi.upper() + title = title.replace(title, "[COLOR skyblue][B]" + title + "[/B][/COLOR]") + image = fanart + foto = item.show.split("|")[1] + if not ".png" in item.show.split("|")[1]: + foto = "http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png" + quit = "Pulsa" + " [COLOR greenyellow][B]INTRO [/B][/COLOR]" + "para quitar" + NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") + REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") + APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/noback.xml", + NOBACKDESTFILE) + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/remotenoback.xml", + REMOTENOBACKDESTFILE) + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/appnoback.xml", + APPNOBACKDESTFILE) + xbmc.executebuiltin('Action(reloadkeymaps)') + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, quit=quit) + ventana.doModal() + + +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getQuit = kwargs.get('quit') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://s6.postimg.org/n3ph1uxn5/ventana.png') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.quit = xbmcgui.ControlTextBox(145, 90, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.addControl(self.title) + self.addControl(self.quit) + self.addControl(self.plot) + self.addControl(self.thumbnail) + self.addControl(self.fanart) + + self.title.setText(self.getTitle) + self.quit.setText(self.getQuit) + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + print "Actualice a la ultima version de kodi para mejor info" + import xbmc + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_SELECT_ITEM or action == 
ACTION_GESTURE_SWIPE_LEFT: + import os + import xbmc + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") + REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") + APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + try: + os.remove(NOBACKDESTFILE) + os.remove(REMOTENOBACKDESTFILE) + os.remove(APPNOBACKDESTFILE) + if os.path.exists(TESTPYDESTFILE): + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/customapp.xml", + APPCOMMANDDESTFILE) + xbmc.executebuiltin('Action(reloadkeymaps)') + except: + xbmc.executebuiltin('Action(reloadkeymaps)') + self.close() + + +def translate(to_translate, to_langage="auto", langage="auto"): + ###Traducción atraves de Google + '''Return the translation using google translate + you must shortcut the langage you define (French = fr, English = en, Spanish = es, etc...) + if you don't define anything it will detect it or use english by default + Example: + print(translate("salut tu vas bien?", "en")) + hello you alright?''' + agents = { + 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} + before_trans = 'class="t0">' + link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_langage, langage, to_translate.replace(" ", "+")) + request = urllib2.Request(link, headers=agents) + page = urllib2.urlopen(request).read() + result = page[page.find(before_trans) + len(before_trans):] + result = result.split("<")[0] + return result + + +if __name__ == '__main__': + to_translate = 'Hola como estas?' + print("%s >> %s" % (to_translate, translate(to_translate))) + print("%s >> %s" % (to_translate, translate(to_translate, 'fr'))) +# should print Hola como estas >> Hello how are you +# and Hola como estas? >> Bonjour comment allez-vous? 
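+
+
+# A minimal, hedged variant of translate() (illustrative sketch only; nothing in
+# this channel calls it, and the name translate_quoted is made up here): it
+# URL-encodes the query with urllib.quote_plus, so text containing "&", "?" or
+# "#" cannot break the request URL the way the plain replace(" ", "+") above can.
+def translate_quoted(to_translate, to_langage="auto", langage="auto"):
+    import urllib
+    agents = {
+        'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"}
+    before_trans = 'class="t0">'
+    link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (
+        to_langage, langage, urllib.quote_plus(to_translate))
+    request = urllib2.Request(link, headers=agents)
+    page = urllib2.urlopen(request).read()
+    result = page[page.find(before_trans) + len(before_trans):]
+    return result.split("<")[0]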
diff --git a/plugin.video.alfa/channels/borrachodetorrent.json b/plugin.video.alfa/channels/borrachodetorrent.json new file mode 100755 index 00000000..47c9158a --- /dev/null +++ b/plugin.video.alfa/channels/borrachodetorrent.json @@ -0,0 +1,42 @@ +{ + "id": "borrachodetorrent", + "name": "BorrachodeTorrent", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://imgur.com/BePrYmy.png", + "version": 1, + "changes": [ + { + "date": "26/04/2017", + "description": "Release" + }, + { + "date": "28/06/2017", + "description": "Correciones código y mejoras" + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/borrachodetorrent.py b/plugin.video.alfa/channels/borrachodetorrent.py new file mode 100755 index 00000000..d5a4565f --- /dev/null +++ b/plugin.video.alfa/channels/borrachodetorrent.py @@ -0,0 +1,1048 @@ +# -*- coding: utf-8 -*- + +import os +import re +import ssl +from threading import Thread + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +try: + _create_unverified_https_context = ssl._create_unverified_context +except AttributeError: + # Legacy Python that doesn't verify HTTPS certificates by default + pass +else: + # Handle target environment that doesn't support HTTPS verification + ssl._create_default_https_context = _create_unverified_https_context + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +__modo_grafico__ = config.get_setting('modo_grafico', "borrachodetorrent") + + +# Para la busqueda en bing evitando baneos + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) 
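+    # (A desktop User-Agent like the commented example below tends to reduce the
+    # chance of bing serving its bot-detection page; it is left disabled here,
+    # as in the original)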
+ # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(item.clone(title="[COLOR floralwhite][B]Películas[/B][/COLOR]", action="scraper", + url="https://www.borrachodetorrent.com/peliculas-torrent/", + thumbnail="http://imgur.com/tBvoGIk.png", fanart="http://imgur.com/AqUvMW3.jpg", + contentType="movie")) + itemlist.append(item.clone(title="[COLOR floralwhite][B] Estrenos[/B][/COLOR]", action="scraper", + url="https://www.borrachodetorrent.com/peliculas-estrenos-torrent/", + thumbnail="http://imgur.com/tBvoGIk.png", fanart="http://imgur.com/AqUvMW3.jpg", + contentType="movie")) + itemlist.append(item.clone(title="[COLOR floralwhite][B] Esenciales[/B][/COLOR]", action="scraper", + url="https://www.borrachodetorrent.com/peliculas-torrent-deberias-haber-visto/", + thumbnail="http://imgur.com/tBvoGIk.png", fanart="http://imgur.com/AqUvMW3.jpg", + contentType="movie")) + + itemlist.append(itemlist[-1].clone(title="[COLOR floralwhite][B]Series[/B][/COLOR]", action="scraper", + url="https://www.borrachodetorrent.com/series-torrent/", + thumbnail="http://imgur.com/lMHcNwc.png", contentType="tvshow")) + + itemlist.append(itemlist[-1].clone(title="[COLOR cadetblue][B]Buscar[/B][/COLOR]", action="search", + thumbnail="http://imgur.com/NrIVpps.png", + fanart="http://imgur.com/AqUvMW3.jpg", )) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "https://www.borrachodetorrent.com/?s=" + texto + item.extra = "search" + try: + return buscador(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscador(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = scrapertools.find_multiple_matches(data, + '<a id="busca_a" class="busca_a" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="tt">([^"]+)</span>(.*?)<span class="calidad2">([^"]+)</span>') + + for url, thumb, title, check_year, calidad in patron: + + if "SERIE" in calidad or "&#" in title: + if "&#" in title: + item.extra = "" + + checkmt = "tvshow" + + else: + checkmt = "movie" + year = scrapertools.find_single_match(check_year, '<span class="year_SKA">([^"]+)</span>') + if year == "": + year = " " + titulo = "[COLOR teal]" + title + "[/COLOR]" + " " + "[COLOR floralwhite]" + calidad + "[/COLOR]" + title = re.sub(r"!|¡", "", title) + title = re.sub(r"’|PRE-Estreno|\d+&#.*", "'", title) + + if checkmt == "movie": + new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title, + contentTitle=title, contentType="movie", extra=year, library=True) + else: + if item.extra == "search": + new_item = item.clone(action="findtemporadas", 
title=titulo, url=url, thumbnail=thumb, fulltitle=title,
+                                      contentTitle=title, show=title, contentType="tvshow", library=True)
+            else:
+                new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title,
+                                      contentTitle=title, show=title, contentType="tvshow", library=True)
+        new_item.infoLabels['year'] = year
+        itemlist.append(new_item)
+
+    try:
+        from core import tmdb
+        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+        for item in itemlist:
+            if not "Siguiente >>" in item.title:
+                if "0." in str(item.infoLabels['rating']):
+                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]"
+                else:
+                    item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
+                item.title = item.title + " " + str(item.infoLabels['rating'])
+    except:
+        pass
+
+    return itemlist
+
+
+def scraper(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+
+    if item.contentType == "movie":
+        patron = scrapertools.find_multiple_matches(data,
+                                                    '<a id="busca_a" class="busca_a" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="tt">([^"]+)</span>(.*?)<span class="calidad2">([^"]+)</span>')
+
+        for url, thumb, title, check_year, calidad in patron:
+
+            year = scrapertools.find_single_match(check_year, '<span class="year_SKA">([^"]+)</span>')
+            if year == "":
+                year = " "
+
+            titulo = "[COLOR teal]" + title + "[/COLOR]" + " " + "[COLOR floralwhite]" + calidad + "[/COLOR]"
+            title = re.sub(r"!|¡", "", title)
+            title = re.sub(r"’|PRE-Estreno", "'", title)
+
+            new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title,
+                                  contentTitle=title, contentType="movie", extra=year, library=True)
+            new_item.infoLabels['year'] = year
+            itemlist.append(new_item)
+
+    else:
+
+        data = re.sub(r'×', 'x', data)
+        patron = scrapertools.find_multiple_matches(data,
+                                                    'id="busca_a" class="busca_a" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="tt">([^"]+)</span>.*?<span class="calidad2">([^"]+)</span>')
+
+        for url, thumb, title, calidad in patron:
+            titulo = "[COLOR teal]" + title + "[/COLOR]" + " " + "[COLOR floralwhite]" + calidad + "[/COLOR]"
+            title = re.sub(r'\d+x\d+', '', title)
+            title = re.sub(r"’", "'", title)
+            filtro_thumb = thumb.replace("https://image.tmdb.org/t/p/w300", "")
+            filtro_list = {"poster_path": filtro_thumb}
+            filtro_list = filtro_list.items()
+
+            new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb,
+                                  fulltitle=title, infoLabels={'filtro': filtro_list},
+                                  contentTitle=title, show=title, contentType="tvshow", library=True)
+            itemlist.append(new_item)
+
+    ## Pagination
+    next = scrapertools.find_single_match(data, "<div class='paginado'>.*?<a class='current'>.*?href='([^']+)'")
+    if len(next) > 0:
+        url = next
+
+        itemlist.append(item.clone(title="[COLOR dodgerblue][B]Siguiente >>[/B][/COLOR]", url=url))
+    try:
+        from core import tmdb
+        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+        for item in itemlist:
+            if not "Siguiente >>" in item.title:
+                if "0." in str(item.infoLabels['rating']):
+                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]"
+                else:
+                    item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
+                item.title = item.title + " " + str(item.infoLabels['rating'])
+    except:
+        pass
+
+    for item_tmdb in itemlist:
+        logger.info(str(item_tmdb.infoLabels['tmdb_id']))
+
+    return itemlist
+
+
+def findtemporadas(item):
+    logger.info()
+    itemlist = []
+    if item.extra == "search":
+        # pass the callable and its argument separately; calling get_art(item)
+        # inline would run it synchronously instead of on the worker thread
+        th = Thread(target=get_art, args=[item])
+        th.setDaemon(True)
+        th.start()
+    data = httptools.downloadpage(item.url).data
+    if len(item.extra.split("|")):
+        if len(item.extra.split("|")) >= 4:
+            fanart = item.extra.split("|")[2]
+            extra = item.extra.split("|")[3]
+            try:
+                fanart_extra = item.extra.split("|")[4]
+            except:
+                fanart_extra = item.extra.split("|")[3]
+            try:
+                fanart_info = item.extra.split("|")[5]
+            except:
+                fanart_info = item.extra.split("|")[3]
+        elif len(item.extra.split("|")) == 3:
+            fanart = item.extra.split("|")[2]
+            extra = item.extra.split("|")[0]
+            fanart_extra = item.extra.split("|")[0]
+            fanart_info = item.extra.split("|")[1]
+        elif len(item.extra.split("|")) == 2:
+            fanart = item.extra.split("|")[1]
+            extra = item.extra.split("|")[0]
+            fanart_extra = item.extra.split("|")[0]
+            fanart_info = item.extra.split("|")[1]
+        else:
+            extra = item.extra
+            fanart_extra = item.extra
+            fanart_info = item.extra
+    try:
+        logger.info(fanart_extra)
+        logger.info(fanart_info)
+    except:
+        fanart_extra = item.fanart
+        fanart_info = item.fanart
+    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) -(.*?)</ul>')
+    for temporada, bloque_epis in bloque_episodios:
+        if 'no data' in bloque_epis or '<a href="">' in bloque_epis: continue
+        item.infoLabels = item.InfoLabels
+        item.infoLabels['season'] = temporada
+        itemlist.append(item.clone(action="epis",
+                                   title="[COLOR royalblue][B]Temporada [/B][/COLOR]" + "[COLOR antiquewhite][B]" + temporada + "[/B][/COLOR]",
+                                   url=bloque_epis, fanart=fanart, contentType=item.contentType,
+                                   contentTitle=item.contentTitle, show=item.show, extra=item.extra,
+                                   fanart_extra=fanart_extra, fanart_info=fanart_info, datalibrary=data, folder=True))
+    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+    for item in itemlist:
+        item.fanart = fanart
+        item.extra = extra
+    if config.get_videolibrary_support() and itemlist:
+
+        if len(bloque_episodios) == 1:
+            extra = "epis"
+        else:
+            extra = "epis###serie_add"
+        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'],
+                      'imdb_id': item.infoLabels['imdb_id']}
+        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFF00ffff",
+                             action="add_serie_to_library", extra=extra, url=item.url,
+                             contentSerieName=item.fulltitle, infoLabels=infoLabels,
+                             thumbnail='http://imgur.com/BbafXw7.png', datalibrary=data))
+
+    return itemlist
+
+
+def epis(item):
+    logger.info()
+    itemlist = []
+    if item.extra == "serie_add":
+        item.url = item.datalibrary
+    patron = scrapertools.find_multiple_matches(item.url,
+                                                '<div class="numerando">([^"]+)</div>.*?href="([^"]+)">([^"]+)</a>')
+
+    for epi, url, title in patron:
+        epi = epi.replace(" ", "")
+        episodio = epi
+        episodio = scrapertools.find_single_match(episodio, '\d+x(\d+)')
+        item.infoLabels['episode'] = episodio
+        itemlist.append(
+            item.clone(title="[COLOR blue][B]" + epi + "[/B][/COLOR]", url=url, action="findvideos", show=item.show,
+                       fanart=item.extra, extra=item.extra, 
fanart_extra=item.fanart_extra, + fanart_info=item.fanart_info, folder=True)) + if item.extra != "serie_add": + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + item.fanart = item.extra + if item.infoLabels['title']: title = "[COLOR lightblue]" + item.infoLabels['title'] + "[/COLOR]" + item.title = item.title + "[CR]\"" + title + "\"" + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + if not item.infoLabels['episode']: + th = Thread(target=get_art(item)) + th.setDaemon(True) + th.start() + url = scrapertools.find_single_match(data, '<div class="botones_descarga">.*?href="([^"]+)"').strip() + if item.contentType != "movie": + check_online = '<div class="linkstv">' + if not item.infoLabels['episode']: + capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)') + url_serie = re.sub(r'-\d+x\d+.*', '', item.url) + url_serie = re.sub(r'\/episodio', '/series', url_serie) + if len(item.extra.split("|")) >= 2: + extra = item.extra + else: + extra = item.fanart + else: + capitulo = scrapertools.find_single_match(item.title, '\d+x\d+') + try: + fanart = item.fanart_extra + except: + fanart = item.extra.split("|")[0] + if not url and item.library: + itemlist.append(Item(channel=item.channel, title="[COLOR slateblue][B]No disponible[/B][/COLOR]", url=url, + fanart=fanart, thumbnail=item.thumbnail, extra=item.extra, folder=False)) + else: + + title = "[COLOR darkturquoise][B]Torrent [/B][/COLOR]" + "[COLOR aliceblue][B]" + capitulo + "[/B][/COLOR]" + + title = re.sub(r'\".*', '', title) + itemlist.append( + Item(channel=item.channel, title=title, url=url, action="play", server="torrent", fanart=fanart, + thumbnail=item.thumbnail, extra=item.extra, folder=False)) + + + else: + + check_online = '<div class="realse">' + item.infoLabels['year'] = None + itemlist.append( + Item(channel=item.channel, title="[COLOR deepskyblue][B]Torrent[/B][/COLOR]", url=url, action="play", + server="torrent", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra, + InfoLabels=item.infoLabels, folder=False)) + if item.library and config.get_videolibrary_support() and len(itemlist) > 0: + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], + 'title': item.infoLabels['title']} + itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", + action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, + text_color="0xFF00ffff", + thumbnail='http://imgur.com/BbafXw7.png')) + + dd = scrapertools.find_multiple_matches(data, '<h1 class="h1_pelis_online">(.*?)' + check_online + '') + if dd: + if item.library: + extra = dd + itemlist.append( + Item(channel=item.channel, title="[COLOR floralwhite][B] Online[/B][/COLOR]", url=item.url, + action="dd_y_o", thumbnail="http://imgur.com/hYgra9W.png", fanart=item.extra.split("|")[0], + contentType=item.contentType, extra=str(extra) + "|" + item.extra, folder=True)) + else: + + patron = scrapertools.find_multiple_matches(str(dd), + '<li class="elemento">.*?href="([^"]+)".*?<span class="c">([^"]+)</span>.*?<span class="d">([^"]+)</span>') + + for url, idioma, calidad, in patron: + idioma = re.sub(r'\\xc3\\xb1', 'ñ', idioma) + idioma = re.sub(r'\\xc3\\xa9', 'é', idioma) + videolist = servertools.find_video_items(data=url) + for video in videolist: + icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", + "server_" + video.server + ".png") + if not os.path.exists(icon_server): + icon_server = "" + 
itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, + title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]" + " " + "[COLOR powderblue]" + idioma + "[/COLOR]" + "[COLOR deepskyblue]--" + calidad + "[/COLOR]", + thumbnail=icon_server, fanart=fanart, action="play", folder=False)) + if not item.infoLabels['episode'] and item.contentType != "movie": + itemlist.append( + Item(channel=item.channel, title="[COLOR paleturquoise][B]Todos los episodios[/B][/COLOR]", url=url_serie, + action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1], thumbnail=item.thumbnail, + thumb_art=item.thumb_art, thumb_info=item.thumb_info, extra=item.extra + "|" + item.thumbnail, + contentType=item.contentType, contentTitle=item.contentTitle, InfoLabels=item.infoLabels, + library=item.library, fulltitle=item.fulltitle, folder=True)) + if item.infoLabels['episode'] and item.library: + + thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg') + if thumbnail == "": + thumbnail = item.thumbnail + if not "assets.fanart" in item.fanart_info: + fanart = item.fanart_info + else: + fanart = item.fanart + itemlist.append( + Item(channel=item.channel, title="[COLOR steelblue][B] info[/B][/COLOR]", url=url, action="info_capitulos", + fanart=item.extra.split("|")[0], thumbnail=item.thumb_art, thumb_info=item.thumb_info, + extra=item.extra, show=item.show, InfoLabels=item.infoLabels, folder=False)) + return itemlist + + +def dd_y_o(item): + logger.info() + itemlist = [] + if item.contentType == "movie": + enlaces = item.extra.split("|")[0] + fanart = item.extra.split("|")[2] + + else: + enlaces = item.extra.split("|")[0] + fanart = "" + patron = scrapertools.find_multiple_matches(enlaces, + '<li class="elemento">.*?href="([^"]+)".*?<span class="c">([^"]+)</span>.*?<span class="d">([^"]+)</span>') + for url, idioma, calidad, in patron: + idioma = re.sub(r'\\xc3\\xb1', 'ñ', idioma) + idioma = re.sub(r'\\xc3\\xa9', 'é', idioma) + videolist = servertools.find_video_items(data=url) + for video in videolist: + icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", + "server_" + video.server + ".png") + if not os.path.exists(icon_server): + icon_server = "" + itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, + title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]" + " " + "[COLOR powderblue]" + idioma + "[/COLOR]" + "[COLOR deepskyblue]--" + calidad + "[/COLOR]", + thumbnail=icon_server, fanart=fanart, action="play", folder=False)) + return itemlist + + +def info_capitulos(item, images={}): + logger.info() + try: + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + str(item.InfoLabels['tvdb_id']) + "/default/" + str( + item.InfoLabels['season']) + "/" + str(item.InfoLabels['episode']) + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + from core import jsontools + data = httptools.downloadpage(url).data + if "<filename>episodes" in data: + image = scrapertools.find_single_match(data, '<Data>.*?<filename>(.*?)</filename>') + image = "http://thetvdb.com/banners/" + image + else: + try: + image = item.InfoLabels['episodio_imagen'] + except: + image = "http://imgur.com/ZiEAVOD.png" + + foto = item.thumb_info + if not ".png" in foto: + foto = "http://imgur.com/AdGHzKS.png" + try: + title = item.InfoLabels['episodio_titulo'] + except: + title = "" + title = "[COLOR red][B]" + title + "[/B][/COLOR]" + + try: + plot = item.InfoLabels['episodio_sinopsis'] + except: + plot = 
scrapertools.find_single_match(data, '<Overview>(.*?)</Overview>')
+        if plot == "":
+            plot = "Sin información todavía"
+        try:
+            rating = item.InfoLabels['episodio_vote_average']
+        except:
+            rating = 0
+        try:
+            if rating >= 5 and rating < 8:
+                rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]"
+            elif rating >= 8 and rating < 10:
+                rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]"
+            elif rating == 10:
+                rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]"
+            else:
+                rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
+        except:
+            rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]"
+        if "10." in rating:
+            rating = re.sub(r'10\.\d+', '10', rating)
+
+    except:
+        title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
+        plot = "Este capítulo no tiene información..."
+        plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
+        image = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
+        foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
+        rating = ""
+
+    ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
+    ventana.doModal()
+
+
+class TextBox2(xbmcgui.WindowDialog):
+    """ Create a skinned textbox window """
+
+    def __init__(self, *args, **kwargs):
+        self.getTitle = kwargs.get('title')
+        self.getPlot = kwargs.get('plot')
+        self.getThumbnail = kwargs.get('thumbnail')
+        self.getFanart = kwargs.get('fanart')
+        self.getRating = kwargs.get('rating')
+
+        self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/PKOYIzX.jpg')
+        self.title = xbmcgui.ControlTextBox(120, 60, 430, 50)
+        self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45)
+        self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100)
+        self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail)
+        self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)
+
+        self.addControl(self.background)
+        self.background.setAnimations(
+            [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',),
+             ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)])
+        self.addControl(self.thumbnail)
+        self.thumbnail.setAnimations([('conditional',
+                                       'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',),
+                                      ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)])
+        self.addControl(self.plot)
+        self.plot.setAnimations(
+            [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), (
+                'conditional',
+                'effect=rotate delay=2000 center=auto acceleration=6000 start=0% end=360% time=800 condition=true',),
+             ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)])
+        self.addControl(self.fanart)
+        self.fanart.setAnimations(
+            [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), (
+                'conditional',
+                'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',),
+             ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)])
+        self.addControl(self.title)
+        self.title.setText(self.getTitle)
+        self.title.setAnimations(
+            [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',),
+             ('WindowClose', 'effect=slide start=0% end=-1500% time=800 
condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): + i = 0 + while i < len(text): + m = match(text, i) + s = m.group(m.lastindex) + i = m.end() + if m.lastindex == 2: + yield "s" + yield text[i:i + int(s)] + i = i + int(s) + else: + yield s + + +def decode_item(next, token): + if token == "i": + # integer: "i" value "e" + data = int(next()) + if next() != "e": + raise ValueError + elif token == "s": + # string: "s" value (virtual tokens) + data = next() + elif token == "l" or token == "d": + # container: "l" (or "d") values "e" + data = [] + tok = next() + while tok != "e": + data.append(decode_item(next, tok)) + tok = next() + if token == "d": + data = dict(zip(data[0::2], data[1::2])) + else: + raise ValueError + return data + + +def decode(text): + try: + src = tokenize(text) + data = decode_item(src.next, src.next()) + for token in src: # look for more tokens + raise SyntaxError("trailing junk") + except (AttributeError, ValueError, StopIteration): + try: + data = data + except: + data = src + + return data + + +def convert_size(size): + import math + if (size == 0): + return '0B' + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return '%s %s' % (s, size_name[i]) + + +def fanartv(item, id_tvdb, id, images={}): + headers = [['Content-Type', 'application/json']] + from core import jsontools + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % id + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb + try: + data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) + if data and not "error message" in data: + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + images[key] = value + else: + images = [] + + except: + images = [] + return images + + +def filmaffinity(item, infoLabels): + title = infoLabels["title"].replace(" ", "+") + try: + year = infoLabels["year"] + except: + year = "" + sinopsis = infoLabels["sinopsis"] + + if year == "": + if item.contentType != "movie": + tipo = "serie" + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title + else: + tipo = "película" + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title + try: + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a 
href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + try: + data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data + except: + data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data + else: + try: + data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data + except: + data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + except: + pass + else: + tipo = "Pelicula" + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url, cookies=False).data + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf, cookies=False).data + else: + if item.contentType != "movie": + tipo = "serie" + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title + else: + tipo = "película" + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title + try: + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma, cookies=False).data + else: + data = httptools.downloadpage(url_filma, cookies=False).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + except: + pass + sinopsis_f = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis_f = sinopsis_f.replace("<br><br />", "\n") + sinopsis_f = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis_f) + try: + year_f = scrapertools.get_match(data, '<dt>Año</dt>.*?>(\d+)</dd>') + except: + year_f = "" + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta %s no tiene críticas todavía...[/B][/COLOR]" % tipo + + return critica, rating_filma, year_f, 
sinopsis_f
+
+
+def get_art(item):
+    logger.info()
+    id = item.infoLabels['tmdb_id']
+    check_fanart = item.infoLabels['fanart']
+    if item.contentType != "movie":
+        tipo_ps = "tv"
+    else:
+        tipo_ps = "movie"
+    if not id:
+        year = item.extra
+        otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps)
+        id = otmdb.result.get("id")
+
+        if id == None:
+            otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps)
+            id = otmdb.result.get("id")
+            if id == None:
+                # The "tv series" Bing query belongs to the non-movie case
+                # (the mirrored block further down already tests it this way)
+                if item.contentType != "movie":
+                    urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
+                        item.fulltitle.replace(' ', '+'), year)
+                    data = browser(urlbing_imdb)
+                    data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
+                    subdata_imdb = scrapertools.find_single_match(data,
+                                                                  '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
+                else:
+                    urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
+                        item.fulltitle.replace(' ', '+'), year)
+                    data = browser(urlbing_imdb)
+                    data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
+                    subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>')
+                try:
+                    imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
+                except:
+                    try:
+                        imdb_id = scrapertools.get_match(subdata_imdb,
+                                                         '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
+                    except:
+                        imdb_id = ""
+                otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es")
+                id = otmdb.result.get("id")
+
+        if id == None:
+            if "(" in item.fulltitle:
+                title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)')
+                if item.contentType != "movie":
+                    urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (
+                        title.replace(' ', '+'), year)
+                    data = browser(urlbing_imdb)
+                    data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
+                                  data)
+                    subdata_imdb = scrapertools.find_single_match(data,
+                                                                  '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
+                else:
+                    urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
+                        title.replace(' ', '+'), year)
+                    data = browser(urlbing_imdb)
+                    data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "",
+                                  data)
+                    subdata_imdb = scrapertools.find_single_match(data,
+                                                                  '<li class="b_algo">(.*?)h="ID.*?<strong>')
+                try:
+                    imdb_id = scrapertools.get_match(subdata_imdb,
+                                                     '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
+                except:
+                    try:
+                        imdb_id = scrapertools.get_match(subdata_imdb,
+                                                         '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"')
+                    except:
+                        imdb_id = ""
+                otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps,
+                                  idioma_busqueda="es")
+                id = otmdb.result.get("id")
+
+    if not id:
+        fanart = item.fanart
+
+    imagenes = []
+    itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps)
+    images = itmdb.result.get("images")
+    if images:
+        for key, value in images.iteritems():
+            for detail in value:
+                imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"])
+
+    if item.contentType == "movie":
+        if len(imagenes) >= 4:
+            if imagenes[0] != check_fanart:
+                item.fanart = imagenes[0]
+            else:
+                item.fanart = imagenes[1]
+            if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
+                item.extra = imagenes[1] + "|" + imagenes[2]
+            else:
+                if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
+                    item.extra = imagenes[1] + "|" + imagenes[3]
+                elif imagenes[2] != check_fanart:
+                    item.extra = imagenes[2] + "|" + imagenes[3]
+                else:
+                    item.extra = imagenes[3] + "|" + imagenes[3]
+        elif len(imagenes) == 3:
+            if imagenes[0] != check_fanart:
+                item.fanart = imagenes[0]
+            else:
+                item.fanart = imagenes[1]
+            if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
+                item.extra = imagenes[1] + "|" + imagenes[2]
+            else:
+                if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
+                    item.extra = imagenes[0] + "|" + imagenes[1]
+                elif imagenes[2] != check_fanart:
+                    item.extra = imagenes[1] + "|" + imagenes[2]
+                else:
+                    item.extra = imagenes[1] + "|" + imagenes[1]
+        elif len(imagenes) == 2:
+            if imagenes[0] != check_fanart:
+                item.fanart = imagenes[0]
+            else:
+                item.fanart = imagenes[1]
+            if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
+                item.extra = imagenes[0] + "|" + imagenes[1]
+            else:
+                item.extra = imagenes[1] + "|" + imagenes[0]
+        elif len(imagenes) == 1:
+            item.extra = imagenes[0] + "|" + imagenes[0]
+        else:
+            item.extra = item.fanart + "|" + item.fanart
+        id_tvdb = ""
+    else:
+        # item.infoLabels['year']=None
+        # item.infoLabels['filtro']=None
+
+        if itmdb.result.get("external_ids").get("tvdb_id"):
+            id_tvdb = itmdb.result.get("external_ids").get("tvdb_id")
+        else:
+            id_tvdb = ""
+        if len(imagenes) >= 6:
+            if imagenes[0] != check_fanart:
+                item.fanart = imagenes[0]
+            else:
+                item.fanart = imagenes[1]
+            if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
+                item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + \
+                             imagenes[5]
+            else:
+                if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
+                    item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
+                                 imagenes[2]
+                elif imagenes[2] != check_fanart:
+                    item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \
+                                 imagenes[1]
+                else:
+                    item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + imagenes[2] + "|" + \
+                                 imagenes[1]
+        elif len(imagenes) == 5:
+            if imagenes[0] != check_fanart:
+                item.fanart = imagenes[0]
+            else:
+                item.fanart = imagenes[1]
+            if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
+                item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4]
+            else:
+                if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
+                    item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2]
+                elif imagenes[2] != check_fanart:
+                    item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[1]
+                else:
+                    item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + "|" + imagenes[1]
+        elif len(imagenes) == 4:
+            if imagenes[0] != check_fanart:
+                item.fanart = imagenes[0]
+            else:
+                item.fanart = imagenes[1]
+            if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
+                # With four images the last valid index is 3; the old line read
+                # imagenes[4] and raised IndexError
+                item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3]
+            else:
+                if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
+                    item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[2]
+                elif imagenes[2] != check_fanart:
+                    item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[1]
+                else:
+                    item.extra = imagenes[3] + "|" + imagenes[2] + "|" + imagenes[1]
+
+        elif len(imagenes) == 3:
+            if imagenes[0] != check_fanart:
+                item.fanart = imagenes[0]
+            else:
+                item.fanart = imagenes[1]
+
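+            # The length-specific branches of this ladder all apply one rule:
+            # use the first image that differs from the current fanart, then
+            # pack a few alternates into item.extra. A compact sketch of that
+            # rule (hypothetical helper, not what this channel actually calls):
+            #
+            #   def pick_backdrops(imagenes, check_fanart, count=2):
+            #       pool = [img for img in imagenes if img != check_fanart]
+            #       if not pool:
+            #           pool = imagenes[:] or [check_fanart]
+            #       fanart = pool[0]
+            #       extras = (pool[1:] + [pool[-1]] * count)[:count]
+            #       return fanart, "|".join(extras)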
+            if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart:
+                item.extra = imagenes[1] + "|" + imagenes[2]
+            else:
+                if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
+                    item.extra = imagenes[0] + "|" + imagenes[1]
+                elif imagenes[2] != check_fanart:
+                    item.extra = imagenes[1] + "|" + imagenes[2]
+                else:
+                    item.extra = imagenes[1] + "|" + imagenes[1]
+        elif len(imagenes) == 2:
+            if imagenes[0] != check_fanart:
+                item.fanart = imagenes[0]
+            else:
+                item.fanart = imagenes[1]
+            if imagenes[1] != check_fanart and imagenes[1] != item.fanart:
+                item.extra = imagenes[0] + "|" + imagenes[1]
+            else:
+                item.extra = imagenes[1] + "|" + imagenes[0]
+        elif len(imagenes) == 1:
+            item.extra = imagenes[0] + "|" + imagenes[0]
+        else:
+            item.extra = item.fanart + "|" + item.fanart
+        item.extra = item.extra
+    images_fanarttv = fanartv(item, id_tvdb, id)
+    if images_fanarttv:
+        if item.contentType == "movie":
+            if images_fanarttv.get("moviedisc"):
+                item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url")
+            elif images_fanarttv.get("hdmovielogo"):
+                item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url")
+            elif images_fanarttv.get("moviethumb"):
+                item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url")
+            elif images_fanarttv.get("moviebanner"):
+                item.thumbnail = images_fanarttv.get("moviebanner")[0].get("url")
+            else:
+                item.thumbnail = item.thumbnail
+        else:
+            if images_fanarttv.get("hdtvlogo"):
+                item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url")
+            elif images_fanarttv.get("clearlogo"):
+                # Read the key that was actually found ("clearlogo"); the old
+                # "hdmovielogo" lookup returned None here and crashed
+                item.thumbnail = images_fanarttv.get("clearlogo")[0].get("url")
+            item.thumb_info = item.thumbnail
+            if images_fanarttv.get("tvbanner"):
+                item.thumb_art = images_fanarttv.get("tvbanner")[0].get("url")
+            elif images_fanarttv.get("tvthumb"):
+                item.thumb_art = images_fanarttv.get("tvthumb")[0].get("url")
+            else:
+                item.thumb_art = item.thumbnail
+
+    else:
+        item.extra = item.extra + "|" + item.thumbnail
diff --git a/plugin.video.alfa/channels/bricocine.json b/plugin.video.alfa/channels/bricocine.json
new file mode 100755
index 00000000..70235fa1
--- /dev/null
+++ b/plugin.video.alfa/channels/bricocine.json
@@ -0,0 +1,35 @@
+{
+    "id": "bricocine",
+    "name": "Bricocine",
+    "active": true,
+    "adult": false,
+    "language": "es",
+    "thumbnail": "http://s6.postimg.org/9u8m1ep8x/bricocine.jpg",
+    "banner": "bricocine.png",
+    "version": 1,
+    "changes": [
+        {
+            "date": "15/03/2017",
+            "description": "limpieza código"
+        },
+        {
+            "date": "01/07/2016",
+            "description": "Eliminado código innecesario."
+ } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/bricocine.py b/plugin.video.alfa/channels/bricocine.py new file mode 100755 index 00000000..c44af86e --- /dev/null +++ b/plugin.video.alfa/channels/bricocine.py @@ -0,0 +1,2308 @@ +# -*- coding: utf-8 -*- + +import os +import re +import urllib +import urllib2 + +import xbmcgui +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +## Cargar los datos con la librería 'requests' +def get_page(url): + from lib import requests + response = requests.get(url) + return response.content + + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) + br.addheaders = [('User-agent', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if not ".ftrH,.ftrHd,.ftrD>" in response: + r = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url) + print "prooooxy" + response = r.read() + return response + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, title="[COLOR sandybrown][B]Pelis MicroHD[/B][/COLOR]", action="peliculas", + url="http://www.bricocine.com/c/hd-microhd/", thumbnail="http://s6.postimg.org/5vgi38jf5/HD_brico10.jpg", + fanart="http://s16.postimg.org/6g9tc2nyt/brico_pelifan.jpg")) + itemlist.append( + Item(channel=item.channel, title="[COLOR sandybrown][B]Pelis Bluray-Rip[/B][/COLOR]", action="peliculas", + url="http://www.bricocine.com/c/bluray-rip/", thumbnail="http://s6.postimg.org/5w82dorpt/blueraybrico.jpg", + fanart="http://i59.tinypic.com/11rdnjm.jpg")) + itemlist.append( + Item(channel=item.channel, title="[COLOR sandybrown][B]Pelis DVD-Rip[/B][/COLOR]", action="peliculas", + url="http://www.bricocine.com/c/dvdrip/", thumbnail="http://s6.postimg.org/d2dlld4y9/dvd2.jpg", + fanart="http://s6.postimg.org/hcehbq5w1/brico_blue_fan.jpg")) + itemlist.append(Item(channel=item.channel, title="[COLOR sandybrown][B]Pelis 3D[/B][/COLOR]", action="peliculas", + url="http://www.bricocine.com/c/3d/", + thumbnail="http://www.eias3d.com/wp-content/uploads/2011/07/3d2_5.png", + fanart="http://s6.postimg.org/u18rvec0h/bric3dd.jpg")) + import xbmc + ###Para musica(si hay) y borra customkeys + if xbmc.Player().isPlaying(): + xbmc.executebuiltin('xbmc.PlayMedia(Stop)') + TESTPYDESTFILE = 
os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + + itemlist.append(Item(channel=item.channel, title="[COLOR sandybrown][B]Series[/B][/COLOR]", action="peliculas", + url="http://www.bricocine.com/c/series", + thumbnail="http://img0.mxstatic.com/wallpapers/bc795faa71ba7c490fcf3961f3b803bf_large.jpeg", + fanart="http://s6.postimg.org/z1ath370x/bricoseries.jpg", extra="Series")) + import xbmc + if xbmc.Player().isPlaying(): + print "PLAYIIING" + xbmc.executebuiltin('xbmc.PlayMedia(Stop)') + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") + TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + try: + os.remove(SEARCHDESTFILE) + print "Custom search.txt borrado" + except: + print "No hay search.txt" + + try: + os.remove(TRAILERDESTFILE) + print "Custom Trailer.txt borrado" + except: + print "No hay Trailer.txt" + itemlist.append(Item(channel=item.channel, title="[COLOR sandybrown][B]Buscar[/B][/COLOR]", action="search", url="", + thumbnail="http://fc04.deviantart.net/fs70/i/2012/285/3/2/poltergeist___tv_wallpaper_by_elclon-d5hmmlp.png", + fanart="http://s6.postimg.org/f44w84o5t/bricosearch.jpg", extra="search")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://www.bricocine.com/index.php/?s=%s" % texto + + try: + return peliculas(item, texto.replace("+", " ")) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def peliculas(item, texto=""): + logger.info() + itemlist = [] + + # Borra customkeys + import xbmc + if xbmc.Player().isPlaying(): + xbmc.executebuiltin('xbmc.PlayMedia(Stop)') + + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = 
os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") + + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "App borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + + try: + os.remove(TRAILERDESTFILE) + print "Trailer.txt borrado" + except: + print "No hay Trailer.txt" + + # Descarga la página + data = get_page(item.url) + data = re.sub(r"amp;", "", data) + ''' + <div class="post-10888 post type-post status-publish format-standard hentry category-the-leftovers + tag-ciencia-ficcion tag-drama tag-fantasia tag-misterio"> + <div class="entry"> + <a href="http://www.bricocine.com/10888/leftovers-temporada-1/"> + <img src="http://www.bricocine.com/wp-content/plugins/wp_movies/files/thumb_185_the_leftovers_.jpg" + alt="The Leftovers " /> + </a> + </div> + <div class="entry-meta"> + <div class="clearfix"> + <div itemprop="aggregateRating" itemscope itemtype="http://schema.org/AggregateRating" class="rating" + title="Puntos IMDB: 7.4"> + <div class="rating-stars imdb-rating"> + <div class="stars" style="width:74%"></div> + </div> + <div itemprop="ratingValue" class="rating-number"> 7.4</div> + </div> + <div itemprop="aggregateRating" itemscope itemtype="http://schema.org/AggregateRating" class="rating" + title="Puntos Bricocine: 6.2"> + <div class="rating-stars brico-rating"> + <div class="stars" style="width:62%"></div> + </div> + <div itemprop="ratingValue" class="rating-number"> 6.2</div> + </div> + <span class="vcard author none"> Publicado por + <a class="fn" href="" rel="author" target="_blank"></a> + </span> + <span class="date updated none">2014-10-07T23:36:17+00:00</span> + </div> + </div> + <h2 class="title2 entry-title"> + <a href="http://www.bricocine.com/10888/leftovers-temporada-1/"> The Leftovers – Temporada 1 </a> + </h2> + </div> + ''' + patron = 'format-standard hentry category(.*?)">.*?' + patron += '<div class="entry"> ' + patron += '<a href="(.*?)"> ' + patron += '<img src="(.*?)".*?' + patron += 'class="rating-number">([^<]+)</div></div>.*?' 
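+    # The scraping pattern is assembled piece by piece and is only complete
+    # after the <h2> fragment appended on the next line. A quick standalone
+    # way to sanity-check a pattern like this against the sample markup
+    # quoted above (hypothetical fixture file):
+    #
+    #   import re
+    #   sample = open("sample.html").read()
+    #   for groups in re.compile(patron, re.DOTALL).findall(sample):
+    #       print groups   # (tag, url, thumbnail, rating, title)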
+ patron += '<h2 class="title2 entry-title">.*?"> ([^<]+).*?</a>' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + if len(matches) == 0 and texto == "": + itemlist.append(Item(channel=item.channel, title="[COLOR gold][B]No hay resultados...[/B][/COLOR]", + thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", + fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False)) + + for tag, scrapedurl, scrapedthumbnail, scrapedcreatedate, scrapedtitle in matches: + # fix para el buscador para que no muestre entradas con texto que no es correcto + if texto.lower() not in scrapedtitle.lower(): + continue + + if scrapedthumbnail == "": + scrapedthumbnail = "http://s6.postimg.org/aseij0y4x/briconoimage.png" + title = scrapedtitle + # Separa entre series y peliculas + if not item.extra == "Series" and "index" not in item.url: + title = re.sub(r"\(.*?\) |\[.*?\] |&#.*?;", "", title) + + try: + scrapedyear = scrapertools.get_match(scrapedurl, '.*?www.bricocine.com/.*?/.*?(\d\d\d\d)') + except: + scrapedyear = "" + title_fan = title.strip() + + if item.extra == "Series" and "index" not in item.url: + title = re.sub(r"&#.*?;|Temporada.*?\d+ | Todas las Temporadas |\[.*?\]|\([0-9].*?\)|¡|!", "", title) + title_fan = title.strip() + scrapedyear = "" + # Diferencia si viene de la búsqueda + if "index" in item.url: + # Se usa tag en busqueda para diferenciar series no bien tipificadas + if ("3d" not in tag and not "dvdrip" in tag and not "bluray-rip" in tag and not "hd-microhd" in tag and + not "bdrip" in tag and not "estrenos" in tag and not "latino" in tag and not "hannibal" in tag): + title = re.sub(r"\n|\r|\t|\s{2}| |&#.*?;|\(.*?\)|\d\d\d\d", "", title) + title_fan = re.sub(r"\n|\r|\t|\s{2}| |&#.*?;|Temporada.*?\d+| Todas Las Temp.*?das", "", title) + title = title.replace("Temporada", "[COLOR green]Temporada[/COLOR]") + title = title.replace(title, "[COLOR white]" + title + "[/COLOR]") + + import xbmc + # Crea el archivo search.txt.Regula el buen funcionaiento de la música y volver atras en la busqueda + SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/search.txt", + SEARCHDESTFILE) + item.extra = "Series" + scrapedyear = "" + + else: + title = re.sub(r"\(.*?\)|\[.*?\]|&#.*?;|", "", scrapedtitle) + title = title.strip() + try: + scrapedyear = scrapertools.get_match(scrapedurl, '.*?www.bricocine.com/.*?/.*?(\d\d\d\d)') + except: + scrapedyear = "" + + title_fan = title.strip() + if item.extra == "Series": + item.extra = "peliculas" + # print item.extra + # Crea el archivo search.txt.Regula el buen funcionaiento de la música y volver atras en la busqueda + import xbmc + SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/search.txt", + SEARCHDESTFILE) + + scrapedcreatedate = scrapedcreatedate.replace(scrapedcreatedate, + "[COLOR sandybrown][B]" + scrapedcreatedate + "[/B][/COLOR]") + title = title.replace(title, "[COLOR white]" + title + "[/COLOR]") + title = title + "(Puntuación:" + scrapedcreatedate + ")" + show = title_fan + "|" + scrapedyear + itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action="fanart", + thumbnail=scrapedthumbnail, fanart="http://s15.postimg.org/id6ec47vf/bricocinefondo.jpg", + show=show, 
extra=item.extra, folder=True)) + + # Paginación + # <span class='current'>1</span><a href='http://www.bricocine.com/c/hd-microhd/page/2/' + + # Si falla no muestra ">> Página siguiente" + try: + next_page = scrapertools.get_match(data, "<span class='current'>\d+</span><a href='([^']+)'") + title = "[COLOR red]Pagina siguiente>>[/COLOR]" + itemlist.append(Item(channel=item.channel, title=title, url=next_page, action="peliculas", + fanart="http://s15.postimg.org/id6ec47vf/bricocinefondo.jpg", extra=item.extra, + thumbnail="http://s7.postimg.org/w2e0nr7hn/pdksiguiente.jpg", folder=True)) + except: + pass + + return itemlist + + +def fanart(item): + # Vamos a sacar todos los fanarts y arts posibles + logger.info() + itemlist = [] + url = item.url + data = get_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| ", "", data) + title = item.show.split("|")[0].strip() + import xbmc + xbmc.executebuiltin('Action(reloadkeymaps)') + + print "ya esta bien" + print item.extra + title = title.replace('[BDRIP]', '') + title = title.replace('á', 'a') + title = title.replace('Á', 'A') + title = title.replace('é', 'e') + title = title.replace('É', 'E') + title = title.replace('í', 'i') + title = title.replace('Í', 'i') + title = title.replace('ó', 'o') + title = title.replace('Ó', 'o') + title = title.replace('ú', 'u') + title = title.replace('Ú', 'U') + title = title.replace('ñ', 'n') + title = title.replace('Ñ', 'N') + + print title + if "temporada" in item.url or "Temporada" in item.show.split("|")[0] or item.extra == "Series": + import xbmc + # Establece destino customkey + SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") + + title = re.sub(r"&#.*?;|Temporada.*?\d+ | Todas las Temporadas ", "", title) + title = title.replace("&", "y") + if "Los originales" in title: + title = (translate(title, "en")) + if title == "Hope": + title = "Raising hope" + if title == "Invisibles": + title = "The whispers" + if title == "Secretos y mentiras": + title = "Secrets and lies" + if title == "Brotherhood": + title = title + " " + "comedy" + if title == "Las Palomas de Judea": + title = "the dovekeepers" + if title == "90210 Sensacion de vivir": + title = "90210" + + plot = title + title_tunes = re.sub(r"\(.*?\)", "", title) + title_tunes = (translate(title_tunes, "en")) + ###Prepara customkeys y borra cuando vuelve + import xbmc + if not xbmc.Player().isPlaying() and not os.path.exists(TRAILERDESTFILE): + + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + 
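+            # This remove-and-reload sequence for keymap files repeats all over
+            # the channel. A sketch of the same cleanup as a single loop
+            # (hypothetical helper, assuming the paths defined above):
+            #
+            #   def remove_keymaps(paths):
+            #       for path in paths:
+            #           try:
+            #               os.remove(path)
+            #           except OSError:
+            #               pass
+            #       xbmc.executebuiltin('Action(reloadkeymaps)')
+            #
+            #   remove_keymaps([KEYMAPDESTFILE, TESTPYDESTFILE, REMOTEDESTFILE])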
os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + + try: + ###Busca música serie y caraga customkey. En la vuelta evita busqueda si ya suena música + url_bing = "http://www.bing.com/search?q=%s+theme+song+site:televisiontunes.com" % title_tunes.replace( + ' ', '+') + # Llamamos al browser de mechanize. Se reitera en todas las busquedas bing + data = browser(url_bing) + + try: + subdata_tvt = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') + except: + pass + try: + url_tvt = scrapertools.get_match(subdata_tvt, '<a href="(.*?)"') + except: + url_tvt = "" + + if "-theme-songs.html" in url_tvt: + url_tvt = "" + if "http://m.televisiontunes" in url_tvt: + url_tvt = url_tvt.replace("http://m.televisiontunes", "http://televisiontunes") + + data = scrapertools.cachePage(url_tvt) + song = scrapertools.get_match(data, '<form name="song_name_form">.*?type="hidden" value="(.*?)"') + song = song.replace(" ", "%20") + print song + xbmc.executebuiltin('xbmc.PlayMedia(' + song + ')') + import xbmc, time + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/test.py", + TESTPYDESTFILE) + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customkey.xml", + KEYMAPDESTFILE) + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remote.xml", + REMOTEDESTFILE) + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml", + APPCOMMANDDESTFILE) + + xbmc.executebuiltin('Action(reloadkeymaps)') + + except: + pass + try: + os.remove(TRAILERDESTFILE) + print "Trailer.txt borrado" + except: + print "No hay Trailer.txt" + + if os.path.exists(SEARCHDESTFILE): + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + os.remove(SEARCHDESTFILE) + print "search.txt borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + ###Busqueda en bing el id de imdb de la serie + urlbing_imdb = "http://www.bing.com/search?q=%s+tv+serie+site:imdb.com" % title.replace(' ', '+') + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + try: + subdata_imdb = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') + except: + pass + + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + imdb_id = "" + ###Busca id de tvdb mediante imdb id + urltvdb_remote = "http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=" + imdb_id + "&language=es" + data = scrapertools.cachePage(urltvdb_remote) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<Data><Series><seriesid>([^<]+)</seriesid>' + 
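+        # GetSeriesByRemoteID answers with XML; the pattern above captures the
+        # first <seriesid>. The same lookup as a small function (a sketch built
+        # from the endpoint and scrapertools calls already used in this file):
+        #
+        #   def tvdb_id_from_imdb(imdb_id):
+        #       url = ("http://thetvdb.com/api/GetSeriesByRemoteID.php"
+        #              "?imdbid=%s&language=es" % imdb_id)
+        #       xml = scrapertools.cachePage(url)
+        #       return scrapertools.find_single_match(xml, '<seriesid>([^<]+)</seriesid>')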
matches = re.compile(patron, re.DOTALL).findall(data) + print matches + if len(matches) == 0: + ###Si no hay coincidencia busca en tvdb directamente + + + if ":" in title or "(" in title: + + title = title.replace(" ", "%20") + url_tvdb = "http://thetvdb.com/api/GetSeries.php?seriesname=" + title + "&language=es" + data = scrapertools.cachePage(url_tvdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<Data><Series><seriesid>([^<]+)</seriesid>' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + title = re.sub(r"(:.*)|\(.*?\)", "", title) + title = title.replace(" ", "%20") + + url_tvdb = "http://thetvdb.com/api/GetSeries.php?seriesname=" + title + "&language=es" + data = scrapertools.cachePage(url_tvdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<Data><Series><seriesid>([^<]+)</seriesid>' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + postertvdb = item.thumbnail + extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" + show = "http://s6.postimg.org/4asrg755b/bricotvshows2.png" + fanart_info = "http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg" + fanart_trailer = "http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg" + category = "" + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, plot=plot, + fanart="http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg", extra=extra, + category=category, show=show, folder=True)) + + else: + title = title.replace(" ", "%20") + url_tvdb = "http://thetvdb.com/api/GetSeries.php?seriesname=" + title + "&language=es" + data = scrapertools.cachePage(url_tvdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<Data><Series><seriesid>([^<]+)</seriesid>' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + postertvdb = item.thumbnail + extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" + show = "http://s6.postimg.org/4asrg755b/bricotvshows2.png" + fanart_info = "http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg" + fanart_trailer = "http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg" + category = "" + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, plot=plot, + fanart="http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg", extra=extra, + category=category, show=show, folder=True)) + + # 1ºfanart mediante id tvdb + + for id in matches: + category = id + id_serie = id + urltvdb_banners = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id_serie + "/banners.xml" + + data = scrapertools.cachePage(urltvdb_banners) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<Banners><Banner>.*?<VignettePath>(.*?)</VignettePath>' + matches = re.compile(patron, re.DOTALL).findall(data) + try: + # intenta poster tvdb + postertvdb = scrapertools.get_match(data, '<Banners><Banner>.*?<BannerPath>posters/(.*?)</BannerPath>') + postertvdb = "http://thetvdb.com/banners/_cache/posters/" + postertvdb + except: + postertvdb = item.thumbnail + + if len(matches) == 0: + extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" + show = "http://s6.postimg.org/4asrg755b/bricotvshows2.png" + fanart_info = "http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg" + fanart_trailer = "http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg" + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=postertvdb, 
fanart="http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg", + plot=plot, category=category, extra=extra, show=show, folder=True)) + + for fan in matches: + fanart = "http://thetvdb.com/banners/" + fan + fanart_1 = fanart + # Busca fanart para info, fanart para trailer y 2ºfanart + patron = '<Banners><Banner>.*?<BannerPath>.*?</BannerPath>.*?</Banner><Banner>.*?<BannerPath>(.*?)</BannerPath>.*?</Banner><Banner>.*?<BannerPath>(.*?)</BannerPath>.*?</Banner><Banner>.*?<BannerPath>(.*?)</BannerPath>' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = fanart_1 + fanart_trailer = fanart_1 + fanart_2 = fanart_1 + show = fanart_1 + extra = postertvdb + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=postertvdb, fanart=fanart_1, plot=plot, category=category, + extra=extra, show=show, folder=True)) + for fanart_info, fanart_trailer, fanart_2 in matches: + fanart_info = "http://thetvdb.com/banners/" + fanart_info + fanart_trailer = "http://thetvdb.com/banners/" + fanart_trailer + fanart_2 = "http://thetvdb.com/banners/" + fanart_2 + # Busqueda de todos loas arts posibles + for id in matches: + url_fanartv = "http://webservice.fanart.tv/v3/tv/" + id_serie + "?api_key=dffe90fba4d02c199ae7a9e71330c987" + data = scrapertools.cachePage(url_fanartv) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"clearlogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"tvposter"' in data: + tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + if '"tvbanner"' in data: + tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') + if '"tvthumb"' in data: + tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') + if '"hdtvlogo"' in data: + hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') + if '"hdclearart"' in data: + hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') + if len(matches) == 0: + item.thumbnail = postertvdb + if '"hdtvlogo"' in data: + if "showbackground" in data: + + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + show = fanart_2 + else: + thumbnail = hdtvlogo + extra = thumbnail + show = fanart_2 + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, + category=category, extra=extra, show=show, folder=True)) + + + else: + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + show = fanart_2 + else: + thumbnail = hdtvlogo + extra = thumbnail + show = fanart_2 + + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, extra=extra, + show=show, category=category, folder=True)) + else: + extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" + show = fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=item.thumbnail, plot=plot, fanart=fanart_1, + extra=extra, show=show, category=category, folder=True)) + + for logo in matches: + if '"hdtvlogo"' in data: + thumbnail = hdtvlogo + elif not '"hdtvlogo"' in data: + if '"clearlogo"' in data: + thumbnail = logo + else: + thumbnail = item.thumbnail + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + if 
"showbackground" in data: + + extra = clear + show = fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, + extra=extra, show=show, category=category, folder=True)) + else: + extra = clear + show = fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, + extra=extra, show=show, category=category, folder=True)) + + if "showbackground" in data: + + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + else: + extra = logo + show = fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, + extra=extra, show=show, category=category, folder=True)) + + if not '"clearart"' in data and not '"showbackground"' in data: + if '"hdclearart"' in data: + extra = hdtvclear + show = fanart_2 + else: + extra = thumbnail + show = fanart_2 + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, extra=extra, + show=show, category=category, folder=True)) + + else: + ###Películas + title = title.decode('utf8').encode('latin1') + title = title.replace("&", " y ") + if title == "JustiCia": + title = "Justi&cia" + if title == "El milagro": + title = "Miracle" + if "La Saga Crepusculo" in title: + title = re.sub(r"La Saga", "", title) + + year = item.show.split("|")[1] + if "Saga" in title: + title = title.replace('Saga completa', '') + title = title.replace('Saga', '') + title_collection = title.replace(" ", "+") + url_collection = "http://api.themoviedb.org/3/search/collection?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_collection + "+&language=es" + data = scrapertools.cachePage(url_collection) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + try: + id = scrapertools.get_match(data, '"page":1.*?"id":(.*?),') + except: + id = "" + urlc_images = "http://api.themoviedb.org/3/collection/" + id + "?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urlc_images) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"poster_path":"(.*?)","backdrop_path":"(.*?)".*?"backdrop_path":"(.*?)".*?"backdrop_path":"(.*?)".*?"backdrop_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + if len(matches) == 0: + posterdb = item.thumbnail + extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" + fanart_1 = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" + fanart = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" + fanart_info = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" + fanart_trailer = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" + fanart_2 = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" + for posterdb, fanart_1, fanart_info, fanart_trailer, fanart_2 in matches: + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + fanart_1 = "https://image.tmdb.org/t/p/original" + fanart_1 + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_trailer = 
"https://image.tmdb.org/t/p/original" + fanart_trailer + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + + else: + + try: + try: + ###Busqueda en Tmdb la peli por titulo y año + title_tmdb = title.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') + except: + if ":" in title or "(" in title: + title_tmdb = title.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') + else: + title_tmdb = title.replace(" ", "%20") + title_tmdb = re.sub(r"(:.*)|\(.*?\)", "", title_tmdb) + url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') + + + except: + ###Si no hay coincidencia realiza busqueda por bing del id Imdb + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + + try: + subdata_imdb = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') + subdata_imdb = re.sub("http://anonymouse.org/cgi-bin/anon-www.cgi/", "", subdata_imdb) + except: + pass + + try: + url_imdb = scrapertools.get_match(subdata_imdb, '<a href="([^"]+)"') + except: + pass + try: + id_imdb = scrapertools.get_match(url_imdb, '.*?www.imdb.com/.*?/(.*?)/') + except: + pass + try: + ###Busca id Tmdb mediante el id de Imdb + urltmdb_remote = "https://api.themoviedb.org/3/find/" + id_imdb + "?external_source=imdb_id&api_key=2e2160006592024ba87ccdf78c28f49f" + + data = scrapertools.cachePage(urltmdb_remote) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, '"movie_results".*?,"id":(\d+)') + except: + id = "" + + ###Llegados aqui ya tenemos(o no) el id(Tmdb);Busca fanart_1 + urltmdb_fan1 = "http://api.themoviedb.org/3/movie/" + id + "?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_fan1) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"adult".*?"backdrop_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + try: + ###Prueba poster de Tmdb + posterdb = scrapertools.get_match(data, '"adult".*?"poster_path":"(.*?)"') + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + except: + posterdb = item.thumbnail + + if len(matches) == 0: + + ###Si no encuentra fanart_1 en Tmdb realiza busqueda directamente en Imdb + try: + + urlbing_imdb = "http://www.bing.com/search?q=imdb+movie+%s+%s" % (title.replace(' ', '+'), year) + + data = browser(urlbing_imdb) + try: + subdata_imdb = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') + subdata_imdb = re.sub("http://anonymouse.org/cgi-bin/anon-www.cgi/", "", subdata_imdb) + except: + pass + try: + url_imdb = scrapertools.get_match(subdata_imdb, '<a href="([^"]+)"') + url_imdb = re.sub("http://www.imdb.comhttp://anonymouse.org/cgi-bin/anon-www.cgi/", "", + 
url_imdb)
+                except:
+                    url_imdb = data
+                data = scrapertools.cachePage(url_imdb)
+
+                try:
+                    poster_imdb = scrapertools.get_match(data, '<td rowspan="2" id="img_primary">.*?src="([^"]+)"')
+                    poster_imdb = poster_imdb.replace("._.*?jpg", "._V1_SX640_SY720_.jpg")
+
+                except:
+                    poster_imdb = posterdb
+
+                try:
+                    url_photo = scrapertools.get_match(data,
+                                                       '<div class="combined-see-more see-more">.*?<a href="([^"]+)"')
+                    url_photos = "http://www.imdb.com" + url_photo
+                    data = scrapertools.cachePage(url_photos)
+                    # Each pattern below skips one more src= attribute, so these four
+                    # matches pick up the 1st-4th thumbnails of the IMDb media index.
+                    try:
+                        photo_imdb = scrapertools.get_match(data,
+                                                            '<div class="media_index_thumb_list".*?src="([^"]+)"')
+                        photo_imdb = re.sub(r"._.*?jpg", "._V1_SX1280_SY720_.jpg", photo_imdb)
+
+                    except:
+                        pass
+
+                    try:
+                        photo_imdb2 = scrapertools.get_match(data,
+                                                             '<div class="media_index_thumb_list".*?src=.*?src="([^"]+)"')
+                        photo_imdb2 = re.sub(r"._.*?jpg", "._V1_SX1280_SY720_.jpg", photo_imdb2)
+                    except:
+                        pass
+                    try:
+                        photo_imdb3 = scrapertools.get_match(data,
+                                                             '<div class="media_index_thumb_list".*?src=.*?src=.*?src="([^"]+)"')
+                        photo_imdb3 = re.sub(r"._.*?jpg", "._V1_SX1280_SY720_.jpg", photo_imdb3)
+                    except:
+                        pass
+                    try:
+                        photo_imdb4 = scrapertools.get_match(data,
+                                                             '<div class="media_index_thumb_list".*?src=.*?src=.*?src=.*?src="([^"]+)"')
+                        photo_imdb4 = re.sub(r"._.*?jpg", "._V1_SX1280_SY720_.jpg", photo_imdb4)
+                    except:
+                        pass
+
+                except:
+                    pass
+            except:
+                pass
+
+            extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png"  # http://4.bp.blogspot.com/-0rYZjLStWrM/TcIqkbq-MaI/AAAAAAAACiM/7_qFGM4WvnA/s1600/BarraSeparadora-Sinopsis.png
+
+            # Use the first IMDb photo that was actually scraped for each art slot,
+            # falling back to a stock image. This replaces the original cascades of
+            # try/except NameError probes; the original also probed the never-defined
+            # name "photo_imdb1", which is corrected to "photo_imdb" here.
+            _photos = locals()
+            _nofan = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg"
+            fanart_1 = _photos.get("photo_imdb3", _photos.get("photo_imdb2", _photos.get("photo_imdb", _nofan)))
+            fanart_2 = _photos.get("photo_imdb4", _photos.get("photo_imdb2", _photos.get("photo_imdb", _nofan)))
+            fanart_info = _photos.get("photo_imdb2", _photos.get("photo_imdb", _nofan))
+            fanart_trailer = _photos.get("photo_imdb3", _photos.get("photo_imdb2", _photos.get("photo_imdb", _nofan)))
+            category = _photos.get("photo_imdb3", _photos.get("photo_imdb",
+                                                              "http://s6.postimg.org/yefi9ccsx/briconofotoventanuco.png"))
+            fanart = _photos.get("photo_imdb", _photos.get("photo_imdb2", _photos.get("photo_imdb3", _nofan)))
+            show = _photos.get("photo_imdb4", _photos.get("photo_imdb2", _photos.get("photo_imdb", _nofan)))
+
+        ###fanart_1 found on Tmdb
+        for fan in matches:
+            fanart = "https://image.tmdb.org/t/p/original" + fan
+            fanart_1 = fanart
+        ###Look up the info fanart, the trailer fanart and fanart_2 (findvideos) on Tmdb
+        urltmdb_images = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=2e2160006592024ba87ccdf78c28f49f"
+        data = scrapertools.cachePage(urltmdb_images)
+        data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+        patron = 
'"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = fanart_1 + fanart_trailer = fanart_1 + fanart_2 = fanart_1 + for fanart_info, fanart_trailer, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_trailer = "https://image.tmdb.org/t/p/original" + fanart_trailer + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + + if fanart_info == fanart: + ###Busca fanart_info en Imdb si coincide con fanart + try: + url_imdbphoto = "http://www.imdb.com/title/" + id_imdb + "/mediaindex" + photo_imdb = scrapertools.get_match(url_imdbphoto, + '<div class="media_index_thumb_list".*?src="([^"]+)"') + photo_imdb = photo_imdb.replace("@._V1_UY100_CR25,0,100,100_AL_.jpg", + "@._V1_SX1280_SY720_.jpg") + fanart_info = photo_imdb + except: + fanart_info = fanart_2 + + # Busqueda de todos los arts posibles + + url_fanartv = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=dffe90fba4d02c199ae7a9e71330c987" + data = scrapertools.cachePage(url_fanartv) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"hdmovielogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"moviedisc"' in data: + disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"') + if '"movieposter"' in data: + poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"') + if '"moviethumb"' in data: + thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"') + if '"moviebanner"' in data: + banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"') + + if len(matches) == 0: + extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" + show = fanart_2 + category = fanart_1 + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos_peli", url=item.url, server="torrent", + thumbnail=posterdb, fanart=fanart, extra=extra, show=show, category=category, folder=True)) + for logo in matches: + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + if '"moviebackground"' in data: + extra = clear + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos_peli", url=item.url, + server="torrent", thumbnail=logo, fanart=fanart_1, extra=extra, show=show, + category=category, folder=True)) + else: + extra = clear + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos_peli", url=item.url, + server="torrent", thumbnail=logo, fanart=fanart_1, extra=extra, show=show, + category=category, folder=True)) + + if '"moviebackground"' in data: + + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = clear + + else: + extra = logo + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = logo + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos_peli", 
url=item.url, + server="torrent", thumbnail=logo, fanart=fanart_1, extra=extra, show=show, + category=category, folder=True)) + + if not '"hdmovieclearart"' in data and not '"moviebackground"' in data: + extra = logo + show = fanart_2 + if '"moviebanner"' in data: + category = banner + else: + category = extra + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos_peli", url=item.url, + server="torrent", thumbnail=logo, fanart=fanart_1, category=category, extra=extra, + show=show, folder=True)) + ####Info item. Se añade item.show.split("|")[0] and item.extra != "Series" para salvar el error de cuando una serie no está perfectamente tipificada como tal en Bricocine + title = "Info" + title = title.replace(title, "[COLOR skyblue]" + title + "[/COLOR]") + if not "temporada" in item.url and not "Temporada" in item.show.split("|")[0] and item.extra != "Series": + thumbnail = posterdb + if "temporada" in item.url or "Temporada" in item.show.split("|")[0] or item.extra == "Series": + if '"tvposter"' in data: + thumbnail = tvposter + else: + thumbnail = postertvdb + + if "tvbanner" in data: + category = tvbanner + else: + category = show + + itemlist.append( + Item(channel=item.channel, action="info", title=title, url=item.url, thumbnail=thumbnail, fanart=fanart_info, + show=show, extra=extra, category=category, folder=False)) + + ####Trailer item + title = "[COLOR crimson]Trailer[/COLOR]" + if "temporada" in item.url or "Temporada" in item.show.split("|")[0] or item.extra == "Series": + if '"tvthumb"' in data: + thumbnail = tvthumb + else: + thumbnail = postertvdb + if '"tvbanner"' in data: + extra = tvbanner + elif '"tvthumb"' in data: + extra = tvthumb + else: + extra = item.thumbnail + else: + if '"moviethumb"' in data: + thumbnail = thumb + else: + thumbnail = posterdb + + if '"moviedisc"' in data: + extra = disc + else: + if '"moviethumb"' in data: + extra = thumb + + else: + extra = posterdb + + itemlist.append(Item(channel=item.channel, action="trailer", title=title, url=item.url, thumbnail=thumbnail, + fulltitle=item.title, fanart=fanart_trailer, extra=extra, folder=True)) + import xbmc + xbmc.executebuiltin('Action(reloadkeymaps)') + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + ###Ubicacion Customkey + import xbmc + SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + ###Carga Customkey en Finvideos cuando se trata de una busqueda + if xbmc.Player().isPlaying(): + if not os.path.exists(TESTPYDESTFILE): + import xbmc + urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/search.txt", + SEARCHDESTFILE) + urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/test.py", + TESTPYDESTFILE) + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customkey.xml", + KEYMAPDESTFILE) + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remote.xml", + REMOTEDESTFILE) + urllib.urlretrieve( + 
"https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml", + APPCOMMANDDESTFILE) + + xbmc.executebuiltin('Action(reloadkeymaps)') + + data = get_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| | - REPARADO", "", data) + ###Borra Customkey cuando no hay música + import xbmc + if not xbmc.Player().isPlaying(): + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + + ###Busca video cuando hay torrents y magnet en la serie + if 'id="magnet"' in data: + if 'id="file"' in data: + bloque_capitulos = scrapertools.get_match(data, + '<table class="table table-series">(.*?)<span class="block mtop clearfix">') + patron = '<span class="title">([^<]+)-.*?(\d)(\d+)([^<]+)</span></td>.*?' + patron += 'id="([^"]+)".*?href="([^"]+)".*?id="([^"]+)" href="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos) + if len(matches) == 0: + patron = '<span class="title">(.*?)(\d)(\d+)([^<]+)</span></td>.*?' + patron += 'id="([^"]+)".*?href="([^"]+)".*?id="([^"]+)".*?href="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos) + if len(matches) == 0: + show = item.show + extra = item.thumbnail + ###Se identifica como serie respetando en anterior item.category + category = item.category + "|" + "series" + itemlist.append(Item(channel=item.channel, + title="[COLOR gold][B]Ooops!! 
Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]",
+                                     action="findvideos_peli", url=item.url,
+                                     thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png",
+                                     fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra,
+                                     show=show, category=category, plot=item.plot, folder=True))
+
+            import base64
+            for title_links, seasson, epi, calidad, title_torrent, url_torrent, title_magnet, url_magnet in matches:
+                try:
+                    season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent')
+                except:
+                    try:
+                        ###Season lookup for series that are not properly typed as such
+                        season = scrapertools.get_match(data, '<span class="title">.*?-.*?(\d+)x')
+                    except:
+                        season = "0"
+                # Strip the leading season digit from the 3-digit episode code
+                # (101 -> 01, 230 -> 30, ...); equivalent to the original chain of
+                # thirty re.sub calls, one per episode number 01-30.
+                epi = re.sub(r"[1-9](0[1-9]|[12][0-9]|30)", r"\1", epi)
+
+                seasson_epi = season + "x" + epi
+                seasson_epi = seasson_epi.replace(seasson_epi, "[COLOR sandybrown]" + seasson_epi + "[/COLOR]")
+                ###Episode adjustment for info_epi
+                if "x0" in seasson_epi:
+                    epi = epi.replace("0", "")
+
+                title_links = title_links.replace("\\'s", "'s")
+                title_torrent = "[" + title_torrent.replace("file", "torrent") + "]"
+                title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]")
+                title_magnet = "[" + "magnet" + "]"
+                title_magnet = "[COLOR red]Opción[/COLOR]" + " " + title_magnet.replace(title_magnet,
+                                                                                        "[COLOR crimson]" + title_magnet + "[/COLOR]")
+                calidad = calidad.replace(calidad, "[COLOR sandybrown]" + calidad + "[/COLOR]")
+                title_links = title_links.replace(title_links, "[COLOR orange]" + title_links + "[/COLOR]")
+                title_torrent = title_links + " " + 
seasson_epi + calidad + "- " + title_torrent + url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1]) + url_magnet = base64.decodestring(url_magnet.split('&u=')[1][::-1]) + title_links = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| |REPARADO", "", title_links) + title_links = title_links.replace('\[.*?\]', '') + title_links = title_links.replace('á', 'a') + title_links = title_links.replace('Á', 'A') + title_links = title_links.replace('é', 'e') + title_links = title_links.replace('í', 'i') + title_links = title_links.replace('ó', 'o') + title_links = title_links.replace('ú', 'u') + title_links = title_links.replace(' ', '%20') + + extra = season + "|" + title_links + "|" + epi + if "sinopsis.png" in item.extra: + item.extra = item.thumbnail + if "bricotvshows2.png" in item.show: + item.show = item.fanart + + itemlist.append(Item(channel=item.channel, title=title_torrent, action="episodios", url=url_torrent, + thumbnail=item.extra, fanart=item.show, plot=item.plot, extra=extra, + category=item.category, folder=True)) + itemlist.append(Item(channel=item.channel, title=title_magnet, action="episodios", url=url_magnet, + thumbnail=item.extra, fanart=item.show, extra=extra, plot=item.plot, + category=item.category, folder=True)) + try: + ###Comprueba si, aparte de cápitulos torrent/magnet hay algun torrent suelto sin magnet + checktorrent = scrapertools.get_match(data, + 'id="magnet".*?Descargar .torrent<\/a><\/li><\/ul><\/td><\/tr><tr><td><span class="title">.*?rel="nofollow">(.*?)<\/a><\/li><\/ul><\/td><\/tr><tr><td>') + except: + checktorrent = "" + ###Busqueda Torrent si los encuentra sueltos + if checktorrent == "Descargar .torrent": + torrent_bloque = scrapertools.get_match(data, + 'id="file".*?id="magnet".*?<span class="title">.*?<a id="file".*?a id="file".*?class="btn btn-primary".*?d="file"(.*?class="btn btn-primary".*?)</table>') + + patron = '<span class="title">([^<]+)- (\d)(\d+)([^<]+).*?' + patron += 'id="file".*?href="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(torrent_bloque) + if len(matches) == 0: + patron = '<span class="title">(.*?)(\d)(\d+)([^<]+)</span></td>.*?' + patron += 'id="([^"]+)".*?href="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos) + if len(matches) == 0: + show = item.show + extra = item.thumbnail + category = item.category + "|" + "series" + + itemlist.append(Item(channel=item.channel, + title="[COLOR gold][B]Ooops!! 
Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]",
+                                             action="findvideos_peli", url=item.url,
+                                             thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png",
+                                             fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra,
+                                             show=show, category=category, plot=item.plot, folder=True))
+
+                import base64
+
+                for title_links, seasson, epi, calidad, url_torrent in matches:
+                    ## torrent
+                    try:
+                        season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent')
+                    except:
+                        ###Season lookup for series that are not properly typed as such
+                        season = scrapertools.get_match(data, '<span class="title">.*?-.*?(\d+)x')
+                    # Strip the leading season digit from the 3-digit episode code,
+                    # as above (replaces the original chain of thirty re.sub calls).
+                    epi = re.sub(r"[1-9](0[1-9]|[12][0-9]|30)", r"\1", epi)
+
+                    seasson_epi = season + "x" + epi
+                    seasson_epi = seasson_epi.replace(seasson_epi, "[COLOR sandybrown]" + seasson_epi + "[/COLOR]")
+                    if "x0" in seasson_epi:
+                        epi = epi.replace("0", "")
+                    title_torrent = "[torrent]"
+                    title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]")
+                    calidad = calidad.replace(calidad, "[COLOR sandybrown]" + calidad + "[/COLOR]")
+                    title_links = title_links.replace(title_links, "[COLOR orange]" + title_links + "[/COLOR]")
+                    title_torrent = title_links + " " + seasson_epi + calidad + "- " + title_torrent
+                    url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1])
+                    title_links = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| |REPARADO", "", title_links)
+                    title_links = title_links.replace('\[.*?\]', '')
+                    title_links = title_links.replace('á', 'a')
+                    title_links = title_links.replace('Á', 'A')
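+                    # Editor's note: the accent stripping and the ' ' -> '%20'
+                    # replace below exist because title_links is packed into the
+                    # pipe-delimited "extra" field and re-split later by episodios()
+                    # and info_capitulos(). The '\[.*?\]' replace above is a plain
+                    # str.replace on a regex-looking literal, so it never matches;
+                    # the re.sub two lines up already removed the brackets.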
+                    title_links = title_links.replace('é', 'e')
+                    title_links = title_links.replace('í', 'i')
+                    title_links = title_links.replace('ó', 'o')
+                    title_links = title_links.replace('ú', 'u')
+                    title_links = title_links.replace(' ', '%20')
+                    extra = season + "|" + title_links + "|" + epi
+                    itemlist.append(Item(channel=item.channel, title=title_torrent, action="episodios", url=url_torrent,
+                                         thumbnail=item.extra, fanart=item.show, extra=extra, plot=item.plot,
+                                         category=item.category, folder=True))
+    else:
+        ###Lookup when the series has torrents but no magnets
+        if 'id="file"' in data and not 'id="magnet"' in data:
+
+            patron = '<span class="title">([^<]+)- (\d)(\d+)([^<]+).*?'
+            patron += 'id="([^"]+)".*?href="([^"]+)"'
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            if len(matches) == 0:
+                patron = '<span class="title">(.*?)(\d)(\d+)([^<]+)</span></td>.*?'
+                patron += 'id="([^"]+)".*?href="([^"]+)"'
+                # Warning (editor): bloque_capitulos is only ever defined in the
+                # torrent+magnet branch above, so on a torrent-only page this
+                # fallback raises a NameError instead of matching.
+                matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos)
+                if len(matches) == 0:
+                    show = item.show
+                    extra = item.thumbnail
+                    category = item.category + "|" + "series"
+                    itemlist.append(Item(channel=item.channel,
+                                         title="[COLOR gold][B]Ooops!! Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]",
+                                         action="findvideos_peli", url=item.url,
+                                         thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png",
+                                         fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra,
+                                         show=show, category=category, plot=item.plot, folder=True))
+            import base64
+            for title_links, seasson, epi, calidad, title_torrent, url_torrent in matches:
+                try:
+                    season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent')
+                except:
+                    ###Season lookup for series that are not properly typed as such
+                    season = scrapertools.get_match(data, '<span class="title">.*?-.*?(\d+)x')
+                # Strip the leading season digit from the 3-digit episode code, as
+                # above (replaces the original chain of thirty re.sub calls).
+                epi = re.sub(r"[1-9](0[1-9]|[12][0-9]|30)", r"\1", epi)
+
+                seasson_epi = season + "x" + epi
+                seasson_epi = seasson_epi.replace(seasson_epi, "[COLOR sandybrown]" + seasson_epi + "[/COLOR]")
+                if "x0" in seasson_epi:
+                    epi = epi.replace("0", "")
+                title_torrent = "[" + title_torrent.replace("file", "torrent") + "]"
+                title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]")
+                calidad = calidad.replace(calidad, "[COLOR sandybrown]" + calidad + "[/COLOR]")
+                title_links = title_links.replace(title_links, "[COLOR orange]" + title_links + "[/COLOR]")
+                title_torrent = title_links + " " + seasson_epi + calidad + "- " + title_torrent
+                url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1])
+                title_links = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| |REPARADO", "", title_links)
+                title_links = title_links.replace('\[.*?\]', '')
+                title_links = title_links.replace('á', 'a')
+                title_links = title_links.replace('Á', 'A')
+                title_links = title_links.replace('é', 'e')
+                title_links = title_links.replace('í', 'i')
+                title_links = title_links.replace('ó', 'o')
+                title_links = title_links.replace('ú', 'u')
+                title_links = title_links.replace(' ', '%20')
+                extra = season + "|" + title_links + "|" + epi
+                itemlist.append(Item(channel=item.channel, title=title_torrent, action="episodios", url=url_torrent,
+                                     thumbnail=item.extra, fanart=item.show, extra=extra, plot=item.plot,
+                                     category=item.category, folder=True))
+        ###Lookup when there are magnets but no torrents
+        if 'id="magnet"' in data and not 'id="file"' in data:
+            patron = '<span class="title">([^<]+)- (\d)(\d+)([^<]+).*?'
+            patron += 'id="([^"]+)" href="([^"]+)"'
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            if len(matches) == 0:
+                patron = '<span class="title">(.*?)(\d)(\d+)([^<]+)</span></td>.*?'
+                patron += 'id="([^"]+)".*?href="([^"]+)"'
+                matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos)
+                if len(matches) == 0:
+                    show = item.show
+                    extra = item.extra
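+                    # This fallback item re-routes the user to the film-style flow
+                    # (findvideos_peli) when none of the series patterns matched.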
+                    itemlist.append(Item(channel=item.channel,
+                                         title="[COLOR gold][B]Ooops!! Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]",
+                                         action="findvideos_peli", url=item.url,
+                                         thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png",
+                                         fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra, show=show,
+                                         folder=True))
+            import base64
+            for title_links, seasson, epi, calidad, title_magnet, url_magnet in matches:
+                try:
+                    season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent')
+                except:
+                    ###Season lookup for series that are not properly typed as such
+                    season = scrapertools.get_match(data, '<span class="title">.*?-.*?(\d+)x')
+                # Strip the leading season digit from the 3-digit episode code, as
+                # above (replaces the original chain of thirty re.sub calls).
+                epi = re.sub(r"[1-9](0[1-9]|[12][0-9]|30)", r"\1", epi)
+
+                seasson_epi = season + "x" + epi
+                seasson_epi = seasson_epi.replace(seasson_epi, "[COLOR sandybrown]" + seasson_epi + "[/COLOR]")
+                if "x0" in seasson_epi:
+                    epi = epi.replace("0", "")
+                title_magnet = "[" + "magnet" + "]"
+                title_magnet = "[COLOR red]Opción[/COLOR]" + " " + title_magnet.replace(title_magnet,
+                                                                                        "[COLOR crimson]" + title_magnet + "[/COLOR]")
+                calidad = calidad.replace(calidad, "[COLOR sandybrown]" + calidad + "[/COLOR]")
+                title_links = title_links.replace(title_links, "[COLOR orange]" + title_links + "[/COLOR]")
+                title_magnet = title_links + " " + seasson_epi + calidad + "- " + title_magnet
+                url_magnet = base64.decodestring(url_magnet.split('&u=')[1][::-1])
+                title_links = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| |REPARADO", "", title_links)
+                title_links = title_links.replace('\[.*?\]', '')
+                title_links = title_links.replace('á', 'a')
+                title_links = title_links.replace('Á', 'A')
+                title_links = 
title_links.replace('é', 'e') + title_links = title_links.replace('í', 'i') + title_links = title_links.replace('ó', 'o') + title_links = title_links.replace('ú', 'u') + title_links = title_links.replace(' ', '%20') + extra = season + "|" + title_links + "|" + epi + itemlist.append( + Item(channel=item.channel, title=title_magnet, action="episodios", url=url_magnet, thumbnail=item.extra, + fanart=item.show, extra=extra, plot=item.plot, category=item.category, folder=True)) + ###No hay video + if not 'id="file"' in data and not 'id="magnet"' in data: + show = item.show + extra = item.extra + itemlist.append(Item(channel=item.channel, + title="[COLOR gold][B]Ooops!! Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]", + action="findvideos_peli", url=item.url, + thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", + fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra, show=show, + folder=True)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + ###Borra Customkey si no hay música + import xbmc + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + if not xbmc.Player().isPlaying() and os.path.exists(TESTPYDESTFILE): + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") + REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(KEYMAPDESTFILE) + print "Custom Keyboard.xml borrado" + os.remove(TESTPYDESTFILE) + print "Testpy borrado" + os.remove(REMOTEDESTFILE) + print "Remote borrado" + os.remove(APPCOMMANDDESTFILE) + print "Appcommand borrado" + xbmc.executebuiltin('Action(reloadkeymaps)') + except Exception as inst: + xbmc.executebuiltin('Action(reloadkeymaps)') + print "No hay customs" + + season = item.extra.split("|")[0] + title_links = item.extra.split("|")[1] + epi = item.extra.split("|")[2] + title_tag = "[COLOR yellow]Ver --[/COLOR]" + item.title = item.title.replace("Ver --", "") + if "magnet" in item.title: + title_links = title_links.replace("%20", "") + title_links = "[COLOR orange]" + title_links + " " + season + "x" + epi + "[/COLOR]" + title = title_tag + title_links + " " + item.title + else: + item.title = re.sub(r"\[.*?\]", "", item.title) + title = title_tag + "[COLOR orange]" + item.title + "[/COLOR]" + "[COLOR green][torrent][/COLOR]" + + if item.plot == "Sensación de vivir: La nueva generación": + item.plot = "90210" + if item.plot == "La historia del universo": + item.plot = "how the universe works" + try: + # Nueva busqueda bing de Imdb serie id + url_imdb = "http://www.bing.com/search?q=%s+tv+series+site:imdb.com" % item.plot.replace(' ', '+') + data = browser(url_imdb) + + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + try: + subdata_imdb = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') + except: + pass + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + imdb_id = "" + ### Busca en Tmdb quinta imagen para episodios mediate Imdb id + urltmdb_imdb = "https://api.themoviedb.org/3/find/" + imdb_id + "?api_key=2e2160006592024ba87ccdf78c28f49f&external_source=imdb_id" + data = scrapertools.cachePage(urltmdb_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = 
scrapertools.get_match(data, '"tv_results":.*?,"id":(.*?),"') + + except: + ###Si no hay coincidencia busca directamente en Tmdb por título + if ":" in item.plot: + try: + item.plot = item.plot.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + item.plot + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') + except: + try: + item.plot = re.sub(r"(:.*)", "", item.plot) + item.plot = item.plot.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + item.plot + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') + except: + thumbnail = item.thumbnail + fanart = item.fanart + id = "" + else: + try: + if "De la A a la Z" in item.plot: + item.plot = "A to Z" + item.plot = item.plot.replace(" ", "%20") + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + item.plot + "&language=es&include_adult=false" + data = scrapertools.cachePage(url_tmdb) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + id = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') + except: + thumbnail = item.thumbnail + fanart = item.fanart + id = "" + + ###Teniendo (o no) el id Tmdb busca imagen + urltmdb_images = "https://api.themoviedb.org/3/tv/" + id + "?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_images) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + try: + backdrop = scrapertools.get_match(data, '"backdrop_path":"(.*?)"') + fanart_3 = "https://image.tmdb.org/t/p/original" + backdrop + fanart = fanart_3 + except: + fanart_3 = item.fanart + fanart = fanart_3 + ###Se hace también la busqueda de el thumb del episodio en Tmdb + urltmdb_epi = "https://api.themoviedb.org/3/tv/" + id + "/season/" + item.extra.split("|")[0] + "/episode/" + \ + item.extra.split("|")[2] + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(urltmdb_epi) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + fanart = fanart_3 + itemlist.append( + Item(channel=item.channel, title=title, action="play", url=item.url, server="torrent", thumbnail=thumbnail, + fanart=fanart, folder=False)) + + for foto in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + foto + + extra = id + "|" + season + itemlist.append( + Item(channel=item.channel, title=title, action="play", url=item.url, thumbnail=thumbnail, fanart=fanart, + category=item.category, folder=False)) + ###Busca poster de temporada Tmdb + urltmdb_temp = "http://api.themoviedb.org/3/tv/" + id + "/season/" + season + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" + data = get_page(urltmdb_temp) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + for temp in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + temp + ####Busca el fanart para el item info#### + urltmdb_faninfo = "http://api.themoviedb.org/3/tv/" + id + 
"/images?api_key=2e2160006592024ba87ccdf78c28f49f" + data = get_page(urltmdb_faninfo) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"backdrops".*?"file_path":".*?","height".*?"file_path":"(.*?)",' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart = item.fanart + for fanart_4 in matches: + fanart = "https://image.tmdb.org/t/p/original" + fanart_4 + show = item.category + "|" + item.thumbnail + ### Item info de episodios + import xbmc + xbmc.executebuiltin('Action(reloadkeymaps)') + title = "Info" + title = title.replace(title, "[COLOR skyblue]" + title + "[/COLOR]") + itemlist.append(Item(channel=item.channel, action="info_capitulos", title=title, url=item.url, thumbnail=thumbnail, + fanart=fanart, extra=item.extra, show=show, folder=False)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + ###Opción para trailers + if "youtube" in item.url: + itemlist.append(Item(channel=item.channel, action="play", server="youtube", url=item.url, fulltitle=item.title, + fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False)) + + import xbmc + xbmc.executebuiltin('Action(reloadkeymaps)') + itemlist.append(Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", + thumbnail=item.thumbnail, fanart=item.fanart, category=item.category, folder=False)) + + return itemlist + + +def findvideos_peli(item): + logger.info() + + itemlist = [] + data = get_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| | - REPARADO", "", data) + + # Busca video si hay magnet y torrent + if 'id="magnet"' in data: + if 'id="file"' in data: + patron = '<span class="title">([^"]+)</span>.*?' + patron += 'id="([^"]+)".*?href="([^"]+)".*?id="([^"]+)" href="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + itemlist.append(Item(channel=item.channel, + title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]", + thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", + fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False)) + import base64 + for title_links, title_torrent, url_torrent, title_magnet, url_magnet in matches: + + title_torrent = "[" + title_torrent.replace("file", "torrent") + "]" + title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]") + title_magnet = "[" + "magnet" + "]" + title_magnet = "[COLOR red]Opción[/COLOR]" + " " + title_magnet.replace(title_magnet, + "[COLOR crimson]" + title_magnet + "[/COLOR]") + title_links = title_links.replace(title_links, "[COLOR sandybrown]" + title_links + "[/COLOR]") + title_links = re.sub(r"&#.*?;|\[HD .*?\]|\(.*?\)", "", title_links) + title_tag = "[COLOR yellow]Ver --[/COLOR]" + title_torrent = title_tag + title_links + "- " + title_torrent + url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1]) + url_magnet = base64.decodestring(url_magnet.split('&u=')[1][::-1]) + if "sinopsis.png" in item.extra and not "series" in item.category: + item.extra = "http://oi67.tinypic.com/28sxwrs.jpg" + ###Se identifica si es una serie mal tipificada + if "series" in item.category and not "Completa" in title_links: + try: + season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent') + except: + season = "1" + title_link = scrapertools.get_match(title_links, '(.*?) 
-') + epi = scrapertools.get_match(title_links, '-.*?(x\d+)') + if "x0" in epi: + epi = epi.replace("x0", "") + title_links = title_link + action = "episodios" + extra = season + "|" + title_links + "|" + epi + itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, + server="torrent", thumbnail=item.extra, fanart=item.show, extra=extra, + category=item.category, plot=item.plot, folder=True)) + itemlist.append( + Item(channel=item.channel, title=title_magnet, action=action, url=url_magnet, server="torrent", + thumbnail=item.extra, category=item.category, fanart=item.show, extra=extra, + plot=item.plot, folder=True)) + else: + action = "play" + itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, + server="torrent", thumbnail=item.extra, fanart=item.show, folder=False)) + itemlist.append( + Item(channel=item.channel, title=title_magnet, action=action, url=url_magnet, server="torrent", + thumbnail=item.extra, fanart=item.show, folder=False)) + else: + ###Busca video cuando hay torrent pero no magnet + if 'id="file"' in data and not 'id="magnet"' in data: + patron = '<span class="title">([^"]+)</span>.*?' + patron += 'id="([^"]+)".*?href="([^"]+)".*?' + + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + itemlist.append(Item(channel=item.channel, + title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]", + thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", + fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False)) + import base64 + for title_links, title_torrent, url_torrent in matches: + ## torrent + title_torrent = "[" + title_torrent.replace("file", "torrent") + "]" + title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]") + title_links = title_links.replace(title_links, "[COLOR sandybrown]" + title_links + "[/COLOR]") + title_links = re.sub(r"&#.*?;", "", title_links) + title_tag = "[COLOR yellow]Ver --[/COLOR]" + title_torrent = title_tag + title_links + "- " + title_torrent + url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1]) + if "sinopsis.png" in item.extra: + item.extra = "http://oi67.tinypic.com/28sxwrs.jpg" + ###Se identifica si es una serie mal tipificada + if "series" in item.category and not "Completa" in title_links: + try: + season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent') + except: + season = "1" + title_link = scrapertools.get_match(title_links, '(.*?) -') + epi = scrapertools.get_match(title_links, '-.*?(x\d+)') + if "x0" in epi: + epi = epi.replace("x0", "") + title_links = title_link + action = "episodios" + extra = season + "|" + title_links + "|" + epi + itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, + server="torrent", thumbnail=item.extra, fanart=item.show, extra=extra, + category=item.category, plot=item.plot, folder=True)) + + else: + action = "play" + itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, + server="torrent", thumbnail=item.extra, fanart=item.show, folder=False)) + ###Busca video cuando solo hay magnet y no torrent + if 'id="magnet"' in data and not 'id="file"' in data: + patron = '<span class="title">([^"]+)</span>.*?' 
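+            # Unlike the torrent patterns above (id="..." .*? href="..."), a magnet
+            # anchor carries its href right after the id attribute, so this pattern
+            # allows no gap between them.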
+            patron += 'id="([^"]+)" href="([^"]+)"'
+
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            if len(matches) == 0:
+                itemlist.append(Item(channel=item.channel,
+                                     title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]",
+                                     thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png",
+                                     fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False))
+            import base64
+            for title_links, title_magnet, url_magnet in matches:
+                title_magnet = "[" + "magnet" + "]"
+                title_links = title_links.replace(title_links, "[COLOR sandybrown]" + title_links + "[/COLOR]")
+                title_links = re.sub(r"&#.*?;", "", title_links)
+                title_tag = "[COLOR red]Ver --[/COLOR]"
+                title_magnet = title_tag + title_links + "- " + title_magnet.replace(title_magnet,
+                                                                                     "[COLOR crimson]" + title_magnet + "[/COLOR]")
+                url_magnet = base64.decodestring(url_magnet.split('&u=')[1][::-1])
+                if "sinopsis.png" in item.extra:
+                    item.extra = "http://oi67.tinypic.com/28sxwrs.jpg"
+                ###Detect a series that was mistyped as a film
+                if "series" in item.category and not "Completa" in title_links:
+                    try:
+                        season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent')
+                    except:
+                        season = "1"
+                    title_link = scrapertools.get_match(title_links, '(.*?) -')
+                    epi = scrapertools.get_match(title_links, '-.*?(x\d+)')
+                    if "x0" in epi:
+                        epi = epi.replace("x0", "")
+                    title_links = title_link
+                    action = "episodios"
+                    extra = season + "|" + title_links + "|" + epi
+                    # Fixed: this magnet-only branch used to append title_torrent and
+                    # url_torrent, which are never defined on this path; it now
+                    # appends the magnet item.
+                    itemlist.append(Item(channel=item.channel, title=title_magnet, action=action, url=url_magnet,
+                                         server="torrent", thumbnail=item.extra, fanart=item.show, extra=extra,
+                                         category=item.category, plot=item.plot, folder=True))
+
+                else:
+                    action = "play"
+
+                    itemlist.append(
+                        Item(channel=item.channel, title=title_magnet, action=action, url=url_magnet, server="torrent",
+                             thumbnail=item.extra, fanart=item.show, folder=False))
+    ###Neither torrent nor magnet links on the page
+    if not 'id="file"' in data and not 'id="magnet"' in data:
+        itemlist.append(Item(channel=item.channel,
+                             title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]",
+                             thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png",
+                             fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False))
+    return itemlist
+
+
+def trailer(item):
+    logger.info()
+    ###Create the trailer.txt control file so the music is not reloaded when coming back from a trailer
+    import xbmc
+    TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py")
+    if os.path.exists(TESTPYDESTFILE):
+        TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt")
+
+        urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/trailer.txt",
+                           TRAILERDESTFILE)
+
+    itemlist = []
+    data = get_page(item.url)
+
+    # trailer
+    patron = "<iframe width='.*?' height='.*?' src='([^']+)?"
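+    # Editor's note: the trailing "?" makes the capture optional and is probably a
+    # typo for the closing quote; servertools.findvideos() below resolves each
+    # captured embed URL into a playable server item.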
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    if len(matches) == 0:
+        itemlist.append(
+            Item(channel=item.channel, title="[COLOR gold][B]Esta pelicula no tiene trailer,lo sentimos...[/B][/COLOR]",
+                 thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png",
+                 fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False))
+
+    for url in matches:
+        listavideos = servertools.findvideos(url)
+
+        for video in listavideos:
+            videotitle = scrapertools.unescape(video[0])
+            url = video[1]
+            server = video[2]
+
+            title = "[COLOR crimson]Trailer - [/COLOR]"
+            itemlist.append(Item(channel=item.channel, action="play", server="youtube", title=title + videotitle, url=url,
+                                 thumbnail=item.extra, fulltitle=item.title,
+                                 fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False))
+    return itemlist
+
+
+def info(item):
+    logger.info()
+    url = item.url
+    data = get_page(url)
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+    if "temporada" in item.url:
+        ###Prepare the Customkey so forcerefresh is blocked and cannot conflict with info
+        import xbmc
+        APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml")
+        try:
+            os.remove(APPCOMMANDDESTFILE)
+        except:
+            pass
+        patron = '<title>([^<]+).*?Temporada.*?'
+        patron += '<div class="description" itemprop="text.*?">.*?([^<]+).*?</div></div></div>'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+        if len(matches) == 0:
+            title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
+            plot = "Esta serie no tiene informacion..."
+            plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]")
+            photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
+            foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
+            info = ""
+            quit = "Pulsa" + " [COLOR crimson][B]INTRO [/B][/COLOR]" + "para quitar"
+        for title, plot in matches:
+            plot_title = "Sinopsis" + "[CR]"
+            plot_title = plot_title.replace(plot_title, "[COLOR red]" + plot_title + "[/COLOR]")
+            plot = plot_title + plot
+            plot = plot.replace(plot, "[COLOR white][B]" + plot + "[/B][/COLOR]")
+            plot = re.sub(r'div class=".*?">', '', plot)
+            plot = plot.replace("div>", "")
+            plot = plot.replace('div class="margin_20b">', '')
+            plot = plot.replace('div class="post-entry">', '')
+            plot = plot.replace('p style="text-align: left;">', '')
+            title = re.sub(r"&#.*?;", "", title)
+            title = title.replace(title, "[COLOR sandybrown][B]" + title + "[/B][/COLOR]")
+            title = title.replace("-", "")
+            title = title.replace("Torrent", "")
+            title = title.replace("amp;", "")
+            title = title.replace("Descargar en Bricocine.com", "")
+            try:
+                scrapedinfo = scrapertools.get_match(data, 'Ficha técnica</h2><dl class="list"><dt>(.*?)hellip')
+            except IndexError:
+                scrapedinfo = scrapertools.get_match(data,
+                                                     'Ficha técnica</h2><dl class="list"><dt>(.*?)</div><div class="quad-2"')
+            scrapedinfo = scrapedinfo.replace("<br />", " ")
+            scrapedinfo = scrapedinfo.replace("</dl>", "<dt>")
+            # Fixed: this result used to be assigned to the misspelled name
+            # "scrpaedinfo" and silently discarded.
+            scrapedinfo = re.sub(r'<a href=".*?"|title=".*?"|item.*?=".*?"', '', scrapedinfo)
+
+            infoformat = re.compile('(.*?</dt><dd.*?>).*?</dd><dt>', re.DOTALL).findall(scrapedinfo)
+            for info in infoformat:
+                scrapedinfo = scrapedinfo.replace(scrapedinfo, "[COLOR white][B]" + scrapedinfo + "[/COLOR]")
+                scrapedinfo = scrapedinfo.replace(info, "[COLOR red][B]" + info + "[/B][/COLOR]")
+            info = scrapedinfo
+            info = re.sub(
+                r'<a href=".*?">|title=".*?">|<span itemprop=.*?>|</span></span>|<span>|</a>|itemprop=".*?"|y otros.*?&',
+                '', info)
+            info = info.replace("</dt><dd>", ":")
+            info = info.replace("</dt><dd >", ":")
+            info = info.replace("</dt><dd > ", ":")
+            info = info.replace("</dd><dt>", " ")
+            info = info.replace("</span>", " ")
+
+            info = info.replace("Actores:", "[COLOR red][B]Actores:[/B][/COLOR] ")
+            photo = item.extra
+            foto = item.category
+            quit = "Pulsa" + " [COLOR crimson][B]INTRO [/B][/COLOR]" + "para quitar"
+            ###Load the no-back Customkey
+            NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml")
+            REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml")
+            APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml")
+            urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/noback.xml",
+                               NOBACKDESTFILE)
+            urllib.urlretrieve(
+                "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remotenoback.xml",
+                REMOTENOBACKDESTFILE)
+            urllib.urlretrieve(
+                "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/appnoback.xml",
+                APPNOBACKDESTFILE)
+            xbmc.executebuiltin('Action(reloadkeymaps)')
+    else:
+        data = get_page(item.url)
+        data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+        patron = '<div class="description" itemprop="text.*?">.*?([^<]+).*?</div></div></div>.*?'
+        patron += '<span class="title">([^"]+)</span>'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+
+        if len(matches) == 0:
+            title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
+            plot = "Esta pelicula no tiene sinopsis..."
+            plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]")
+            foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
+            photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
+            info = ""
+            quit = "Pulsa" + " [COLOR crimson][B]INTRO [/B][/COLOR]" + "para quitar"
+
+        for plot, title in matches:
+            title = title.upper()
+            title = title.replace(title, "[COLOR sandybrown][B]" + title + "[/B][/COLOR]")
+            title = re.sub(r"&#.*?;|\[HD .*?\]", "", title)
+            plot_title = "Sinopsis" + "[CR]"
+            plot_title = plot_title.replace(plot_title, "[COLOR red]" + plot_title + "[/COLOR]")
+            plot = plot_title + plot
+            plot = plot.replace(plot, "[COLOR white][B]" + plot + "[/B][/COLOR]")
+            plot = plot.replace('div class="margin_20b">', '')
+            plot = plot.replace('div class="post-entry">', '')
+            try:
+                scrapedinfo = scrapertools.get_match(data, 'Ficha técnica</h2><dl class="list"><dt>(.*?)hellip')
+            except IndexError:
+                scrapedinfo = scrapertools.get_match(data,
+                                                     'Ficha técnica</h2><dl class="list"><dt>(.*?)</div><div class="quad-2"')
+            scrapedinfo = scrapedinfo.replace("<br />", " ")
+            scrapedinfo = scrapedinfo.replace("</dl>", "<dt>")
+            # Fixed: same "scrpaedinfo" misspelling as in the temporada branch above.
+            scrapedinfo = re.sub(r'<a href=".*?"|title=".*?"|item.*?=".*?"', '', scrapedinfo)
+            infoformat = re.compile('(.*?</dt><dd.*?>).*?</dd><dt>', re.DOTALL).findall(scrapedinfo)
+            for info in infoformat:
+                scrapedinfo = scrapedinfo.replace(scrapedinfo, "[COLOR white][B]" + scrapedinfo + "[/COLOR]")
+                scrapedinfo = scrapedinfo.replace(info, "[COLOR red][B]" + info + "[/B][/COLOR]")
+            info = scrapedinfo
+            info = re.sub(
+                r'<a href=".*?">|title=".*?">|<span itemprop=.*?>|</span></span>|<span>|</a>|itemprop=".*?"|y otros.*?&',
+                '', info)
+            info = info.replace("</dt><dd>", ":")
+            info = info.replace("</dt><dd >", ":")
+            info = info.replace("</dt><dd > ", ":")
+            info = info.replace("</dd><dt>", " ")
+            info = info.replace("</span>", " ")
+            if "hellip" in data:
+                info = info.replace("Actores:", "[COLOR red][B]Actores:[/B][/COLOR] ")
+
+            foto = 
item.category + photo = item.extra + quit = "Pulsa" + " [COLOR crimson][B]INTRO [/B][/COLOR]" + "para quitar" + + ventana2 = TextBox1(title=title, plot=plot, info=info, thumbnail=photo, fanart=foto, quit=quit) + ventana2.doModal() + + +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 + + +class TextBox1(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getInfo = kwargs.get('info') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getQuit = kwargs.get('quit') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, + 'http://s6.postimg.org/58jknrvtd/backgroundventana5.png') + self.title = xbmcgui.ControlTextBox(140, 60, 1130, 50) + self.quit = xbmcgui.ControlTextBox(145, 90, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 140) + self.info = xbmcgui.ControlFadeLabel(120, 310, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(813, 43, 390, 100, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(120, 365, 1060, 250, self.getFanart) + + self.addControl(self.background) + self.addControl(self.title) + self.addControl(self.quit) + self.addControl(self.plot) + self.addControl(self.thumbnail) + self.addControl(self.fanart) + self.addControl(self.info) + + self.title.setText(self.getTitle) + self.quit.setText(self.getQuit) + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + ###Información de incompatibilidd autoscroll con versiones inferiores a isengrd + print "Actualice a la ultima version de kodi para mejor info" + import xbmc + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + self.info.addLabel(self.getInfo) + + def get(self): + + self.show() + + def onAction(self, action): + if action == ACTION_SELECT_ITEM or action == ACTION_GESTURE_SWIPE_LEFT: + ###Se vuelven a cargar Customkey al salir de info + import os, sys + import xbmc + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") + REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") + APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + try: + os.remove(NOBACKDESTFILE) + os.remove(REMOTENOBACKDESTFILE) + os.remove(APPNOBACKDESTFILE) + if os.path.exists(TESTPYDESTFILE): + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml", + APPCOMMANDDESTFILE) + xbmc.executebuiltin('Action(reloadkeymaps)') + except: + pass + self.close() + + +def info_capitulos(item): + logger.info() + import xbmc + APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") + try: + os.remove(APPCOMMANDDESTFILE) + except: + pass + url = item.url + data = scrapertools.cache_page(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "series" in item.category: + item.category = item.category.split("|")[0] + else: + item.category = item.show.split("|")[0] + item.thumbnail = 
item.show.split("|")[1] + capitulo = item.extra.split("|")[2] + capitulo = re.sub(r"(0)\d;", "", capitulo) + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.show.split("|")[0] + "/default/" + \ + item.extra.split("|")[0] + "/" + capitulo + "/es.xml" + data = scrapertools.cache_page(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?' + patron += '<Overview>(.*?)</Overview>.*?' + + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." + plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + quit = "Pulsa" + " [COLOR greenyellow][B]INTRO [/B][/COLOR]" + "para quitar" + else: + + for name_epi, info in matches: + if "<filename>episodes" in data: + foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') + fanart = "http://thetvdb.com/banners/" + foto + else: + fanart = item.show.split("|")[1] + if item.show.split("|")[1] == item.thumbnail: + fanart = "http://s6.postimg.org/4asrg755b/bricotvshows2.png" + + plot = info + plot = (translate(plot, "es")) + plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") + name_epi = re.sub(r"&#.*?;|&", "", name_epi) + plot = re.sub(r"&#.*?;", "", plot) + title = name_epi.upper() + title = title.replace(title, "[COLOR sandybrown][B]" + title + "[/B][/COLOR]") + image = fanart + foto = item.show.split("|")[1] + if not ".png" in item.show.split("|")[1]: + foto = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" + quit = "Pulsa" + " [COLOR greenyellow][B]INTRO [/B][/COLOR]" + "para quitar" + NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") + REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") + APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") + TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") + urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/noback.xml", + NOBACKDESTFILE) + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remotenoback.xml", + REMOTENOBACKDESTFILE) + urllib.urlretrieve( + "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/appnoback.xml", + APPNOBACKDESTFILE) + xbmc.executebuiltin('Action(reloadkeymaps)') + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, quit=quit) + ventana.doModal() + + +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getQuit = kwargs.get('quit') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://s6.postimg.org/n3ph1uxn5/ventana.png') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.quit = xbmcgui.ControlTextBox(145, 110, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = 
xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)

        self.addControl(self.background)
        self.addControl(self.title)
        self.addControl(self.quit)
        self.addControl(self.plot)
        self.addControl(self.thumbnail)
        self.addControl(self.fanart)

        self.title.setText(self.getTitle)
        self.quit.setText(self.getQuit)
        try:
            self.plot.autoScroll(7000, 6000, 30000)
        except:
            # autoScroll is not available on older Kodi versions
            print "Actualice a la ultima version de kodi para mejor info"
            import xbmc
            xbmc.executebuiltin(
                'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
        self.plot.setText(self.getPlot)

    def get(self):
        self.show()

    def onAction(self, action):
        if action == ACTION_SELECT_ITEM or action == ACTION_GESTURE_SWIPE_LEFT:
            ### Restore the original keymaps when leaving the info window
            import os, sys
            import xbmc
            APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml")
            NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml")
            REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml")
            APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml")
            TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py")
            try:
                os.remove(NOBACKDESTFILE)
                os.remove(REMOTENOBACKDESTFILE)
                os.remove(APPNOBACKDESTFILE)
                xbmc.executebuiltin('Action(reloadkeymaps)')
                if os.path.exists(TESTPYDESTFILE):
                    urllib.urlretrieve(
                        "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml",
                        APPCOMMANDDESTFILE)
                    xbmc.executebuiltin('Action(reloadkeymaps)')
            except:
                xbmc.executebuiltin('Action(reloadkeymaps)')
            self.close()


def translate(to_translate, to_language="auto", language="auto"):
    ### Translation fetched by scraping Google Translate's mobile page
    '''Return the translation using Google Translate.
    Pass the short code of the target language (French = fr, English = en,
    Spanish = es, ...); if you don't pass anything the source language is
    detected automatically and English is used as the default target.
    Example:
        print(translate("salut tu vas bien?", "en"))
        hello you alright?'''
    agents = {
        'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"}
    # The translated text sits right after this marker in the returned HTML
    before_trans = 'class="t0">'
    link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_language, language, to_translate.replace(" ", "+"))
    request = urllib2.Request(link, headers=agents)
    page = urllib2.urlopen(request).read()
    result = page[page.find(before_trans) + len(before_trans):]
    result = result.split("<")[0]
    return result


if __name__ == '__main__':
    to_translate = 'Hola como estas?'
    print("%s >> %s" % (to_translate, translate(to_translate)))
    print("%s >> %s" % (to_translate, translate(to_translate, 'fr')))
# should print: Hola como estas? >> Hello how are you
# and: Hola como estas? >> Bonjour comment allez-vous?
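Review note: the keymap download/restore dance above (urlretrieve of noback.xml, remotenoback.xml and appnoback.xml into special://userdata/keymaps, an Action(reloadkeymaps), then os.remove of the same files in onAction) is repeated almost verbatim in both info windows (TextBox1/TextBox2), info_capitulos and their onAction handlers. Below is a minimal sketch of how it could be factored into a shared helper, reusing the URLs from this diff; the module and function names are hypothetical and not part of this commit:

# -*- coding: utf-8 -*-
# keymap_helpers.py - hypothetical refactor sketch (Python 2 / Kodi API)
import os
import urllib

import xbmc

KEYMAP_DIR = xbmc.translatePath('special://userdata/keymaps')
BASE_URL = "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master"
NOBACK_FILES = {
    "noback.xml": BASE_URL + "/noback.xml",
    "remotenoback.xml": BASE_URL + "/Bricocine/remotenoback.xml",
    "appnoback.xml": BASE_URL + "/appnoback.xml",
}


def apply_noback_keymaps():
    # Drop the temporary keymaps in place and make Kodi reload them,
    # so Back cannot close the info window while it is open.
    for name, url in NOBACK_FILES.items():
        urllib.urlretrieve(url, os.path.join(KEYMAP_DIR, name))
    xbmc.executebuiltin('Action(reloadkeymaps)')


def remove_noback_keymaps():
    # Undo apply_noback_keymaps(); missing files are ignored so the
    # cleanup is safe to call more than once.
    for name in NOBACK_FILES:
        try:
            os.remove(os.path.join(KEYMAP_DIR, name))
        except OSError:
            pass
    xbmc.executebuiltin('Action(reloadkeymaps)')

Each window would then call apply_noback_keymaps() before doModal() and remove_noback_keymaps() from onAction(). Caching the three files after the first download would also avoid hitting GitHub every time an info window is opened.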
diff --git a/plugin.video.alfa/channels/canalporno.json b/plugin.video.alfa/channels/canalporno.json new file mode 100755 index 00000000..e4a4078d --- /dev/null +++ b/plugin.video.alfa/channels/canalporno.json @@ -0,0 +1,23 @@ +{ + "id": "canalporno", + "name": "Canalporno", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "http://i.imgur.com/gAbPcvT.png?1", + "banner": "canalporno.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "09/01/2017", + "description": "Primera version." + } + ], + "categories": [ + "adult" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/canalporno.py b/plugin.video.alfa/channels/canalporno.py new file mode 100755 index 00000000..24436b0d --- /dev/null +++ b/plugin.video.alfa/channels/canalporno.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- + +from core import httptools +from core import logger +from core import scrapertools + +host = "http://www.canalporno.com" + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(item.clone(action="findvideos", title="Útimos videos", url=host)) + itemlist.append(item.clone(action="categorias", title="Listado Categorias", + url=host + "/categorias")) + itemlist.append(item.clone(action="search", title="Buscar", url=host + "/search/?q=%s")) + return itemlist + + +def search(item, texto): + logger.info() + + try: + item.url = item.url % texto + itemlist = findvideos(item) + return sorted(itemlist, key=lambda it: it.title) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def findvideos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '<img src="([^"]+)".*?alt="([^"]+)".*?<h2><a href="([^"]+)">.*?' 
\ + '<div class="duracion"><span class="ico-duracion sprite"></span> ([^"]+) min</div>' + matches = scrapertools.find_multiple_matches(data, patron) + for thumbnail, title, url, time in matches: + scrapedtitle = time + " - " + title + scrapedurl = host + url + scrapedthumbnail = "http:" + thumbnail + itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail)) + + patron = '<div class="paginacion">.*?<span class="selected">.*?<a href="([^"]+)">([^"]+)</a>' + matches = scrapertools.find_multiple_matches(data, patron) + for url, title in matches: + url = host + url + title = "Página %s" % title + itemlist.append(item.clone(action="findvideos", title=title, url=url)) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, '<ul class="ordenar-por ordenar-por-categoria">' + '(.*?)<div class="publis-bottom">') + + patron = '<div class="muestra-categorias">.*?<a class="thumb" href="([^"]+)".*?<img class="categorias" src="([^"]+)".*?<div class="nombre">([^"]+)</div>' + matches = scrapertools.find_multiple_matches(bloque, patron) + for url, thumbnail, title in matches: + url = host + url + thumbnail = "http:" + thumbnail + itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + url = "http:" + scrapertools.find_single_match(data, '<source src="([^"]+)"') + itemlist.append(item.clone(url=url, server="directo")) + + return itemlist diff --git a/plugin.video.alfa/channels/cartoonlatino.json b/plugin.video.alfa/channels/cartoonlatino.json new file mode 100755 index 00000000..43fa8a91 --- /dev/null +++ b/plugin.video.alfa/channels/cartoonlatino.json @@ -0,0 +1,20 @@ +{ + "id": "cartoonlatino", + "name": "Cartoon-Latino", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/wk6fRDZ.png", + "banner": "http://i.imgur.com/115c59F.png", + "version": 1, + "changes": [ + { + "date": "07/06/2017", + "description": "Primera version del canal" + } + ], + "categories": [ + "tvshow", + "latino" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/cartoonlatino.py b/plugin.video.alfa/channels/cartoonlatino.py new file mode 100755 index 00000000..4f28dd32 --- /dev/null +++ b/plugin.video.alfa/channels/cartoonlatino.py @@ -0,0 +1,204 @@ +# -*- coding: utf-8 -*- + +import re + +from channels import renumbertools +from channelselector import get_thumb +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +host = "http://www.cartoon-latino.com/" + + +def mainlist(item): + logger.info() + + thumb_series = get_thumb("thumb_channels_tvshow.png") + thumb_series_az = get_thumb("thumb_channels_tvshow_az.png") + + itemlist = list() + + itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host, + thumbnail=thumb_series)) + itemlist = renumbertools.show_option(item.channel, itemlist) + + return itemlist + + +""" +def search(item, texto): + logger.info() + texto = texto.replace(" ","+") + item.url = item.url+texto + if texto!='': + return lista(item) +""" + + +def lista_gen(item): + logger.info() + + itemlist = [] + + data1 = httptools.downloadpage(item.url).data + data1 = 
re.sub(r"\n|\r|\t|\s{2}| ", "", data1) + patron_sec = '<section class="content">.+?<\/section>' + data = scrapertools.find_single_match(data1, patron_sec) + patron = '<article id=.+? class=.+?><div.+?>' + patron += '<a href="([^"]+)" title="([^"]+)' # scrapedurl, # scrapedtitle + patron += ' Capítulos Completos ([^"]+)">' # scrapedlang + patron += '<img.+? data-src=.+? data-lazy-src="([^"]+)"' # scrapedthumbnail + matches = scrapertools.find_multiple_matches(data, patron) + i = 0 + for scrapedurl, scrapedtitle, scrapedlang, scrapedthumbnail in matches: + i = i + 1 + if 'HD' in scrapedlang: + scrapedlang = scrapedlang.replace('HD', '') + title = scrapedtitle + " [ " + scrapedlang + "]" + itemlist.append( + Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios", + show=scrapedtitle, context=renumbertools.context(item))) + tmdb.set_infoLabels(itemlist) + # Paginacion + patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">' + next_page_url = scrapertools.find_single_match(data, patron_pag) + + if next_page_url != "" and i != 1: + item.url = next_page_url + itemlist.append(Item(channel=item.channel, action="lista_gen", title=">> Página siguiente", url=next_page_url, + thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png')) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + data_lista = scrapertools.find_single_match(data, '<div class="su-list su-list-style-"><ul>(.+?)<\/ul><\/div>') + patron = "<a href='(.+?)'>(.+?)<\/a>" + matches = scrapertools.find_multiple_matches(data_lista, patron) + for link, name in matches: + title = name + " [Latino]" + url = link + itemlist.append( + item.clone(title=title, url=url, plot=title, action="episodios", show=title, + context=renumbertools.context(item))) + tmdb.set_infoLabels(itemlist) + return itemlist + + +def episodios(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + data_lista = scrapertools.find_single_match(data, + '<div class="su-list su-list-style-"><ulclass="lista-capitulos">.+?<\/div><\/p>') + if '×' in data_lista: + data_lista = data_lista.replace('×', 'x') + + show = item.title + if "[Latino]" in show: + show = show.replace("[Latino]", "") + if "Ranma" in show: + patron_caps = '<\/i> <strong>.+?Capitulo ([^"]+)\: <a .+? href="([^"]+)">([^"]+)<\/a>' + else: + patron_caps = '<\/i> <strong>Capitulo ([^"]+)x.+?\: <a .+? 
href="([^"]+)">([^"]+)<\/a>' + matches = scrapertools.find_multiple_matches(data_lista, patron_caps) + scrapedplot = scrapertools.find_single_match(data, '<strong>Sinopsis<\/strong><strong>([^"]+)<\/strong><\/pre>') + number = 0 + ncap = 0 + A = 1 + for temp, link, name in matches: + if A != temp: + number = 0 + if "Ranma" in show: + number = int(temp) + temp = str(1) + else: + number = number + 1 + if number < 10: + capi = "0" + str(number) + else: + capi = str(number) + if "Ranma" in show: + season = 1 + episode = number + season, episode = renumbertools.numbered_for_tratk( + item.channel, item.show, season, episode) + date = name + if episode < 10: + capi = "0" + str(episode) + else: + capi = episode + title = str(season) + "x" + str(capi) + " - " + name # "{0}x{1} - ({2})".format(season, episode, date) + else: + title = str(temp) + "x" + capi + " - " + name + url = link + A = temp + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show)) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url, + + action="add_serie_to_library", extra="episodios", show=show)) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + data_function = scrapertools.find_single_match(data, '<!\[CDATA\[function (.+?)\]\]') + data_id = scrapertools.find_single_match(data, + "<script>\(adsbygoogle = window\.adsbygoogle \|\| \[\]\)\.push\({}\);<\/script><\/div><br \/>(.+?)<\/ins>") + itemla = scrapertools.find_multiple_matches(data_function, "src='(.+?)'") + serverid = scrapertools.find_multiple_matches(data_id, '<script>([^"]+)\("([^"]+)"\)') + for server, id in serverid: + for link in itemla: + if server in link: + url = link.replace('" + ID' + server + ' + "', str(id)) + if "drive" in server: + server1 = 'googlevideo' + else: + server1 = server + itemlist.append(item.clone(url=url, action="play", server=server1, + title="Enlace encontrado en %s " % (server1.capitalize()))) + return itemlist + + +def play(item): + logger.info() + + itemlist = [] + + # Buscamos video por servidor ... 
+ + devuelve = servertools.findvideosbyserver(item.url, item.server) + + if not devuelve: + # ...sino lo encontramos buscamos en todos los servidores disponibles + + devuelve = servertools.findvideos(item.url, skip=True) + + if devuelve: + # logger.debug(devuelve) + itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2], + + url=devuelve[0][1], thumbnail=item.thumbnail, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/ciberdocumentales.json b/plugin.video.alfa/channels/ciberdocumentales.json new file mode 100755 index 00000000..816e6020 --- /dev/null +++ b/plugin.video.alfa/channels/ciberdocumentales.json @@ -0,0 +1,37 @@ +{ + "id": "ciberdocumentales", + "name": "CiberDocumentales", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s9.postimg.org/secdb5s8v/ciberdocumentales.png", + "banner": "https://s1.postimg.org/sa486z0of/ciberdocumentales_banner.png", + "version": 1, + "changes": [ + { + "date": "18/06/2016", + "descripcion": "First release" + } + ], + "categories": [ + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_documentales", + "type": "bool", + "label": "Incluir en Novedades - Documentales", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/ciberdocumentales.py b/plugin.video.alfa/channels/ciberdocumentales.py new file mode 100755 index 00000000..fe3dd4ec --- /dev/null +++ b/plugin.video.alfa/channels/ciberdocumentales.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- + +import re + +from core import httptools +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item + +host = 'http://www.ciberdocumentales.com' + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Todas", action="lista", thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', url=host)) + + itemlist.append(Item(channel=item.channel, title="Generos", action="generos", url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png')) + + itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="lista", url=host, + thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png', + fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='masvistas')) + + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host, + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png')) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + if item.extra == 'buscar': + data = httptools.downloadpage(host + '/index.php?' + 'categoria=0&keysrc=' + item.text).data + else: + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + if item.extra == 'masvistas': + patron = '<div class=bloquecenmarcado><a title=.*? target=_blank href=(.*?) class=game><img src=(.*?) alt=(.*?) title= class=bloquecenimg \/>.*?<strong>(.*?)<\/strong>' + else: + patron = '<div class=fotonoticia><a.*?target=_blank href=(.*?)><img src=(.*?) alt=(.*?) 
\/>.*?class=textonoticia>.*?\/><br \/>(.*?)<\/div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches: + url = host + scrapedurl + thumbnail = host + scrapedthumbnail + plot = scrapertools.htmlclean(scrapedplot) + plot = plot.decode('iso8859-1').encode('utf-8') + contentTitle = scrapedtitle + title = contentTitle + title = title.decode('iso8859-1').encode('utf-8') + fanart = '' + itemlist.append( + Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=fanart, contentTitle=contentTitle)) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Paginacion + + if itemlist != []: + actual_page_url = item.url + next_page = scrapertools.find_single_match(data, 'class=current>.*?<\/span><a href=(.*?)>.*?<\/a>') + if next_page != '' and item.extra != 'masvistas': + itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=host + next_page, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png')) + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + patron = '<a style=text-transform:capitalize; href=(.*?)\/>(.*?)<\/a><\/span><\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + thumbnail = '' + fanart = '' + title = scrapedtitle + url = host + scrapedurl + + itemlist.append( + Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail, + fanart=fanart)) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.text = texto + item.extra = 'buscar' + if texto != '': + return lista(item) + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'documentales': + item.url = host + + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/cineasiaenlinea.json b/plugin.video.alfa/channels/cineasiaenlinea.json new file mode 100755 index 00000000..df5f5c91 --- /dev/null +++ b/plugin.video.alfa/channels/cineasiaenlinea.json @@ -0,0 +1,60 @@ +{ + "id": "cineasiaenlinea", + "name": "CineAsiaEnLinea", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/5KOU8uy.png?3", + "banner": "cineasiaenlinea.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "07/02/17", + "description": "Fix bug in newest" + }, + { + "date": "09/01/2017", + "description": "Primera version" + } + ], + "categories": [ + "movie", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en búsqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git 
a/plugin.video.alfa/channels/cineasiaenlinea.py b/plugin.video.alfa/channels/cineasiaenlinea.py new file mode 100755 index 00000000..5adfbdd9 --- /dev/null +++ b/plugin.video.alfa/channels/cineasiaenlinea.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +host = "http://www.cineasiaenlinea.com/" +# Configuracion del canal +__perfil__ = int(config.get_setting('perfil', 'cineasiaenlinea')) + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] + +if __perfil__ - 1 >= 0: + color1, color2, color3 = perfil[__perfil__ - 1] +else: + color1 = color2 = color3 = "" + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(action="peliculas", title="Novedades", url=host + "archivos/peliculas", + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Directors%20Chair.png", text_color=color1)) + itemlist.append(item.clone(action="peliculas", title="Estrenos", url=host + "archivos/estrenos", + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Directors%20Chair.png", text_color=color1)) + itemlist.append(item.clone(action="indices", title="Por géneros", url=host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Genre.png", text_color=color1)) + itemlist.append(item.clone(action="indices", title="Por país", url=host, text_color=color1)) + itemlist.append(item.clone(action="indices", title="Por año", url=host, text_color=color1)) + + itemlist.append(item.clone(title="", action="")) + itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3)) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + + item.url = "%s?s=%s" % (host, texto.replace(" ", "+")) + + try: + return peliculas(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = host + "archivos/peliculas" + item.action = "peliculas" + itemlist = peliculas(item) + + if itemlist[-1].action == "peliculas": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + item.text_color = color2 + + # Descarga la página + data = httptools.downloadpage(item.url).data + + patron = '<h3><a href="([^"]+)">([^<]+)<.*?src="([^"]+)".*?<a rel="tag">([^<]+)<' \ + '.*?<a rel="tag">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, year, calidad in matches: + title = re.sub(r' \((\d+)\)', '', scrapedtitle) + scrapedtitle += " [%s]" % calidad + infolab = {'year': 
year} + itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, infoLabels=infolab, + contentTitle=title, contentType="movie")) + + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" href="([^"]+)"') + if next_page: + itemlist.append(item.clone(title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def indices(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + logger.info(data) + if "géneros" in item.title: + bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por genero</h4>(.*?)</ul>') + matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<') + elif "año" in item.title: + bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por Año</h4>(.*?)</select>') + matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)">([^<]+)<') + else: + bloque = scrapertools.find_single_match(data, '(?i)<h4>Peliculas por Pais</h4>(.*?)</ul>') + matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<') + + for scrapedurl, scrapedtitle in matches: + if "año" in item.title: + scrapedurl = "%sfecha-estreno/%s" % (host, scrapedurl) + itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, + thumbnail=item.thumbnail, text_color=color3)) + + return itemlist + + +def findvideos(item): + logger.info() + data = httptools.downloadpage(item.url).data + item.infoLabels["plot"] = scrapertools.find_single_match(data, '(?i)<h2>SINOPSIS.*?<p>(.*?)</p>') + item.infoLabels["trailer"] = scrapertools.find_single_match(data, 'src="(http://www.youtube.com/embed/[^"]+)"') + + itemlist = servertools.find_video_items(item=item, data=data) + for it in itemlist: + it.thumbnail = item.thumbnail + it.text_color = color2 + + itemlist.append(item.clone(action="add_pelicula_to_library", title="Añadir película a la videoteca")) + if item.infoLabels["trailer"]: + folder = True + if config.is_xbmc(): + folder = False + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Ver Trailer", folder=folder, + contextual=not folder)) + + return itemlist diff --git a/plugin.video.alfa/channels/cinecalidad.json b/plugin.video.alfa/channels/cinecalidad.json new file mode 100755 index 00000000..3998e737 --- /dev/null +++ b/plugin.video.alfa/channels/cinecalidad.json @@ -0,0 +1,75 @@ +{ + "id": "cinecalidad", + "name": "CineCalidad", + "compatible": { + "addon_version": "4.3" + }, + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s31.postimg.org/puxmvsi7v/cinecalidad.png", + "banner": "https://s32.postimg.org/kihkdpx1x/banner_cinecalidad.png", + "version": 1, + "changes": [ + { + "date": "24/06/2017", + "description": "Cambios para autoplay" + }, + { + "date": "06/06/2017", + "description": "Compatibilidad con AutoPlay" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "18/06/2016", + "description": "First release." 
+ } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "Español", + "Portuges" + ] + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/cinecalidad.py b/plugin.video.alfa/channels/cinecalidad.py new file mode 100755 index 00000000..95dba25a --- /dev/null +++ b/plugin.video.alfa/channels/cinecalidad.py @@ -0,0 +1,488 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from channels import autoplay +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +IDIOMAS = {'latino': 'Latino', 'castellano': 'Español', 'portugues': 'Portugues'} +list_language = IDIOMAS.values() +logger.debug('lista_language: %s' % list_language) + +list_quality = ['1080p', '720p', '480p', '360p', '240p', 'default'] +list_servers = [ + 'yourupload', + 'thevideos', + 'filescdn', + 'uptobox', + 'okru', + 'nowvideo', + 'userscloud', + 'pcloud', + 'usersfiles', + 'vidbull', + 'openload', + 'directo' +] + +host = 'http://www.cinecalidad.to' +thumbmx = 'http://flags.fmcdn.net/data/flags/normal/mx.png' +thumbes = 'http://flags.fmcdn.net/data/flags/normal/es.png' +thumbbr = 'http://flags.fmcdn.net/data/flags/normal/br.png' + + +def mainlist(item): + idioma2 = "destacadas" + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append( + item.clone(title="CineCalidad Latino", + action="submenu", + host="http://cinecalidad.com/", + thumbnail=thumbmx, + extra="peliculas", + language='latino' + )) + + itemlist.append(item.clone(title="CineCalidad España", + action="submenu", + host="http://cinecalidad.com/espana/", + thumbnail=thumbes, + extra="peliculas", + language='castellano' + )) + + itemlist.append( + item.clone(title="CineCalidad Brasil", + action="submenu", + host="http://cinemaqualidade.com/", + thumbnail=thumbbr, + extra="filmes", + language='portugues' + )) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def submenu(item): + idioma = 'peliculas' + idioma2 = "destacada" + host = item.host + if item.host == "http://cinemaqualidade.com/": + idioma = "filmes" + idioma2 = "destacado" + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, + title=idioma.capitalize(), + action="peliculas", + url=host, + thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png', + fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png', + language=item.language + )) + itemlist.append(Item(channel=item.channel, + title="Destacadas", + action="peliculas", + url=host + "/genero-" + idioma + "/" + idioma2 + "/", + thumbnail='https://s30.postimg.org/humqxklsx/destacadas.png', + fanart='https://s30.postimg.org/humqxklsx/destacadas.png', + 
language=item.language + )) + itemlist.append(Item(channel=item.channel, + title="Generos", + action="generos", + url=host + "/genero-" + idioma, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + language=item.language + )) + itemlist.append(Item(channel=item.channel, + title="Por Año", + action="anyos", + url=host + "/" + idioma + "-por-ano", + thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png', + fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png', + language=item.language + )) + itemlist.append(Item(channel=item.channel, + title="Buscar", + action="search", + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + url=host + '/apiseries/seriebyword/', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png', + host=item.host, + language=item.language + )) + + return itemlist + + +def anyos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<a href="([^"]+)">([^<]+)</a> ' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + title = scrapedtitle + thumbnail = item.thumbnail + plot = item.plot + itemlist.append( + Item(channel=item.channel, + action="peliculas", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=item.thumbnail, + language=item.language + )) + + return itemlist + + +def generos(item): + tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "Suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "Drama": "https://s16.postimg.org/94sia332d/drama.png", + "Acción": "https://s3.postimg.org/y6o9puflv/accion.png", + "Aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "Fantas\xc3\xada": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "Infantil": "https://s23.postimg.org/g5rmazozv/infantil.png", + "Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "Com\xc3\xa9dia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "A\xc3\xa7\xc3\xa3o": "https://s3.postimg.org/y6o9puflv/accion.png", + "Fantasia": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "Fic\xc3\xa7\xc3\xa3o cient\xc3\xadfica": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png"} + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<li id="menu-item-.*?" class="menu-item menu-item-type-taxonomy menu-item-object-category ' \ + 'menu-item-.*?"><a href="([^"]+)">([^<]+)<\/a></li>' + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + title = scrapedtitle + thumbnail = tgenero[scrapedtitle] + plot = item.plot + itemlist.append( + Item(channel=item.channel, + action="peliculas", + title=title, url=url, + thumbnail=thumbnail, + plot=plot, + fanart=item.thumbnail, + language=item.language + )) + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + + patron = '<div class="home_post_cont.*? post_box">.*?<a href="([^"]+)".*?src="([^"]+)".*?title="(.*?) 
\((' \ + '.*?)\)".*?p>([^&]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches: + url = urlparse.urljoin(item.url, scrapedurl) + contentTitle = scrapedtitle + title = scrapedtitle + ' (' + scrapedyear + ')' + thumbnail = scrapedthumbnail + plot = scrapedplot + year = scrapedyear + itemlist.append( + Item(channel=item.channel, + action="findvideos", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart='https://s31.postimg.org/puxmvsi7v/cinecalidad.png', + contentTitle=contentTitle, + infoLabels={'year': year}, + language=item.language, + context=autoplay.context + )) + + try: + patron = "<link rel='next' href='([^']+)' />" + next_page = re.compile(patron, re.DOTALL).findall(data) + itemlist.append(Item(channel=item.channel, + action="peliculas", + title="Página siguiente >>", + url=next_page[0], + fanart='https://s31.postimg.org/puxmvsi7v/cinecalidad.png', + language=item.language + )) + + except: + pass + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + + +def dec(item): + link = [] + val = item.split(' ') + link = map(int, val) + for i in range(len(link)): + link[i] = link[i] - 7 + real = ''.join(map(chr, link)) + return (real) + + +def findvideos(item): + servidor = {"http://uptobox.com/": "uptobox", + "http://userscloud.com/": "userscloud", + "https://my.pcloud.com/publink/show?code=": "pcloud", + "http://thevideos.tv/": "thevideos", + "http://ul.to/": "uploadedto", + "http://turbobit.net/": "turbobit", + "http://www.cinecalidad.com/protect/v.html?i=": "cinecalidad", + "http://www.mediafire.com/download/": "mediafire", + "https://www.youtube.com/watch?v=": "youtube", + "http://thevideos.tv/embed-": "thevideos", + "//www.youtube.com/embed/": "youtube", + "http://ok.ru/video/": "okru", + "http://ok.ru/videoembed/": "okru", + "http://www.cinemaqualidade.com/protect/v.html?i=": "cinemaqualidade.com", + "http://usersfiles.com/": "usersfiles", + "https://depositfiles.com/files/": "depositfiles", + "http://www.nowvideo.sx/video/": "nowvideo", + "http://vidbull.com/": "vidbull", + "http://filescdn.com/": "filescdn", + "https://www.yourupload.com/watch/": "yourupload", + "http://www.cinecalidad.to/protect/gdredirect.php?l=": "directo", + "https://openload.co/embed/": "openload" + } + + logger.info() + itemlist = [] + duplicados = [] + data = httptools.downloadpage(item.url).data + + patron = 'dec\("([^"]+)"\)\+dec\("([^"]+)"\)' + matches = re.compile(patron, re.DOTALL).findall(data) + recomendados = ["uptobox", "thevideos", "nowvideo", "pcloud", "directo"] + for scrapedurl, scrapedtitle in matches: + + if dec(scrapedurl) in servidor: + server = servidor[dec(scrapedurl)] + title = "Ver " + item.contentTitle + " en " + servidor[dec(scrapedurl)].upper() + if 'yourupload' in dec(scrapedurl): + url = dec(scrapedurl).replace('watch', 'embed') + dec(scrapedtitle) + elif 'gdredirect' in dec(scrapedurl): + url = '' + url_list = [] + url_list += get_urls(item, dec(scrapedtitle)) + + for video in url_list: + new_title = title + ' (' + video['label'] + ')' + itemlist.append( + Item(channel=item.channel, + action="play", + title=new_title, + fulltitle=item.title, + url=video['file'], + language=IDIOMAS[item.language], + thumbnail=item.thumbnail, + plot=item.plot, + quality=video['label'], + server='directo' + )) + duplicados.append(title) + else: + + if 'youtube' in dec(scrapedurl): + title = '[COLOR orange]Trailer en Youtube[/COLOR]' + url = dec(scrapedurl) + 
dec(scrapedtitle) + + if (servidor[dec(scrapedurl)]) in recomendados: + title = title + "[COLOR limegreen] [I] (Recomedado) [/I] [/COLOR]" + thumbnail = servertools.guess_server_thumbnail(servidor[dec(scrapedurl)]) + plot = "" + if title not in duplicados and url != '': + new_item = Item(channel=item.channel, + action="play", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + plot=item.plot, + extra=item.thumbnail, + language=IDIOMAS[item.language], + quality='default', + server=server.lower() + ) + if 'Trailer' in new_item.title: + trailer_item = new_item + else: + itemlist.append(new_item) + duplicados.append(title) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + itemlist.append(trailer_item) + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle, + )) + + return itemlist + + +def get_urls(item, link): + from core import jsontools + logger.info() + url = 'http://www.cinecalidad.to/ccstream/ccstream.php' + headers = dict() + headers["Referer"] = item.url + post = 'link=%s' % link + + data = httptools.downloadpage(url, post=post, headers=headers).data + dict_data = jsontools.load(data) + logger.debug(dict_data['link']) + logger.debug(data) + return dict_data['link'] + + +def play(item): + logger.info() + itemlist = [] + logger.debug('item: %s' % item) + if 'juicyapi' not in item.url: + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = item.fulltitle + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.extra + videoitem.channel = item.channel + else: + itemlist.append(item) + + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = 'http://www.cinecalidad.to' + elif categoria == 'infantiles': + item.url = 'http://www.cinecalidad.to/genero-peliculas/infantil/' + itemlist = peliculas(item) + if itemlist[-1].title == 'Página siguiente >>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def busqueda(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + + from core import jsontools + data = jsontools.load(data) + + for entry in data["results"]: + title = entry["richSnippet"]["metatags"]["ogTitle"] + url = entry["url"] + plot = entry["content"] + plot = scrapertools.htmlclean(plot) + thumbnail = entry["richSnippet"]["metatags"]["ogImage"] + title = scrapertools.find_single_match(title, '(.*?) 
\(.*?\)') + year = re.sub(r'.*?\((\d{4})\)', '', title) + title = year + fulltitle = title + + new_item = item.clone(action="findvideos", + title=title, + fulltitle=fulltitle, + url=url, + thumbnail=thumbnail, + contentTitle=title, + contentType="movie", + plot=plot, + infoLabels={'year': year, 'sinopsis': plot} + ) + itemlist.append(new_item) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)')) + totalresults = int(data["cursor"]["resultCount"]) + if actualpage + 20 <= totalresults: + url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20)) + itemlist.append( + Item(channel=item.channel, + action="busqueda", + title=">> Página Siguiente", + url=url_next + )) + + return itemlist + + +def search(item, texto): + logger.info() + + data = httptools.downloadpage(host).data + cx = scrapertools.find_single_match(data, 'name="cx" value="(.*?)"') + texto = texto.replace(" ", "%20") + item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz" \ + "=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www" \ + ".google.com&start=0" % (cx, texto) + + try: + return busqueda(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] diff --git a/plugin.video.alfa/channels/cinefox.json b/plugin.video.alfa/channels/cinefox.json new file mode 100755 index 00000000..25637dfa --- /dev/null +++ b/plugin.video.alfa/channels/cinefox.json @@ -0,0 +1,157 @@ +{ + "id": "cinefox", + "name": "Cinefox", + "active": true, + "adult": false, + "language": "es", + "version": 1, + "thumbnail": "cinefox.png", + "banner": "cinefox.png", + "changes": [ + { + "date": "05/04/2017", + "description": "Cambio en los servidores" + }, + { + "date": "21/03/2017", + "description": "Adaptado a httptools y corregido episodios para videoteca" + }, + { + "date": "18/07/2016", + "description": "Primera version" + } + ], + "categories": [ + "movie", + "tvshow", + "latino", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "save_last_search", + "type": "bool", + "label": "Guardar última búsqueda", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Series", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1", + "Ninguno" + ] + }, + { + "id": "menu_info", + "type": "bool", + "label": "Mostrar menú intermedio película/episodio", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "last_page", + "type": "bool", + "label": "Ocultar opción elegir página en películas (Kodi)", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": 
"filtro_defecto_peliculas", + "type": "label", + "enabled": true, + "visible": false + }, + { + "id": "pers_peliculas1", + "type": "label", + "enabled": true, + "visible": false + }, + { + "id": "pers_peliculas2", + "type": "label", + "enabled": true, + "visible": false + }, + { + "pers_peliculas3": { + "type": "label", + "enabled": true, + "visible": false + } + }, + { + "id": "filtro_defecto_series", + "type": "label", + "enabled": true, + "visible": false + }, + { + "id": "pers_series1", + "type": "label", + "enabled": true, + "visible": false + }, + { + "id": "pers_series2", + "type": "label", + "enabled": true, + "visible": false + }, + { + "id": "pers_series3", + "type": "label", + "enabled": true, + "visible": false + }, + { + "id": "last_search", + "type": "text", + "enabled": true, + "visible": false + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/cinefox.py b/plugin.video.alfa/channels/cinefox.py new file mode 100755 index 00000000..770b75a8 --- /dev/null +++ b/plugin.video.alfa/channels/cinefox.py @@ -0,0 +1,745 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +__modo_grafico__ = config.get_setting('modo_grafico', 'cinefox') +__perfil__ = int(config.get_setting('perfil', "cinefox")) +__menu_info__ = config.get_setting('menu_info', 'cinefox') + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']] +if __perfil__ < 3: + color1, color2, color3, color4, color5 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = "" + +host = "http://www.cinefox.tv" + + +def mainlist(item): + logger.info() + item.text_color = color1 + itemlist = [] + + itemlist.append(item.clone(action="seccion_peliculas", title="Películas", fanart="http://i.imgur.com/PjJaW8o.png", + url="http://www.cinefox.tv/catalogue?type=peliculas")) + # Seccion series + itemlist.append(item.clone(action="seccion_series", title="Series", + url="http://www.cinefox.tv/ultimos-capitulos", fanart="http://i.imgur.com/9loVksV.png")) + + itemlist.append(item.clone(action="peliculas", title="Documentales", fanart="http://i.imgur.com/Q7fsFI6.png", + url="http://www.cinefox.tv/catalogue?type=peliculas&genre=documental")) + + if config.get_setting("adult_mode") != 0: + itemlist.append(item.clone(action="peliculas", title="Sección Adultos +18", + url="http://www.cinefox.tv/catalogue?type=adultos", + fanart="http://i.imgur.com/kIvE1Zh.png")) + + itemlist.append(item.clone(title="Buscar...", action="local_search")) + itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://www.cinefox.tv/search?q=%s" % texto + try: + return busqueda(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def local_search(item): + 
logger.info() + text = "" + if config.get_setting("save_last_search", item.channel): + text = config.get_setting("last_search", item.channel) + from platformcode import platformtools + texto = platformtools.dialog_input(default=text, heading="Buscar en Cinefox") + if texto is None: + return + + if config.get_setting("save_last_search", item.channel): + config.set_setting("last_search", texto, item.channel) + + return search(item, texto) + + +def busqueda(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + patron = '<div class="poster-media-card">(.*?)(?:<li class="search-results-item media-item">|<footer>)' + bloque = scrapertools.find_multiple_matches(data, patron) + for match in bloque: + patron = 'href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?' \ + '<p class="search-results-main-info">.*?del año (\d+).*?' \ + 'p class.*?>(.*?)<' + matches = scrapertools.find_multiple_matches(match, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, year, plot in matches: + scrapedtitle = scrapedtitle.capitalize() + item.infoLabels["year"] = year + plot = scrapertools.htmlclean(plot) + if "/serie/" in scrapedurl: + action = "episodios" + show = scrapedtitle + scrapedurl += "/episodios" + title = " [Serie]" + contentType = "tvshow" + elif "/pelicula/" in scrapedurl: + action = "menu_info" + show = "" + title = " [Película]" + contentType = "movie" + else: + continue + title = scrapedtitle + title + " (" + year + ")" + itemlist.append(item.clone(action=action, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, + contentTitle=scrapedtitle, fulltitle=scrapedtitle, + plot=plot, show=show, text_color=color2, contentType=contentType)) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Más resultados') + if next_page != "": + next_page = urlparse.urljoin(host, next_page) + itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Siguiente", url=next_page, + thumbnail=item.thumbnail, text_color=color3)) + + return itemlist + + +def filtro(item): + logger.info() + + list_controls = [] + valores = {} + strings = {} + # Se utilizan los valores por defecto/guardados o los del filtro personalizado + if not item.values: + valores_guardados = config.get_setting("filtro_defecto_" + item.extra, item.channel) + else: + valores_guardados = item.values + item.values = "" + if valores_guardados: + dict_values = valores_guardados + else: + dict_values = None + if dict_values: + dict_values["filtro_per"] = 0 + + excluidos = ['País', 'Películas', 'Series', 'Destacar'] + data = httptools.downloadpage(item.url).data + matches = scrapertools.find_multiple_matches(data, '<div class="dropdown-sub[^>]+>(\S+)(.*?)</ul>') + i = 0 + for filtro_title, values in matches: + if filtro_title in excluidos: continue + + filtro_title = filtro_title.replace("Tendencia", "Ordenar por") + id = filtro_title.replace("Género", "genero").replace("Año", "year").replace(" ", "_").lower() + list_controls.append({'id': id, 'label': filtro_title, 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True}) + valores[id] = [] + valores[id].append('') + strings[filtro_title] = [] + list_controls[i]['lvalues'] = [] + if filtro_title == "Ordenar por": + list_controls[i]['lvalues'].append('Más recientes') + strings[filtro_title].append('Más recientes') + else: + list_controls[i]['lvalues'].append('Cualquiera') + strings[filtro_title].append('Cualquiera') + 
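        # Each dropdown <li> carries both the query-string value and the
        # visible label; the label feeds the dialog (lvalues/strings) while
        # the raw value is kept in item.valores to build the filter URL.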
patron = '<li>.*?(?:genre|release|quality|language|order)=([^"]+)">([^<]+)<' + matches_v = scrapertools.find_multiple_matches(values, patron) + for value, key in matches_v: + if value == "action-adventure": continue + list_controls[i]['lvalues'].append(key) + valores[id].append(value) + strings[filtro_title].append(key) + + i += 1 + + item.valores = valores + item.strings = strings + if "Filtro Personalizado" in item.title: + return filtrado(item, valores_guardados) + + list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, + 'type': 'label', 'default': '', 'visible': True}) + list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + list_controls.append({'id': 'filtro_per', 'label': 'Guardar filtro en acceso directo...', 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No guardar', 'Filtro 1', + 'Filtro 2', 'Filtro 3']}) + list_controls.append({'id': 'remove', 'label': 'Eliminar filtro personalizado...', 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No eliminar', 'Filtro 1', + 'Filtro 2', 'Filtro 3']}) + + from platformcode import platformtools + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Filtra los resultados", item=item, callback='filtrado') + + +def filtrado(item, values): + values_copy = values.copy() + # Guarda el filtro para que sea el que se cargue por defecto + if "save" in values and values["save"]: + values_copy.pop("remove") + values_copy.pop("filtro_per") + values_copy.pop("save") + config.set_setting("filtro_defecto_" + item.extra, values_copy, item.channel) + + # Elimina el filtro personalizado elegido + if "remove" in values and values["remove"] != 0: + config.set_setting("pers_" + item.extra + str(values["remove"]), "", item.channel) + + values_copy = values.copy() + # Guarda el filtro en un acceso directo personalizado + if "filtro_per" in values and values["filtro_per"] != 0: + index = item.extra + str(values["filtro_per"]) + values_copy.pop("filtro_per") + values_copy.pop("save") + values_copy.pop("remove") + config.set_setting("pers_" + index, values_copy, item.channel) + + genero = item.valores["genero"][values["genero"]] + year = item.valores["year"][values["year"]] + calidad = item.valores["calidad"][values["calidad"]] + idioma = item.valores["idioma"][values["idioma"]] + order = item.valores["ordenar_por"][values["ordenar_por"]] + + strings = [] + for key, value in dict(item.strings).items(): + key2 = key.replace("Género", "genero").replace("Año", "year").replace(" ", "_").lower() + strings.append(key + ": " + value[values[key2]]) + + item.valores = "Filtro: " + ", ".join(sorted(strings)) + item.strings = "" + item.url = "http://www.cinefox.tv/catalogue?type=%s&genre=%s&release=%s&quality=%s&language=%s&order=%s" % \ + (item.extra, genero, year, calidad, idioma, order) + + return globals()[item.extra](item) + + +def seccion_peliculas(item): + logger.info() + itemlist = [] + # Seccion peliculas + itemlist.append(item.clone(action="peliculas", title="Novedades", fanart="http://i.imgur.com/PjJaW8o.png", + url="http://www.cinefox.tv/catalogue?type=peliculas")) + itemlist.append(item.clone(action="peliculas", title="Estrenos", + url="http://www.cinefox.tv/estrenos-de-cine", fanart="http://i.imgur.com/PjJaW8o.png")) + itemlist.append(item.clone(action="filtro", title="Filtrar películas", extra="peliculas", + 
url="http://www.cinefox.tv/catalogue?type=peliculas", + fanart="http://i.imgur.com/PjJaW8o.png")) + # Filtros personalizados para peliculas + for i in range(1, 4): + filtros = config.get_setting("pers_peliculas" + str(i), item.channel) + if filtros: + title = "Filtro Personalizado " + str(i) + new_item = item.clone() + new_item.values = filtros + itemlist.append(new_item.clone(action="filtro", title=title, fanart="http://i.imgur.com/PjJaW8o.png", + url="http://www.cinefox.tv/catalogue?type=peliculas", extra="peliculas")) + itemlist.append(item.clone(action="mapa", title="Mapa de películas", extra="peliculas", + url="http://www.cinefox.tv/mapa-de-peliculas", + fanart="http://i.imgur.com/PjJaW8o.png")) + + return itemlist + + +def seccion_series(item): + logger.info() + itemlist = [] + # Seccion series + itemlist.append(item.clone(action="ultimos", title="Últimos capítulos", + url="http://www.cinefox.tv/ultimos-capitulos", fanart="http://i.imgur.com/9loVksV.png")) + itemlist.append(item.clone(action="series", title="Series recientes", + url="http://www.cinefox.tv/catalogue?type=series", + fanart="http://i.imgur.com/9loVksV.png")) + itemlist.append(item.clone(action="filtro", title="Filtrar series", extra="series", + url="http://www.cinefox.tv/catalogue?type=series", + fanart="http://i.imgur.com/9loVksV.png")) + # Filtros personalizados para series + for i in range(1, 4): + filtros = config.get_setting("pers_series" + str(i), item.channel) + if filtros: + title = " Filtro Personalizado " + str(i) + new_item = item.clone() + new_item.values = filtros + itemlist.append(new_item.clone(action="filtro", title=title, fanart="http://i.imgur.com/9loVksV.png", + url="http://www.cinefox.tv/catalogue?type=series", extra="series")) + itemlist.append(item.clone(action="mapa", title="Mapa de series", extra="series", + url="http://www.cinefox.tv/mapa-de-series", + fanart="http://i.imgur.com/9loVksV.png")) + + return itemlist + + +def mapa(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + patron = '<li class="sitemap-initial"> <a class="initial-link" href="([^"]+)">(.*?)</a>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle in matches: + scrapedurl = host + scrapedurl + scrapedtitle = scrapedtitle.capitalize() + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=item.extra, extra="mapa")) + + return itemlist + + +def peliculas(item): + logger.info() + + itemlist = [] + if "valores" in item and item.valores: + itemlist.append(item.clone(action="", title=item.valores, text_color=color4)) + + if __menu_info__: + action = "menu_info" + else: + action = "findvideos" + + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_multiple_matches(data, + '<div class="media-card "(.*?)<div class="hidden-info">') + for match in bloque: + if item.extra == "mapa": + patron = '.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>' + matches = scrapertools.find_multiple_matches(match, patron) + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(host, scrapedurl) + itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, extra="media", + thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle, + text_color=color2, contentType="movie")) + else: + patron = '<div class="audio-info">(.*?)</div>(.*?)' \ + 'src="([^"]+)".*?href="([^"]+)">([^<]+)</a>' + matches = scrapertools.find_multiple_matches(match, patron) + + for idiomas, calidad, 
scrapedthumbnail, scrapedurl, scrapedtitle in matches: + calidad = scrapertools.find_single_match(calidad, '<div class="quality-info".*?>([^<]+)</div>') + if calidad: + calidad = calidad.capitalize().replace("Hd", "HD") + audios = [] + if "medium-es" in idiomas: audios.append('CAST') + if "medium-vs" in idiomas: audios.append('VOSE') + if "medium-la" in idiomas: audios.append('LAT') + if "medium-en" in idiomas or 'medium-"' in idiomas: + audios.append('V.O') + title = "%s [%s]" % (scrapedtitle, "/".join(audios)) + if calidad: + title += " (%s)" % calidad + url = urlparse.urljoin(host, scrapedurl) + + itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, extra="media", + thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle, + text_color=color2, contentType="movie")) + + next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente') + if next_page != "" and item.title != "": + itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Siguiente", url=next_page, + thumbnail=item.thumbnail, extra=item.extra, text_color=color3)) + + if not config.get_setting("last_page", item.channel) and config.is_xbmc(): + itemlist.append(Item(channel=item.channel, action="select_page", title="Ir a página...", url=next_page, + thumbnail=item.thumbnail, text_color=color5)) + + return itemlist + + +def ultimos(item): + logger.info() + item.text_color = color2 + + if __menu_info__: + action = "menu_info_episode" + else: + action = "episodios" + + itemlist = [] + data = httptools.downloadpage(item.url).data + + bloque = scrapertools.find_multiple_matches(data, '<div class="media-card "(.*?)<div class="info-availability ' + 'one-line">') + for match in bloque: + patron = '<div class="audio-info">(.*?)<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>' + matches = scrapertools.find_multiple_matches(match, patron) + for idiomas, scrapedthumbnail, scrapedurl, scrapedtitle in matches: + show = re.sub(r'(\s*[\d]+x[\d]+\s*)', '', scrapedtitle) + audios = [] + if "medium-es" in idiomas: audios.append('CAST') + if "medium-vs" in idiomas: audios.append('VOSE') + if "medium-la" in idiomas: audios.append('LAT') + if "medium-en" in idiomas or 'medium-"' in idiomas: + audios.append('V.O') + title = "%s - %s" % (show, re.sub(show, '', scrapedtitle)) + if audios: + title += " [%s]" % "/".join(audios) + url = urlparse.urljoin(host, scrapedurl) + itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=scrapedthumbnail, + contentTitle=show, fulltitle=show, show=show, + text_color=color2, extra="ultimos", contentType="tvshow")) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente') + if next_page != "": + itemlist.append(item.clone(action="ultimos", title=">> Siguiente", url=next_page, text_color=color3)) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + + if "valores" in item: + itemlist.append(item.clone(action="", title=item.valores, text_color=color4)) + + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_multiple_matches(data, '<div class="media-card "(.*?)<div class="hidden-info">') + for match in bloque: + patron = '<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>' + matches = scrapertools.find_multiple_matches(match, patron) + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(host, scrapedurl 
+ "/episodios") + itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=url, + thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle, + show=scrapedtitle, text_color=color2, contentType="tvshow")) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente') + if next_page != "": + title = ">> Siguiente - Página " + scrapertools.find_single_match(next_page, 'page=(\d+)') + itemlist.append(Item(channel=item.channel, action="series", title=title, url=next_page, + thumbnail=item.thumbnail, extra=item.extra, text_color=color3)) + + return itemlist + + +def menu_info(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<') + if year != "" and not "tmdb_id" in item.infoLabels: + try: + from core import tmdb + item.infoLabels["year"] = year + tmdb.set_infoLabels_item(item, __modo_grafico__) + except: + pass + + if item.infoLabels["plot"] == "": + sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece') + item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis) + + id = scrapertools.find_single_match(item.url, '/(\d+)/') + data_trailer = httptools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id).data + trailer_url = jsontools.load(data_trailer)["video"]["url"] + if trailer_url != "": + item.infoLabels["trailer"] = trailer_url + + title = "Ver enlaces %s - [" + item.contentTitle + "]" + itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="media", type="streaming")) + itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="media", type="download")) + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta", context="")) + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5, + title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail, + fanart=item.fanart, fulltitle=item.fulltitle, + extra="media|")) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + if item.extra == "ultimos": + data = httptools.downloadpage(item.url).data + item.url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"') + item.url += "/episodios" + + data = httptools.downloadpage(item.url).data + data_season = data[:] + + if "episodios" in item.extra or not __menu_info__ or item.path: + action = "findvideos" + else: + action = "menu_info_episode" + + seasons = scrapertools.find_multiple_matches(data, '<a href="([^"]+)"[^>]+><span class="season-toggle') + for i, url in enumerate(seasons): + if i != 0: + data_season = httptools.downloadpage(url, add_referer=True).data + patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>' + matches = scrapertools.find_multiple_matches(data_season, patron) + for scrapedurl, episode, scrapedtitle in matches: + new_item = item.clone(action=action, url=scrapedurl, text_color=color2, contentType="episode") + new_item.contentSeason = episode.split("x")[0] + new_item.contentEpisodeNumber = episode.split("x")[1] + + new_item.title = episode + " - " + scrapedtitle + new_item.extra = "episode" + 
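            # The 'episode' string scraped above has the form "1x02", so
            # episode.split("x") yields ["1", "02"]: season number first,
            # episode number second, both kept as strings for infoLabels.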
if "episodios" in item.extra or item.path: + new_item.extra = "episode|" + itemlist.append(new_item) + + if "episodios" not in item.extra and not item.path: + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + itemlist.reverse() + if "episodios" not in item.extra and not item.path: + id = scrapertools.find_single_match(item.url, '/(\d+)/') + data_trailer = httptools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id).data + item.infoLabels["trailer"] = jsontools.load(data_trailer)["video"]["url"] + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta")) + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, action="add_serie_to_library", text_color=color5, + title="Añadir serie a la videoteca", show=item.show, thumbnail=item.thumbnail, + url=item.url, fulltitle=item.fulltitle, fanart=item.fanart, + extra="episodios###episodios", + contentTitle=item.fulltitle)) + + return itemlist + + +def menu_info_episode(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + if item.show == "": + item.show = scrapertools.find_single_match(data, 'class="h1-like media-title".*?>([^<]+)</a>') + + episode = scrapertools.find_single_match(data, '<span class="indicator">([^<]+)</span>') + item.infoLabels["season"] = episode.split("x")[0] + item.infoLabels["episode"] = episode.split("x")[1] + + try: + from core import tmdb + tmdb.set_infoLabels_item(item, __modo_grafico__) + except: + pass + + if item.infoLabels["plot"] == "": + sinopsis = scrapertools.find_single_match(data, 'id="episode-plot">(.*?)</p>') + if not "No hay sinopsis" in sinopsis: + item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis) + + title = "Ver enlaces %s - [" + item.show + " " + episode + "]" + itemlist.append(item.clone(action="findvideos", title=title % "Online", extra="episode", type="streaming")) + itemlist.append(item.clone(action="findvideos", title=title % "de Descarga", extra="episode", type="download")) + + siguiente = scrapertools.find_single_match(data, '<a class="episode-nav-arrow next" href="([^"]+)" title="([^"]+)"') + if siguiente: + titulo = ">> Siguiente Episodio - [" + siguiente[1] + "]" + itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=siguiente[0], extra="", + text_color=color1)) + + patron = '<a class="episode-nav-arrow previous" href="([^"]+)" title="([^"]+)"' + anterior = scrapertools.find_single_match(data, patron) + if anterior: + titulo = "<< Episodio Anterior - [" + anterior[1] + "]" + itemlist.append(item.clone(action="menu_info_episode", title=titulo, url=anterior[0], extra="", + text_color=color3)) + + url_serie = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"') + url_serie += "/episodios" + itemlist.append(item.clone(title="Ir a la lista de capítulos", action="episodios", url=url_serie, extra="", + text_color=color4)) + + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta", context="")) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + if not "|" in item.extra and not __menu_info__: + data = httptools.downloadpage(item.url, add_referer=True).data + year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<') + if year != "" and not "tmdb_id" in item.infoLabels: + try: + from core import tmdb + 
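            # Metadata enrichment is best effort: the bare except just below
            # keeps findvideos() usable even if the tmdb lookup fails or times out.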
item.infoLabels["year"] = year + tmdb.set_infoLabels_item(item, __modo_grafico__) + except: + pass + + if item.infoLabels["plot"] == "": + sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece') + item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis) + + id = scrapertools.find_single_match(item.url, '/(\d+)/') + if "|" in item.extra or not __menu_info__: + extra = item.extra + if "|" in item.extra: + extra = item.extra[:-1] + url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming") + itemlist.extend(get_enlaces(item, url, "Online")) + url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download") + itemlist.extend(get_enlaces(item, url, "de Descarga")) + + if extra == "media": + data_trailer = httptools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id).data + trailer_url = jsontools.load(data_trailer)["video"]["url"] + if trailer_url != "": + item.infoLabels["trailer"] = trailer_url + + title = "Ver enlaces %s - [" + item.contentTitle + "]" + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta", context="")) + + if config.get_videolibrary_support() and not "|" in item.extra: + itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5, + title="Añadir película a la videoteca", url=item.url, thumbnail=item.thumbnail, + fanart=item.fanart, fulltitle=item.fulltitle, + extra="media|")) + else: + url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, item.extra, item.type) + type = item.type.replace("streaming", "Online").replace("download", "de Descarga") + itemlist.extend(get_enlaces(item, url, type)) + + return itemlist + + +def get_enlaces(item, url, type): + itemlist = [] + itemlist.append(item.clone(action="", title="Enlaces %s" % type, text_color=color1)) + + data = httptools.downloadpage(url, add_referer=True).data + patron = '<div class="available-source".*?data-url="([^"]+)".*?class="language.*?title="([^"]+)"' \ + '.*?class="source-name.*?>\s*([^<]+)<.*?<span class="quality-text">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + if matches: + for scrapedurl, idioma, server, calidad in matches: + if server == "streamin": server = "streaminto" + if server == "waaw" or server == "miracine": server = "netutv" + if server == "ul": server = "uploadedto" + if server == "player": server = "vimpleru" + if servertools.is_server_enabled(server): + scrapedtitle = " Ver en " + server.capitalize() + " [" + idioma + "/" + calidad + "]" + itemlist.append(item.clone(action="play", url=scrapedurl, title=scrapedtitle, text_color=color2, + extra="", server=server)) + + if len(itemlist) == 1: + itemlist.append(item.clone(title=" No hay enlaces disponibles", action="", text_color=color2)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + if item.extra != "": + post = "id=%s" % item.extra + data = httptools.downloadpage("http://www.cinefox.tv/goto/", post=post, add_referer=True).data + + item.url = scrapertools.find_single_match(data, 'document.location\s*=\s*"([^"]+)"') + + url = item.url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=") + url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/") + if item.server: + enlaces = servertools.findvideosbyserver(url, item.server)[0] + else: + enlaces = servertools.findvideos(url)[0] + 
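    # findvideos()/findvideosbyserver() return a list of candidate links; judging
    # by the indices used below, each entry is a (title, url, server) style
    # tuple, so enlaces[1] is the playable url and enlaces[2] the server id.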
    itemlist.append(item.clone(url=enlaces[1], server=enlaces[2]))

    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = "http://www.cinefox.tv/catalogue?type=peliculas"
            item.action = "peliculas"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

        if categoria == "series":
            item.url = "http://www.cinefox.tv/ultimos-capitulos"
            item.action = "ultimos"
            itemlist = ultimos(item)

            if itemlist[-1].action == "ultimos":
                itemlist.pop()

    # The exception is caught so a failing channel does not break the "newest" channel
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def select_page(item):
    import xbmcgui
    dialog = xbmcgui.Dialog()
    number = dialog.numeric(0, "Introduce el número de página")
    if number != "":
        item.url = re.sub(r'page=(\d+)', "page=" + number, item.url)

    return peliculas(item)
diff --git a/plugin.video.alfa/channels/cinefoxtv.json b/plugin.video.alfa/channels/cinefoxtv.json
new file mode 100755
index 00000000..659a00fd
--- /dev/null
+++ b/plugin.video.alfa/channels/cinefoxtv.json
@@ -0,0 +1,54 @@
{
  "id": "cinefoxtv",
  "name": "CineFoxTV",
  "active": true,
  "adult": false,
  "language": "es",
  "thumbnail": "https://s28.postimg.org/lytn2q1tp/cinefoxtv.png",
  "banner": "cinefoxtv.png",
  "version": 1,
  "changes": [
    {
      "date": "22/05/2017",
      "description": "fix por cambio en la estructura"
    },
    {
      "date": "15/03/2017",
      "description": "limpieza código"
    },
    {
      "date": "09/02/2017",
      "description": "First release."
    }
  ],
  "categories": [
    "latino",
    "movie"
  ],
  "settings": [
    {
      "id": "include_in_global_search",
      "type": "bool",
      "label": "Incluir en busqueda global",
      "default": false,
      "enabled": false,
      "visible": false
    },
    {
      "id": "include_in_newest_peliculas",
      "type": "bool",
      "label": "Incluir en Novedades - Peliculas",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_infantiles",
      "type": "bool",
      "label": "Incluir en Novedades - Infantiles",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/cinefoxtv.py b/plugin.video.alfa/channels/cinefoxtv.py
new file mode 100755
index 00000000..08bcdade
--- /dev/null
+++ b/plugin.video.alfa/channels/cinefoxtv.py
@@ -0,0 +1,209 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import config
from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item

host = 'http://cinefoxtv.net/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

global duplicado
global itemlist
global temp_list
canal = 'cinefoxtv'

tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
           "Suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
           "Drama": "https://s16.postimg.org/94sia332d/drama.png",
           "Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
           "Aventuras": "https://s10.postimg.org/6su40czih/aventura.png",
           "Animacion": "https://s13.postimg.org/5on877l87/animacion.png",
           "Ciencia Ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
           "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
           "Documentales": "https://s16.postimg.org/7xjj4bmol/documental.png",
           "Musical": "https://s29.postimg.org/bbxmdh9c7/musical.png",
           "Western": "https://s23.postimg.org/lzyfbjzhn/western.png",
           "Belico": "https://s23.postimg.org/71itp9hcr/belica.png",
           "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
           "Biográfica": "https://s15.postimg.org/5lrpbx323/biografia.png",
           "Deporte": "https://s13.postimg.org/xuxf5h06v/deporte.png",
           "Fantástico": "https://s10.postimg.org/pbkbs6j55/fantastico.png",
           "Estrenos": "https://s21.postimg.org/fy69wzm93/estrenos.png",
           "Película 18+": "https://s15.postimg.org/exz7kysjf/erotica.png",
           "Thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png",
           "Familiar": "https://s7.postimg.org/6s7vdhqrf/familiar.png",
           "Romanticas": "https://s21.postimg.org/xfsj7ua0n/romantica.png",
           "Intriga": "https://s27.postimg.org/v9og43u2b/intriga.png",
           "Infantil": "https://s23.postimg.org/g5rmazozv/infantil.png"}


def mainlist(item):
    logger.info()

    itemlist = []

    itemlist.append(item.clone(title="Todas", action="lista", thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
                               fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', extra='peliculas/',
                               url=host + 'page/1.html'))

    itemlist.append(
        itemlist[-1].clone(title="Generos", action="generos", thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
                           fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', url=host))

    itemlist.append(
        itemlist[-1].clone(title="Mas Vistas", action="lista", thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
                           fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png',
                           url=host + 'top-peliculas-online/1.html'))

    itemlist.append(itemlist[-1].clone(title="Buscar", action="search",
                                       thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
                                       fanart='https://s30.postimg.org/pei7txpa9/buscar.png', url=host + 'search/'))

    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    duplicado = []
    max_items = 24
    next_page_url = ''

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = '"box_image_b.*?"><a href="([^"]+)" title=".*?><img src="([^"]+)" alt="(.*?)(\d{4}).*?"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if item.next_page != 'b':
        if len(matches) > max_items:
            next_page_url = item.url
            matches = matches[:max_items]
            next_page = 'b'
    else:
        matches = matches[max_items:]
        next_page = 'a'
        patron_next_page = '<a class="page dark gradient" href="([^"]+)">PROXIMO'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        if len(matches_next_page) > 0:
            next_page_url = urlparse.urljoin(item.url, matches_next_page[0])

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:

        url = scrapedurl
        thumbnail = scrapedthumbnail
        contentTitle = re.sub(r"\(.*?\)|\/.*?|\(|\)|.*?\/|!", "", scrapedtitle)
        title = scrapertools.decodeHtmlentities(contentTitle) + '(' + scrapedyear + ')'
        fanart = ''
        plot = ''

        if url not in duplicado:
            itemlist.append(
                Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail, plot=plot,
                     fanart=fanart, contentTitle=contentTitle, infoLabels={'year': scrapedyear}))
            duplicado.append(url)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if next_page_url != '':
        itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page_url,
                             thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', extra=item.extra,
                             next_page=next_page))
    return itemlist


def generos(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^"]+)"><i class="fa fa-caret-right"><\/i> <strong>Películas de (.*?)<\/strong><\/a><\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        if scrapedtitle in tgenero:
            thumbnail = tgenero[scrapedtitle]
        else:
            thumbnail = ''
        title = scrapedtitle
        fanart = ''
        plot = ''

        if title != 'Series':
            itemlist.append(
                Item(channel=item.channel, action='lista', title=title, url=url, thumbnail=thumbnail, plot=plot,
                     fanart=fanart))
    return itemlist


def getinfo(page_url):
    logger.info()
    data = httptools.downloadpage(page_url).data
    plot = scrapertools.find_single_match(data, '<\/em>\.(?:\s*|.)(.*?)\s*<\/p>')
    info = plot

    return info


def findvideos(item):
    logger.info()
    itemlist = []
    info = getinfo(item.url)
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = 'src="(.*?)" style="border:none;'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        itemlist.extend(servertools.find_video_items(data=scrapedurl))

    for videoitem in itemlist:
        videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
        videoitem.channel = item.channel
        videoitem.plot = info
        videoitem.action = "play"
        videoitem.folder = False

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "-")
    item.url = item.url + texto
    if texto != '':
        return lista(item)


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    # categoria='peliculas'
    try:
        if categoria == 'peliculas':
            item.url = host + 'page/1.html'
        elif categoria == 'infantiles':
            item.url = host + 'peliculas-de-genero/infantil/1.html'
        itemlist = lista(item)
        if itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
diff --git a/plugin.video.alfa/channels/cinehindi.json b/plugin.video.alfa/channels/cinehindi.json
new file mode 100755
index 00000000..ec7879d4
--- /dev/null
+++ b/plugin.video.alfa/channels/cinehindi.json
@@ -0,0 +1,19 @@
{
  "id": "cinehindi",
  "name": "CineHindi",
  "active": true,
  "adult": false,
  "language": "es",
  "thumbnail": "cinehindi.png",
  "banner": "http://i.imgur.com/cau9TVe.png",
  "version": 1,
  "changes": [
    {
      "date": "25/05/2017",
      "description": "Primera versión completa del canal"
    }
  ],
  "categories": [
    "movie"
  ]
}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/cinehindi.py b/plugin.video.alfa/channels/cinehindi.py
new file mode 100755
index 00000000..e3f59a3f
--- /dev/null
+++ b/plugin.video.alfa/channels/cinehindi.py
@@ -0,0 +1,148 @@
# -*- coding: UTF-8 -*-

import re
import urlparse

from core import config
from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item

host = "http://www.cinehindi.com/"


def mainlist(item):
    logger.info()

    itemlist = list()

    itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host))
    itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host))
    itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
                         url=urlparse.urljoin(host, "proximamente")))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s=")))
    return itemlist


def genero(item):
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(host).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_generos = '<ul id="menu-submenu" class=""><li id="menu-item-.+?"(.+)<\/li><\/ul>'
    data_generos = scrapertools.find_single_match(data, patron_generos)
    patron = 'class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-.*?"><a href="(.*?)">(.*?)<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data_generos, patron)
    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle != 'Próximas Películas':
            itemlist.append(item.clone(action='lista', title=scrapedtitle, url=scrapedurl))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        return lista(item)


def proximas(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)  # Strip tabs, double spaces, line breaks, etc.
    patron = 'class="item">.*?'  # Every movie item on this site starts with this
    patron += '<a href="([^"]+).*?'  # scrapedurl
    patron += '<img src="([^"]+).*?'  # scrapedthumbnail
    patron += 'alt="([^"]+).*?'  # scrapedtitle
    patron += '<span class="player">.+?<span class="year">([^"]+)<\/span>'  # scrapedyear
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        if "ver" in scrapedurl:
            scrapedtitle = scrapedtitle + " [" + scrapedyear + "]"
        else:
            scrapedtitle = scrapedtitle + " [" + scrapedyear + "]" + '(Proximamente)'
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="findvideos", extra=scrapedtitle,
                                   show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie",
                                   context=["buscar_trailer"]))
    # Pagination
    patron_pag = '<a rel=.+?nofollow.+? class=.+?page larger.+? href=.+?(.+?)proximamente.+?>([^"]+)<\/a>'
    pagina = scrapertools.find_multiple_matches(data, patron_pag)
    for next_page_url, i in pagina:
        if int(i) == 2:
            item.url = next_page_url + 'proximamente/page/' + str(i) + '/'
            itemlist.append(Item(channel=item.channel, action="proximas", title=">> Página siguiente", url=item.url,
                                 thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))

    return itemlist


def lista(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)  # Strip tabs, double spaces, line breaks, etc.
    patron = 'class="item">.*?'  # Every movie item on this site starts with this
    patron += '<a href="([^"]+).*?'  # scrapedurl
    patron += '<img src="([^"]+).*?'  # scrapedthumbnail
    patron += 'alt="([^"]+).*?'  # scrapedtitle
    patron += '<span class="ttx">([^<]+).*?'  # scrapedplot
    patron += '<div class="fixyear">(.*?)</span></div></div>'  # scrapedfixyear

    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedfixyear in matches:
        patron = '<span class="year">([^<]+)'  # scrapedyear
        scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
        if scrapedyear:
            scrapedtitle += ' (%s)' % (scrapedyear)

        patron = '<span class="calidad2">([^<]+).*?'  # scrapedquality
        scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
        if scrapedquality:
            scrapedtitle += ' [%s]' % (scrapedquality)

        itemlist.append(
            item.clone(title=scrapedtitle, url=scrapedurl, plot=scrapedplot, action="findvideos", extra=scrapedtitle,
                       show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))

    # Pagination
    patron_genero = '<h1>([^"]+)<\/h1>'
    genero = scrapertools.find_single_match(data, patron_genero)
    if genero == "Romance" or genero == "Drama":
        patron = "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente "
    else:
        patron = "<span class='current'>.+?href='(.+?)'>"

    next_page_url = scrapertools.find_single_match(data, patron)

    if next_page_url != "":
        item.url = next_page_url
        itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
                             thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
    return itemlist


def findvideos(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    itemlist.extend(servertools.find_video_items(data=data))
    patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
    show = scrapertools.find_single_match(data, patron_show)
    for videoitem in itemlist:
        videoitem.channel = item.channel
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

    return itemlist
diff --git a/plugin.video.alfa/channels/cinetemagay.json b/plugin.video.alfa/channels/cinetemagay.json
new file mode 100755
index 00000000..ee01d7df
--- /dev/null
+++ b/plugin.video.alfa/channels/cinetemagay.json
@@ -0,0 +1,23 @@
{
  "id": "cinetemagay",
  "name": "Cinetemagay",
  "active": true,
  "adult": true,
  "language": "es",
  "thumbnail": "cinetemagay.png",
  "banner": "cinetemagay.png",
  "version": 1,
  "changes": [
    {
      "date": "15/03/2017",
      "description": "limpieza código"
    },
    {
      "date": "05/08/2016",
      "description": "Eliminado de sección películas"
    }
  ],
  "categories": [
    "adult"
  ]
}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/cinetemagay.py b/plugin.video.alfa/channels/cinetemagay.py
new file mode 100755
index 00000000..5e40e0aa
--- /dev/null
+++ b/plugin.video.alfa/channels/cinetemagay.py
@@ -0,0 +1,128 @@
# -*- coding: utf-8 -*-

import os
import re

from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item

IMAGES_PATH = os.path.join(config.get_runtime_path(), 'resources', 'images', 'cinetemagay')


def strip_tags(value):
    return re.sub(r'<[^>]*?>', '', value)


def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay latinoamericano",
                         url="http://cinegaylatinoamericano.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
                         thumbnail="http://www.americaeconomia.com/sites/default/files/imagecache/foto_nota/homosexual1.jpg"))
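    # All five of these sources are Blogger blogs read through the standard
    # "feeds/posts/default" Atom endpoint; the max-results/start-index query
    # parameters drive the 100-entry paging that lista() relies on.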
url="http://cinegaylatinoamericano.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1", + thumbnail="http://www.americaeconomia.com/sites/default/files/imagecache/foto_nota/homosexual1.jpg")) + itemlist.append(Item(channel=item.channel, action="lista", title="Cine y cortos gay", + url="http://cineycortosgay.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1", + thumbnail="http://www.elmolar.org/wp-content/uploads/2015/05/cortometraje.jpg")) + itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay online (México)", + url="http://cinegayonlinemexico.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1", + thumbnail="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTmmqL6tS2Ced1VoxlGQT0q-ibPEz1DCV3E1waHFDI5KT0pg1lJ")) + itemlist.append(Item(channel=item.channel, action="lista", title="Sentido gay", + url="http://www.sentidogay.blogspot.com.es//feeds/posts/default/?max-results=100&start-index=1", + thumbnail="http://1.bp.blogspot.com/-epOPgDD_MQw/VPGZGQOou1I/AAAAAAAAAkI/lC25GrukDuo/s1048/SentidoGay.jpg")) + itemlist.append(Item(channel=item.channel, action="lista", title="PGPA", + url="http://pgpa.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1", + thumbnail="http://themes.googleusercontent.com/image?id=0BwVBOzw_-hbMNTRlZjk2YWMtYTVlMC00ZjZjLWI3OWEtMWEzZDEzYWVjZmQ4")) + + return itemlist + + +def lista(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + + # Extrae las entradas (carpetas) + patronvideos = '<img .*?src="(.*?)"' + patronvideos += "(.*?)<link rel='alternate' type='text/html' href='([^']+)' title='([^']+)'.*?>" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + + for match in matches: + scrapedtitle = match[3] + scrapedtitle = scrapedtitle.replace("'", "'") + scrapedtitle = scrapedtitle.replace(""", "'") + scrapedtitle = scrapedtitle.replace("&amp;", "'") + scrapedtitle = scrapedtitle.replace("&#39;", "'") + scrapedurl = match[2] + scrapedthumbnail = match[0] + imagen = "" + scrapedplot = match[1] + tipo = match[1] + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + scrapedplot = "<" + scrapedplot + scrapedplot = scrapedplot.replace(">", ">") + scrapedplot = scrapedplot.replace("<", "<") + scrapedplot = scrapedplot.replace("</div>", "\n") + scrapedplot = scrapedplot.replace("<br />", "\n") + scrapedplot = scrapedplot.replace("&", "") + scrapedplot = scrapedplot.replace("nbsp;", "") + scrapedplot = strip_tags(scrapedplot) + itemlist.append( + Item(channel=item.channel, action="detail", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + plot=scrapedurl + scrapedplot, folder=True)) + + variable = item.url.split("index=")[1] + variable = int(variable) + variable += 100 + variable = str(variable) + variable_url = item.url.split("index=")[0] + url_nueva = variable_url + "index=" + variable + itemlist.append( + Item(channel=item.channel, action="lista", title="Ir a la página siguiente (desde " + variable + ")", + url=url_nueva, thumbnail="", plot="Pasar a la página siguiente (en grupos de 100)\n\n" + url_nueva)) + + return itemlist + + +def detail(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = scrapertools.cachePage(item.url) + + data = data.replace("%3A", ":") + data = data.replace("%2F", "/") + data = data.replace("%3D", "=") + data = data.replace("%3", "?") + data = data.replace("%26", "&") + descripcion = "" + plot = "" + 
patrondescrip = 'SINOPSIS:(.*?)' + matches = re.compile(patrondescrip, re.DOTALL).findall(data) + if len(matches) > 0: + descripcion = matches[0] + descripcion = descripcion.replace(" ", "") + descripcion = descripcion.replace("<br/>", "") + descripcion = descripcion.replace("\r", "") + descripcion = descripcion.replace("\n", " ") + descripcion = descripcion.replace("\t", " ") + descripcion = re.sub("<[^>]+>", " ", descripcion) + descripcion = descripcion + try: + plot = unicode(descripcion, "utf-8").encode("iso-8859-1") + except: + plot = descripcion + + # Busca los enlaces a los videos de servidores + video_itemlist = servertools.find_video_items(data=data) + for video_item in video_itemlist: + itemlist.append(Item(channel=item.channel, action="play", server=video_item.server, + title=item.title + " " + video_item.title, url=video_item.url, thumbnail=item.thumbnail, + plot=video_item.url, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/cinetux.json b/plugin.video.alfa/channels/cinetux.json new file mode 100755 index 00000000..f4547781 --- /dev/null +++ b/plugin.video.alfa/channels/cinetux.json @@ -0,0 +1,124 @@ +{ + "id": "cinetux", + "name": "Cinetux", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "cinetux.png", + "banner": "cinetux.png", + "fanart": "cinetux.jpg", + "version": 1, + "changes": [ + { + "date": "12/05/2017", + "description": "Arreglada paginación y enlaces directos" + }, + { + "date": "16/02/2017", + "description": "Adaptado a httptools y añadidos enlaces directos" + }, + { + "date": "08/07/2016", + "description": "Correciones y adaptaciones a la nueva version" + } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_documentales", + "type": "bool", + "label": "Incluir en Novedades - Documentales", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "filterlanguages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "VOSE", + "Latino", + "Español", + "No filtrar" + ] + }, + { + "id": "filterlinks", + "type": "list", + "label": "Mostrar enlaces de tipo...", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Solo Descarga", + "Solo Online", + "No filtrar" + ] + }, + { + "id": "viewmode", + "type": "list", + "label": "Elegir vista por defecto (Confluence)...", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Sinopsis", + "Miniatura", + "Lista" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/cinetux.py b/plugin.video.alfa/channels/cinetux.py new file mode 100755 index 
00000000..6f732a12 --- /dev/null +++ b/plugin.video.alfa/channels/cinetux.py @@ -0,0 +1,354 @@ +# -*- coding: utf-8 -*- + +import urlparse + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +CHANNEL_HOST = "http://www.cinetux.net/" + +# Configuracion del canal +__modo_grafico__ = config.get_setting('modo_grafico', 'cinetux') +__perfil__ = config.get_setting('perfil', 'cinetux') + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] + +viewmode_options = {0: 'movie_with_plot', 1: 'movie', 2: 'list'} +viewmode = viewmode_options[config.get_setting('viewmode', 'cinetux')] + + +def mainlist(item): + logger.info() + itemlist = [] + item.viewmode = viewmode + + itemlist.append(item.clone(title="Películas", text_color=color2, action="", text_bold=True)) + itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Directors%20Chair.png", + text_color=color1)) + itemlist.append(item.clone(action="vistas", title=" Más vistas", url="http://www.cinetux.net/mas-vistos/", + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Favorites.png", + text_color=color1)) + itemlist.append(item.clone(action="idioma", title=" Por idioma", text_color=color1)) + itemlist.append(item.clone(action="generos", title=" Por géneros", url=CHANNEL_HOST, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Genre.png", + text_color=color1)) + + url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/") + itemlist.append(item.clone(title="Documentales", text_bold=True, text_color=color2, action="")) + itemlist.append(item.clone(action="peliculas", title=" Novedades", url=url, text_color=color1, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Documentaries.png")) + url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/?orderby=title&order=asc&gdsr_order=asc") + itemlist.append(item.clone(action="peliculas", title=" Por orden alfabético", text_color=color1, url=url, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/A-Z.png")) + itemlist.append(item.clone(title="", action="")) + itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3)) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + item.url = "http://www.cinetux.net/?s=" + texto = texto.replace(" ", "+") + item.url = item.url + texto + try: + return peliculas(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = CHANNEL_HOST + item.action = "peliculas" + itemlist = peliculas(item) + + if 
itemlist[-1].action == "peliculas": + itemlist.pop() + + elif categoria == 'documentales': + item.url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/") + item.action = "peliculas" + itemlist = peliculas(item) + + if itemlist[-1].action == "peliculas": + itemlist.pop() + + elif categoria == 'infantiles': + item.url = urlparse.urljoin(CHANNEL_HOST, "genero/infantil/") + item.action = "peliculas" + itemlist = peliculas(item) + + if itemlist[-1].action == "peliculas": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + item.text_color = color2 + + # Descarga la página + data = httptools.downloadpage(item.url).data + + # Extrae las entradas (carpetas) + patron = '<div class="item">.*?<div class="audio">\s*([^<]*)<.*?href="([^"]+)".*?src="([^"]+)"' \ + '.*?<h3 class="name"><a.*?>([^<]+)</a>' + matches = scrapertools.find_multiple_matches(data, patron) + for calidad, scrapedurl, scrapedthumbnail, scrapedtitle in matches: + try: + fulltitle, year = scrapedtitle.rsplit("(", 1) + year = scrapertools.get_match(year, '(\d{4})') + if "/" in fulltitle: + fulltitle = fulltitle.split(" /", 1)[0] + scrapedtitle = "%s (%s)" % (fulltitle, year) + except: + fulltitle = scrapedtitle + year = "" + + if calidad: + scrapedtitle += " [%s]" % calidad + new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=fulltitle, + url=scrapedurl, thumbnail=scrapedthumbnail, + contentTitle=fulltitle, contentType="movie") + if year: + new_item.infoLabels['year'] = int(year) + itemlist.append(new_item) + try: + tmdb.set_infoLabels(itemlist, __modo_grafico__) + except: + pass + + # Extrae el paginador + next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s*><span [^>]+>»</span>') + if next_page_link: + itemlist.append(item.clone(action="peliculas", title=">> Página siguiente", url=next_page_link, + text_color=color3)) + + return itemlist + + +def vistas(item): + logger.info() + itemlist = [] + item.text_color = color2 + + # Descarga la página + data = httptools.downloadpage(item.url).data + + # Extrae las entradas (carpetas) + patron = '<li class="item">.*?href="([^"]+)".*?src="([^"]+)"' \ + '.*?<h3 class="name"><a.*?>([^<]+)</a>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, + url=scrapedurl, thumbnail=scrapedthumbnail, + contentTitle=scrapedtitle, contentType="movie") + itemlist.append(new_item) + + # Extrae el paginador + next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s+><span [^>]+>»</span>') + if next_page_link: + itemlist.append(item.clone(action="vistas", title=">> Página siguiente", url=next_page_link, text_color=color3)) + + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, '<div class="sub_title">Géneros</div>(.*?)</ul>') + + # Extrae las entradas + patron = '<li><a href="([^"]+)">(.*?)</li>' + matches = scrapertools.find_multiple_matches(bloque, patron) + for scrapedurl, scrapedtitle in matches: + scrapedtitle = scrapertools.htmlclean(scrapedtitle).strip() + scrapedtitle = unicode(scrapedtitle, 
"utf8").capitalize().encode("utf8") + if scrapedtitle == "Erotico" and config.get_setting("adult_mode") == '0': + continue + + itemlist.append(item.clone(action="peliculas", title=scrapedtitle, url=scrapedurl)) + + return itemlist + + +def idioma(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(action="peliculas", title="Español", url="http://www.cinetux.net/idioma/espanol/")) + itemlist.append(item.clone(action="peliculas", title="Latino", url="http://www.cinetux.net/idioma/latino/")) + itemlist.append(item.clone(action="peliculas", title="VOSE", url="http://www.cinetux.net/idioma/subtitulado/")) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + try: + filtro_idioma = config.get_setting("filterlanguages", item.channel) + filtro_enlaces = config.get_setting("filterlinks", item.channel) + except: + filtro_idioma = 3 + filtro_enlaces = 2 + dict_idiomas = {'Español': 2, 'Latino': 1, 'Subtitulado': 0} + + # Busca el argumento + data = httptools.downloadpage(item.url).data + year = scrapertools.find_single_match(data, '<h1><span>.*?rel="tag">([^<]+)</a>') + + if year and item.extra != "library": + item.infoLabels['year'] = int(year) + # Ampliamos datos en tmdb + if not item.infoLabels['plot']: + try: + tmdb.set_infoLabels(item, __modo_grafico__) + except: + pass + + if not item.infoLabels.get('plot'): + plot = scrapertools.find_single_match(data, '<div class="sinopsis"><p>(.*?)</p>') + item.infoLabels['plot'] = plot + + if filtro_enlaces != 0: + list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item) + if list_enlaces: + itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1, + text_bold=True)) + itemlist.extend(list_enlaces) + if filtro_enlaces != 1: + list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "descarga", item) + if list_enlaces: + itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1, + text_bold=True)) + itemlist.extend(list_enlaces) + + if itemlist: + itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", + text_color="magenta")) + # Opción "Añadir esta película a la videoteca de XBMC" + if item.extra != "library": + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green", + filtro=True, action="add_pelicula_to_library", url=item.url, + infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle, + extra="library")) + + else: + itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3)) + + return itemlist + + +def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item): + logger.info() + lista_enlaces = [] + + matches = [] + if type == "online": + patron = '<a href="#([^"]+)" data-toggle="tab">([^<]+)</a>' + bloques = scrapertools.find_multiple_matches(data, patron) + for id, language in bloques: + patron = 'id="' + id + '">.*?<iframe src="([^"]+)"' + url = scrapertools.find_single_match(data, patron) + matches.append([url, "", language]) + + bloque2 = scrapertools.find_single_match(data, '<div class="table-link" id="%s">(.*?)</table>' % type) + patron = 'tr>[^<]+<td>.*?href="([^"]+)".*?src.*?title="([^"]+)"' \ + '.*?src.*?title="([^"]+)".*?src.*?title="(.*?)"' + matches.extend(scrapertools.find_multiple_matches(bloque2, patron)) + filtrados = [] + for match in matches: + scrapedurl = match[0] + language = match[2].strip() + title = " Mirror en %s (" + language + 
")" + if len(match) == 4: + title += " (Calidad " + match[3].strip() + ")" + + if filtro_idioma == 3 or item.filtro: + lista_enlaces.append(item.clone(title=title, action="play", text_color=color2, + url=scrapedurl, idioma=language, extra=item.url)) + else: + idioma = dict_idiomas[language] + if idioma == filtro_idioma: + lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", url=scrapedurl, + extra=item.url)) + else: + if language not in filtrados: + filtrados.append(language) + + lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server) + if filtro_idioma != 3: + if len(filtrados) > 0: + title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados) + lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3, + filtro=True)) + + return lista_enlaces + + +def play(item): + logger.info() + itemlist = [] + if "api.cinetux" in item.url: + data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "") + bloque = scrapertools.find_single_match(data, 'sources:\s*(\[.*?\])') + if bloque: + bloque = eval(bloque) + video_urls = [] + for b in bloque: + ext = b["type"].replace("video/", "") + video_urls.append([".%s %sp [directo]" % (ext, b["label"]), b["file"], b["label"]]) + + video_urls.sort(key=lambda vdu: vdu[2]) + for v in video_urls: + itemlist.append([v[0], v[1]]) + else: + return [item] + return itemlist diff --git a/plugin.video.alfa/channels/clasicofilm.json b/plugin.video.alfa/channels/clasicofilm.json new file mode 100755 index 00000000..1c7cb4e2 --- /dev/null +++ b/plugin.video.alfa/channels/clasicofilm.json @@ -0,0 +1,63 @@ +{ + "id": "clasicofilm", + "name": "ClasicoFilm", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/F7sevVu.jpg?1", + "banner": "clasicofilm.png", + "version": 1, + "changes": [ + { + "date": "28/05/2017", + "description": "Corregido findvideos" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "07/02/17", + "description": "Fix bug in newest" + }, + { + "date": "09/01/2017", + "description": "Primera version" + } + ], + "categories": [ + "movie" + ], + "settings": [ + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/clasicofilm.py b/plugin.video.alfa/channels/clasicofilm.py new file mode 100755 index 00000000..539f720b --- /dev/null +++ b/plugin.video.alfa/channels/clasicofilm.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item + +host = "http://www.clasicofilm.com/" +# Configuracion del canal +__modo_grafico__ = config.get_setting('modo_grafico', 'clasicofilm') +__perfil__ = config.get_setting('perfil', 'clasicofilm') + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + 
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] + +if __perfil__ - 1 >= 0: + color1, color2, color3 = perfil[__perfil__ - 1] +else: + color1 = color2 = color3 = "" + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Películas", text_color=color2, action="", text_bold=True)) + itemlist.append(item.clone(action="peliculas", title=" Novedades", + url="http://www.clasicofilm.com/feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost", + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Directors%20Chair.png", + text_color=color1)) + itemlist.append(item.clone(action="generos", title=" Por géneros", url=host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres" + "/0/Genre.png", + text_color=color1)) + + itemlist.append(item.clone(title="", action="")) + itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3)) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + + data = httptools.downloadpage(host).data + cx = scrapertools.find_single_match(data, "var cx = '([^']+)'") + texto = texto.replace(" ", "%20") + item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www.google.com&start=0" % ( + cx, texto) + + try: + return busqueda(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = "http://www.clasicofilm.com/feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost" + item.action = "peliculas" + itemlist = peliculas(item) + + if itemlist[-1].action == "peliculas": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + item.text_color = color2 + + # Descarga la página + data = httptools.downloadpage(item.url).data + + data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);') + from core import jsontools + data = jsontools.load(data)["feed"] + + for entry in data["entry"]: + for link in entry["link"]: + if link["rel"] == "alternate": + title = link["title"] + url = link["href"] + break + thumbnail = entry["media$thumbnail"]["url"].replace("s72-c/", "") + try: + title_split = re.split(r"\s*\((\d)", title, 1) + year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)') + fulltitle = title_split[0] + except: + fulltitle = title + year = "" + if not "DVD" in title and not "HDTV" in title and not "HD-" in title: + continue + infolabels = {'year': year} + new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle, + url=url, thumbnail=thumbnail, infoLabels=infolabels, + contentTitle=fulltitle, contentType="movie") + 
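        # Worked example of the title split above:
        # re.split(r"\s*\((\d)", "Rear Window (1954) DVD", 1) returns
        # ["Rear Window", "1", "954) DVD"]; the year is rebuilt as
        # "1" + "954" = "1954" and the clean title is element 0.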
itemlist.append(new_item) + + try: + tmdb.set_infoLabels(itemlist, __modo_grafico__) + except: + pass + + actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)')) + totalresults = int(data["openSearch$totalResults"]["$t"]) + if actualpage + 20 < totalresults: + url_next = item.url.replace("start-index=" + str(actualpage), "start-index=" + str(actualpage + 20)) + itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=url_next)) + + return itemlist + + +def busqueda(item): + logger.info() + itemlist = [] + item.text_color = color2 + + # Descarga la página + data = httptools.downloadpage(item.url).data + + from core import jsontools + data = jsontools.load(data) + + for entry in data["results"]: + try: + title = entry["richSnippet"]["metatags"]["ogTitle"] + url = entry["richSnippet"]["metatags"]["ogUrl"] + thumbnail = entry["richSnippet"]["metatags"]["ogImage"] + except: + continue + + try: + title_split = re.split(r"\s*\((\d)", title, 1) + year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)') + fulltitle = title_split[0] + except: + fulltitle = title + year = "" + if not "DVD" in title and not "HDTV" in title and not "HD-" in title: + continue + infolabels = {'year': year} + new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle, + url=url, thumbnail=thumbnail, infoLabels=infolabels, + contentTitle=fulltitle, contentType="movie") + itemlist.append(new_item) + + try: + tmdb.set_infoLabels(itemlist, __modo_grafico__) + except: + pass + + actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)')) + totalresults = int(data["cursor"]["resultCount"]) + if actualpage + 20 <= totalresults: + url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20)) + itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente", url=url_next)) + + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + patron = '<b>([^<]+)</b><br/>\s*<script src="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedtitle, scrapedurl in matches: + scrapedurl = scrapedurl.replace("max-results=500", "start-index=1&max-results=20") \ + .replace("recentpostslist", "finddatepost") + itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, + thumbnail=item.thumbnail, text_color=color3)) + + itemlist.sort(key=lambda x: x.title) + return itemlist + + +def findvideos(item): + from core import servertools + + if item.infoLabels["tmdb_id"]: + tmdb.set_infoLabels_item(item, __modo_grafico__) + + data = httptools.downloadpage(item.url).data + iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"') + if "goo.gl/" in iframe: + data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "") + itemlist = servertools.find_video_items(item, data) + + library_path = config.get_videolibrary_path() + if config.get_videolibrary_support(): + title = "Añadir película a la videoteca" + if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"): + try: + from core import filetools + movie_path = filetools.join(config.get_videolibrary_path(), 'CINE') + files = filetools.walk(movie_path) + for dirpath, dirname, filename in files: + for f in filename: + if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"): + from core import 
videolibrarytools + # filetools.walk() yields (dirpath, dirnames, filenames); the .nfo path is built from dirpath and the file name only + head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, f)) + canales = it.library_urls.keys() + canales.sort() + if "clasicofilm" in canales: + canales.pop(canales.index("clasicofilm")) + canales.insert(0, "[COLOR red]clasicofilm[/COLOR]") + title = "Película ya en tu videoteca. [%s] ¿Añadir?" % ",".join(canales) + break + except: + import traceback + logger.error(traceback.format_exc()) + + itemlist.append(item.clone(action="add_pelicula_to_library", title=title)) + + token_auth = config.get_setting("token_trakt", "tvmoviedb") + if token_auth and item.infoLabels["tmdb_id"]: + itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta", action="menu_trakt", + extra="movie")) + + return itemlist diff --git a/plugin.video.alfa/channels/copiapop.json b/plugin.video.alfa/channels/copiapop.json new file mode 100755 index 00000000..470c4df5 --- /dev/null +++ b/plugin.video.alfa/channels/copiapop.json @@ -0,0 +1,85 @@ +{ + "id": "copiapop", + "name": "Copiapop/Diskokosmiko", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "autor": "SeiTaN", + "description": "limpieza código" + }, + { + "date": "16/02/2017", + "autor": "Cmos", + "description": "Primera versión" + } + ], + "thumbnail": "http://i.imgur.com/EjbfM7p.png?1", + "banner": "copiapop.png", + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "copiapopuser", + "type": "text", + "color": "0xFF25AA48", + "label": "Usuario Copiapop", + "enabled": true, + "visible": true + }, + { + "id": "copiapoppassword", + "type": "text", + "color": "0xFF25AA48", + "hidden": true, + "label": "Password Copiapop", + "enabled": "!eq(-1,'')", + "visible": true + }, + { + "id": "diskokosmikouser", + "type": "text", + "color": "0xFFC52020", + "label": "Usuario Diskokosmiko", + "enabled": true, + "visible": true + }, + { + "id": "diskokosmikopassword", + "type": "text", + "color": "0xFFC52020", + "hidden": true, + "label": "Password Diskokosmiko", + "enabled": "!eq(-1,'')", + "visible": true + }, + { + "id": "adult_content", + "type": "bool", + "color": "0xFFd50b0b", + "label": "Mostrar contenido adulto en las búsquedas", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/copiapop.py b/plugin.video.alfa/channels/copiapop.py new file mode 100755 index 00000000..826b7042 --- /dev/null +++ b/plugin.video.alfa/channels/copiapop.py @@ -0,0 +1,427 @@ +# -*- coding: utf-8 -*- + +import re +import threading + +from core import config +from core import filetools +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +__perfil__ = config.get_setting('perfil', "copiapop") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']] + +if __perfil__ - 1 >= 0: + color1, color2, color3, color4, color5 = perfil[__perfil__ - 1] +else: + color1 = color2 = color3 = color4 = color5 = "" + +adult_content = config.get_setting("adult_content", "copiapop") + + +def login(pagina): + logger.info() + 
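+ # Shared login for both mirrors: "pagina" is "copiapop.com" or "diskokosmiko.mx", the credentials come from the "<site>user"/"<site>password" settings of this channel, and a (logged_in, error_message) tuple is returned for mainlist() to display.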
try: + user = config.get_setting("%suser" % pagina.split(".")[0], "copiapop") + password = config.get_setting("%spassword" % pagina.split(".")[0], "copiapop") + if pagina == "copiapop.com": + if user == "" and password == "": + return False, "Para ver los enlaces de copiapop es necesario registrarse en copiapop.com" + elif user == "" or password == "": + return False, "Copiapop: Usuario o contraseña en blanco. Revisa tus credenciales" + else: + if user == "" or password == "": + return False, "DiskoKosmiko: Usuario o contraseña en blanco. Revisa tus credenciales" + + data = httptools.downloadpage("http://%s" % pagina).data + # Already logged in if the user name shows up on the page; re.escape() keeps regex metacharacters in the name from breaking the check + if re.search(r'(?i)%s' % re.escape(user), data): + return True, "" + + token = scrapertools.find_single_match(data, 'name="__RequestVerificationToken".*?value="([^"]+)"') + post = "__RequestVerificationToken=%s&UserName=%s&Password=%s" % (token, user, password) + headers = {'X-Requested-With': 'XMLHttpRequest'} + url_log = "http://%s/action/Account/Login" % pagina + data = httptools.downloadpage(url_log, post, headers).data + if "redirectUrl" in data: + logger.info("Login correcto") + return True, "" + else: + logger.error("Error en el login") + return False, "Nombre de usuario no válido. Comprueba tus credenciales" + except: + import traceback + logger.error(traceback.format_exc()) + return False, "Error durante el login. Comprueba tus credenciales" + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + logueado, error_message = login("copiapop.com") + + if not logueado: + itemlist.append(item.clone(title=error_message, action="configuracion", folder=False)) + else: + item.extra = "http://copiapop.com" + itemlist.append(item.clone(title="Copiapop", action="", text_color=color2)) + itemlist.append( + item.clone(title=" Búsqueda", action="search", url="http://copiapop.com/action/SearchFiles")) + itemlist.append(item.clone(title=" Colecciones", action="colecciones", + url="http://copiapop.com/action/home/MoreNewestCollections?pageNumber=1")) + itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro", + url="http://copiapop.com/action/SearchFiles")) + itemlist.append(item.clone(title=" Mi cuenta", action="cuenta")) + + item.extra = "http://diskokosmiko.mx/" + itemlist.append(item.clone(title="DiskoKosmiko", action="", text_color=color2)) + itemlist.append(item.clone(title=" Búsqueda", action="search", url="http://diskokosmiko.mx/action/SearchFiles")) + itemlist.append(item.clone(title=" Colecciones", action="colecciones", + url="http://diskokosmiko.mx/action/home/MoreNewestCollections?pageNumber=1")) + itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro", + url="http://diskokosmiko.mx/action/SearchFiles")) + itemlist.append(item.clone(title=" Mi cuenta", action="cuenta")) + itemlist.append(item.clone(action="", title="")) + + folder_thumb = filetools.join(config.get_data_path(), 'thumbs_copiapop') + files = filetools.listdir(folder_thumb) + if files: + itemlist.append( + item.clone(title="Eliminar caché de imágenes (%s)" % len(files), action="delete_cache", text_color="red")) + itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold")) + + return itemlist + + +def search(item, texto): + logger.info() + item.post = "Mode=List&Type=Video&Phrase=%s&SizeFrom=0&SizeTo=0&Extension=&ref=pager&pageNumber=1" % texto.replace( + " ", "+") + try: + return listado(item) + except: + import sys, traceback + for line in sys.exc_info(): + logger.error("%s" % line) + 
logger.error(traceback.format_exc()) + return [] + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def listado(item): + logger.info() + itemlist = [] + + data_thumb = httptools.downloadpage(item.url, item.post.replace("Mode=List", "Mode=Gallery")).data + if not item.post: + data_thumb = "" + item.url = item.url.replace("/gallery,", "/list,") + + data = httptools.downloadpage(item.url, item.post).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + + folder = filetools.join(config.get_data_path(), 'thumbs_copiapop') + patron = '<div class="size">(.*?)</div></div></div>' + bloques = scrapertools.find_multiple_matches(data, patron) + for block in bloques: + if "adult_info" in block and not adult_content: + continue + size = scrapertools.find_single_match(block, '<p>([^<]+)</p>') + scrapedurl, scrapedtitle = scrapertools.find_single_match(block, + '<div class="name"><a href="([^"]+)".*?>([^<]+)<') + scrapedthumbnail = scrapertools.find_single_match(block, "background-image:url\('([^']+)'") + if scrapedthumbnail: + try: + thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?") + if data_thumb: + url_thumb = scrapertools.find_single_match(data_thumb, "(%s[^']+)'" % thumb) + else: + url_thumb = scrapedthumbnail + scrapedthumbnail = filetools.join(folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:]) + except: + scrapedthumbnail = "" + + if scrapedthumbnail: + t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, url_thumb]) + t.setDaemon(True) + t.start() + + else: + scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png" + + scrapedurl = item.extra + scrapedurl + title = "%s (%s)" % (scrapedtitle, size) + if "adult_info" in block: + title += " [COLOR %s][+18][/COLOR]" % color4 + plot = scrapertools.find_single_match(block, '<div class="desc">(.*?)</div>') + if plot: + plot = scrapertools.decodeHtmlentities(plot) + + new_item = Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, + thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, text_color=color2, + extra=item.extra, infoLabels={'plot': plot}, post=item.post) + if item.post: + try: + new_item.folderurl, new_item.foldername = scrapertools.find_single_match(block, + '<p class="folder"><a href="([^"]+)".*?>([^<]+)<') + except: + pass + else: + new_item.folderurl = item.url.rsplit("/", 1)[0] + new_item.foldername = item.foldername + new_item.fanart = item.thumbnail + + itemlist.append(new_item) + + next_page = scrapertools.find_single_match(data, '<div class="pageSplitterBorder" data-nextpage-number="([^"]+)"') + if next_page: + if item.post: + post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post) + url = item.url + else: + url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url) + post = "" + itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page, + url=url, post=post, extra=item.extra)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="copiapop")) + usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra) + url_usuario = item.extra + "/" + usuario + + if item.folderurl and not item.folderurl.startswith(item.extra): + item.folderurl = item.extra + item.folderurl + if item.post: + itemlist.append(item.clone(action="listado", title="Ver colección: %s" 
% item.foldername, + url=item.folderurl + "/gallery,1,1?ref=pager", post="")) + + data = httptools.downloadpage(item.folderurl).data + token = scrapertools.find_single_match(data, + 'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"') + collection_id = item.folderurl.rsplit("-", 1)[1] + post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id) + url = "%s/action/Follow/Follow" % item.extra + title = "Seguir Colección: %s" % item.foldername + if "dejar de seguir" in data: + title = "Dejar de seguir la colección: %s" % item.foldername + url = "%s/action/Follow/UnFollow" % item.extra + itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False)) + + itemlist.append( + item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario)) + + return itemlist + + +def colecciones(item): + logger.info() + from core import jsontools + itemlist = [] + + usuario = False + data = httptools.downloadpage(item.url).data + if "Ver colecciones del usuario" not in item.title and not item.index: + data = jsontools.load(data)["Data"] + content = data["Content"] + content = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", content) + else: + usuario = True + if item.follow: + content = scrapertools.find_single_match(data, + 'id="followed_collections"(.*?)<div id="recommended_collections"') + else: + content = scrapertools.find_single_match(data, + '<div id="collections".*?<div class="collections_list(.*?)<div class="collections_list') + content = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", content) + + patron = '<a class="name" href="([^"]+)".*?>([^<]+)<.*?src="([^"]+)".*?<p class="info">(.*?)</p>' + matches = scrapertools.find_multiple_matches(content, patron) + + index = "" + if item.index and item.index != "0": + matches = matches[item.index:item.index + 20] + if len(matches) > item.index + 20: + index = item.index + 20 + elif len(matches) > 20: + matches = matches[:20] + index = 20 + + folder = filetools.join(config.get_data_path(), 'thumbs_copiapop') + for url, scrapedtitle, thumb, info in matches: + url = item.extra + url + "/gallery,1,1?ref=pager" + title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info)) + try: + scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("e=", 1)[1][-20:]) + except: + try: + scrapedthumbnail = filetools.join(folder, "%s.jpg" % thumb.split("/thumbnail/", 1)[1][-20:]) + thumb = thumb.replace("/thumbnail/", "/") + except: + scrapedthumbnail = "" + if scrapedthumbnail: + t = threading.Thread(target=download_thumb, args=[scrapedthumbnail, thumb]) + t.setDaemon(True) + t.start() + else: + scrapedthumbnail = thumb + + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, + thumbnail=scrapedthumbnail, text_color=color2, extra=item.extra, + foldername=scrapedtitle)) + + if not usuario and data.get("NextPageUrl"): + url = item.extra + data["NextPageUrl"] + itemlist.append(item.clone(title=">> Página Siguiente", url=url, text_color="")) + elif index: + itemlist.append(item.clone(title=">> Página Siguiente", url=item.url, index=index, text_color="")) + + return itemlist + + +def seguir(item): + logger.info() + data = httptools.downloadpage(item.url, item.post) + message = "Colección seguida" + if "Dejar" in item.title: + message = "La colección ya no se sigue" + if data.sucess and config.get_platform() != "plex": + from platformcode import platformtools + platformtools.dialog_notification("Acción correcta", message) + + +def 
cuenta(item): + logger.info() + import urllib + itemlist = [] + + web = "copiapop" + if "diskokosmiko" in item.extra: + web = "diskokosmiko" + logueado, error_message = login("diskokosmiko.mx") + if not logueado: + itemlist.append(item.clone(title=error_message, action="configuracion", folder=False)) + return itemlist + + user = config.get_setting("%suser" % web, "copiapop") + user = unicode(user, "utf8").lower().encode("utf8") + url = item.extra + "/" + urllib.quote(user) + data = httptools.downloadpage(url).data + num_col = scrapertools.find_single_match(data, 'name="Has_collections" value="([^"]+)"') + if num_col != "0": + itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Ver mis colecciones", + text_color=color5)) + else: + itemlist.append(item.clone(action="", title="No tienes ninguna colección", text_color=color4)) + + num_follow = scrapertools.find_single_match(data, 'name="Follows_collections" value="([^"]+)"') + if num_follow != "0": + itemlist.append(item.clone(action="colecciones", url=url, index="0", title="Colecciones que sigo", + text_color=color5, follow=True)) + else: + itemlist.append(item.clone(action="", title="No sigues ninguna colección", text_color=color4)) + + return itemlist + + +def filtro(item): + logger.info() + + list_controls = [] + valores = {} + + dict_values = None + list_controls.append({'id': 'search', 'label': 'Texto a buscar', 'enabled': True, 'color': '0xFFC52020', + 'type': 'text', 'default': '', 'visible': True}) + list_controls.append({'id': 'tipo', 'label': 'Tipo de búsqueda', 'enabled': True, 'color': '0xFFFF8000', + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[1]['lvalues'] = ['Aplicación', 'Archivo', 'Documento', 'Imagen', 'Música', 'Vídeo', 'Todos'] + valores['tipo'] = ['Application', 'Archive', 'Document', 'Image', 'Music', 'Video', ''] + + list_controls.append({'id': 'ext', 'label': 'Extensión', 'enabled': True, 'color': '0xFFF4FA58', + 'type': 'text', 'default': '', 'visible': True}) + list_controls.append({'id': 'tmin', 'label': 'Tamaño mínimo (MB)', 'enabled': True, 'color': '0xFFCC2EFA', + 'type': 'text', 'default': '0', 'visible': True}) + list_controls.append({'id': 'tmax', 'label': 'Tamaño máximo (MB)', 'enabled': True, 'color': '0xFF2ECCFA', + 'type': 'text', 'default': '0', 'visible': True}) + + # Se utilizan los valores por defecto/guardados + web = "copiapop" + if "diskokosmiko" in item.extra: + web = "diskokosmiko" + valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel) + if valores_guardados: + dict_values = valores_guardados + item.valores = valores + from platformcode import platformtools + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Filtra la búsqueda", item=item, callback='filtrado') + + +def filtrado(item, values): + values_copy = values.copy() + web = "copiapop" + if "diskokosmiko" in item.extra: + web = "diskokosmiko" + # Guarda el filtro para que sea el que se cargue por defecto + config.set_setting("filtro_defecto_" + web, values_copy, item.channel) + + tipo = item.valores["tipo"][values["tipo"]] + search = values["search"] + ext = values["ext"] + tmin = values["tmin"] + tmax = values["tmax"] + + if not tmin.isdigit(): + tmin = "0" + if not tmax.isdigit(): + tmax = "0" + + item.valores = "" + item.post = "Mode=List&Type=%s&Phrase=%s&SizeFrom=%s&SizeTo=%s&Extension=%s&ref=pager&pageNumber=1" \ + % (tipo, search, tmin, tmax, ext) + item.action = "listado" + return listado(item) + + +def 
download_thumb(filename, url): + from core import downloadtools + + lock = threading.Lock() + lock.acquire() + folder = filetools.join(config.get_data_path(), 'thumbs_copiapop') + if not filetools.exists(folder): + filetools.mkdir(folder) + lock.release() + + if not filetools.exists(filename): + downloadtools.downloadfile(url, filename, silent=True) + + return filename + + +def delete_cache(url): + folder = filetools.join(config.get_data_path(), 'thumbs_copiapop') + filetools.rmdirtree(folder) + if config.is_xbmc(): + import xbmc + xbmc.executebuiltin("Container.Refresh") diff --git a/plugin.video.alfa/channels/crimenes.json b/plugin.video.alfa/channels/crimenes.json new file mode 100755 index 00000000..8b6d4342 --- /dev/null +++ b/plugin.video.alfa/channels/crimenes.json @@ -0,0 +1,37 @@ +{ + "id": "crimenes", + "name": "Crimenes Imperfectos", + "active": true, + "adult": false, + "language": "es", + "banner": "crimenes.png", + "thumbnail": "crimenes.png", + "version": 1, + "changes": [ + { + "date": "19/06/2017", + "description": "correcion xml" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/crimenes.py b/plugin.video.alfa/channels/crimenes.py new file mode 100755 index 00000000..c1060748 --- /dev/null +++ b/plugin.video.alfa/channels/crimenes.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +import xbmc +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +# Main list manual + +def listav(item): + itemlist = [] + + data = scrapertools.cache_page(item.url) + + patronbloque = '<li><div class="yt-lockup.*?<img.*?src="([^"]+)".*?' + patronbloque += '<h3 class="yt-lockup-title "><a href="([^"]+)".*?title="([^"]+)".*?' + patronbloque += '</a><span class=.*?">(.*?)</span></h3>' + matchesbloque = re.compile(patronbloque, re.DOTALL).findall(data) + scrapertools.printMatches(matchesbloque) + + scrapedduration = '' + for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matchesbloque: + scrapedtitle = '[COLOR white]' + scrapedtitle + '[/COLOR] [COLOR red]' + scrapedduration + '[/COLOR]' + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.thumbnail, scrapedthumbnail) + xbmc.log("$ " + scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail) + itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=scrapedtitle, url=url, + thumbnail=thumbnail, fanart=thumbnail)) + + + # Paginacion + + patronbloque = '<div class="branded-page-box .*? 
spf-link ">(.*?)</div>' + matches = re.compile(patronbloque, re.DOTALL).findall(data) + for bloque in matches: + patronvideo = '<a href="([^"]+)"' + matchesx = re.compile(patronvideo, re.DOTALL).findall(bloque) + for scrapedurl in matchesx: + url = urlparse.urljoin(item.url, 'https://www.youtube.com' + scrapedurl) + # solo me quedo con el ultimo enlace + itemlist.append( + Item(channel=item.channel, action="listav", title="Siguiente pag >>", fulltitle="Siguiente Pag >>", url=url, + thumbnail=item.thumbnail, fanart=item.fanart)) + + return itemlist + + +def busqueda(item): + itemlist = [] + keyboard = xbmc.Keyboard("", "Busqueda") + keyboard.doModal() + if (keyboard.isConfirmed()): + myurl = keyboard.getText().replace(" ", "+") + + data = scrapertools.cache_page('https://www.youtube.com/results?q=' + myurl) + data = data.replace("\n", "").replace("\t", "") + data = scrapertools.decodeHtmlentities(data) + + patronbloque = '<li><div class="yt-lockup.*?<img.*?src="([^"]+)".*?' + patronbloque += '<h3 class="yt-lockup-title "><a href="([^"]+)".*?title="([^"]+)".*?' + patronbloque += '</a><span class=.*?">(.*?)</span></h3>' + matchesbloque = re.compile(patronbloque, re.DOTALL).findall(data) + scrapertools.printMatches(matchesbloque) + + for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduracion in matchesbloque: + scrapedtitle = scrapedtitle + ' ' + scrapedduracion + url = scrapedurl + thumbnail = scrapedthumbnail + xbmc.log("$ " + scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail) + itemlist.append( + Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=scrapedtitle, url=url, + thumbnail=thumbnail, fanart=thumbnail)) + + + # Paginacion + + patronbloque = '<div class="branded-page-box .*? spf-link ">(.*?)</div>' + matches = re.compile(patronbloque, re.DOTALL).findall(data) + for bloque in matches: + patronvideo = '<a href="([^"]+)"' + matchesx = re.compile(patronvideo, re.DOTALL).findall(bloque) + for scrapedurl in matchesx: + url = 'https://www.youtube.com' + scrapedurl + # solo me quedo con el ultimo enlace + + itemlist.append( + Item(channel=item.channel, action="listav", title="Siguiente pag >>", fulltitle="Siguiente Pag >>", + url=url)) + return itemlist + else: + # xbmcgui.Dialog().ok(item.channel, "nada que buscar") + # xbmc.executebuiltin("Action(up)") + xbmc.executebuiltin("Action(enter)") + + # itemlist.append( Item(channel=item.channel, action="listav", title="<< Volver", fulltitle="Volver" , url="history.back()") ) + + +def mainlist(item): + logger.info() + itemlist = [] + + item.url = 'https://www.youtube.com/results?q=crimenes+imperfectos&sp=CAI%253D' + scrapedtitle = "[COLOR white]Crimenes [COLOR red]Imperfectos[/COLOR]" + item.thumbnail = urlparse.urljoin(item.thumbnail, + "https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQ2PcyvcYIg6acvdUZrHGFFk_E3mXK9QSh-5TypP8Rk6zQ6S1yb2g") + item.fanart = urlparse.urljoin(item.fanart, + "https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQ2PcyvcYIg6acvdUZrHGFFk_E3mXK9QSh-5TypP8Rk6zQ6S1yb2g") + + itemlist.append( + Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url, + thumbnail=item.thumbnail, fanart=item.fanart)) + + item.url = 'https://www.youtube.com/results?search_query=russian+dash+cam&sp=CAI%253D' + scrapedtitle = "[COLOR blue]Russian[/COLOR] [COLOR White]Dash[/COLOR] [COLOR red]Cam[/COLOR]" + item.thumbnail = urlparse.urljoin(item.thumbnail, "https://i.ytimg.com/vi/-C6Ftromtig/maxresdefault.jpg") + item.fanart = urlparse.urljoin(item.fanart, + 
"https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcRQLO-n-kO1ByY8lLhKxz0-cejJD1J7rLge_j0E0Gh9LJ2WtTbSnA") + + itemlist.append( + Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url, + thumbnail=item.thumbnail, fanart=item.fanart)) + + item.url = 'https://www.youtube.com/results?search_query=cuarto+milenio+programa+completo&sp=CAI%253D' + scrapedtitle = "[COLOR green]Cuarto[/COLOR] [COLOR White]Milenio[/COLOR]" + item.thumbnail = urlparse.urljoin(item.thumbnail, + "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg") + item.fanart = urlparse.urljoin(item.fanart, + "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg") + + itemlist.append( + Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url, + thumbnail=item.thumbnail, fanart=item.fanart)) + + item.url = 'https://www.youtube.com/results?q=milenio+3&sp=CAI%253D' + scrapedtitle = "[COLOR green]Milenio[/COLOR] [COLOR White]3- Podcasts[/COLOR]" + item.thumbnail = urlparse.urljoin(item.thumbnail, + "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg") + item.fanart = urlparse.urljoin(item.fanart, + "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg") + + itemlist.append( + Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url, + thumbnail=item.thumbnail, fanart=item.fanart)) + + scrapedtitle = "[COLOR red]buscar ...[/COLOR]" + item.thumbnail = urlparse.urljoin(item.thumbnail, + "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg") + item.fanart = urlparse.urljoin(item.fanart, + "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg") + + itemlist.append(Item(channel=item.channel, action="busqueda", title=scrapedtitle, fulltitle=scrapedtitle, + thumbnail=item.thumbnail, fanart=item.fanart)) + + return itemlist + + +def play(item): + logger.info("url=" + item.url) + + itemlist = servertools.find_video_items(data=item.url) + + return itemlist diff --git a/plugin.video.alfa/channels/crunchyroll.json b/plugin.video.alfa/channels/crunchyroll.json new file mode 100755 index 00000000..5d53c4cc --- /dev/null +++ b/plugin.video.alfa/channels/crunchyroll.json @@ -0,0 +1,102 @@ +{ + "id": "crunchyroll", + "name": "Crunchyroll", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "16/05/2017", + "description": "Primera versión" + } + ], + "thumbnail": "http://i.imgur.com/O49fDS1.png", + "categories": [ + "anime", + "tvshow" + ], + "settings": [ + { + "id": "crunchyrolluser", + "type": "text", + "color": "0xFF25AA48", + "label": "@30014", + "enabled": true, + "visible": true + }, + { + "id": "crunchyrollpassword", + "type": "text", + "color": "0xFF25AA48", + "hidden": true, + "label": "@30015", + "enabled": "!eq(-1,'')", + "visible": true + }, + { + "id": "crunchyrollidioma", + "type": "list", + "label": "Idioma de los textos de la web", + "default": 6, + "enabled": true, + "visible": true, + "lvalues": [ + "Alemán", + "Portugués", + "Francés", + "Italiano", + "Inglés", + "Español Latino", + "Español España" + ] + }, + { + "id": "crunchyrollsub", + 
"type": "list", + "label": "Idioma de subtítulos preferido en Crunchyroll", + "default": 6, + "enabled": true, + "visible": true, + "lvalues": [ + "Alemán", + "Portugués", + "Francés", + "Italiano", + "Inglés", + "Español Latino", + "Español España" + ] + }, + { + "id": "proxy_usa", + "type": "bool", + "label": "Usar proxy para ver el catálogo de USA", + "default": false, + "visible": true, + "enabled": "!eq(+1,true)" + }, + { + "id": "proxy_spain", + "type": "bool", + "label": "Usar proxy para ver el catálogo de España", + "default": false, + "visible": true, + "enabled": "!eq(-1,true)" + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/crunchyroll.py b/plugin.video.alfa/channels/crunchyroll.py new file mode 100755 index 00000000..7dd41311 --- /dev/null +++ b/plugin.video.alfa/channels/crunchyroll.py @@ -0,0 +1,360 @@ +# -*- coding: utf-8 -*- + +import re +import urllib + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +__perfil__ = config.get_setting('perfil', "crunchyroll") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']] + +if __perfil__ - 1 >= 0: + color1, color2, color3, color4, color5 = perfil[__perfil__ - 1] +else: + color1 = color2 = color3 = color4 = color5 = "" + +host = "http://www.crunchyroll.com" +proxy_u = "http://anonymouse.org/cgi-bin/anon-www.cgi/" +proxy_e = "http://proxyanonimo.es/browse.php?u=" + + +def login(): + logger.info() + + langs = ['deDE', 'ptPT', 'frFR', 'itIT', 'enUS', 'esLA', 'esES'] + lang = langs[config.get_setting("crunchyrollidioma", "crunchyroll")] + httptools.downloadpage("http://www.crunchyroll.com/ajax/", "req=RpcApiTranslation_SetLang&locale=%s" % lang) + + login_page = "https://www.crunchyroll.com/login" + user = config.get_setting("crunchyrolluser", "crunchyroll") + password = config.get_setting("crunchyrollpassword", "crunchyroll") + if not user or not password: + return False, "", "" + data = httptools.downloadpage(login_page).data + + if not "<title>Redirecting" in data: + token = scrapertools.find_single_match(data, 'name="login_form\[_token\]" value="([^"]+)"') + redirect_url = scrapertools.find_single_match(data, 'name="login_form\[redirect_url\]" value="([^"]+)"') + post = "login_form%5Bname%5D=" + user + "&login_form%5Bpassword%5D=" + password + \ + "&login_form%5Bredirect_url%5D=" + redirect_url + "&login_form%5B_token%5D=" + token + + data = httptools.downloadpage(login_page, post).data + if not "<title>Redirecting" in data: + if "Usuario %s no disponible" % user in data: + return False, "El usuario de crunchyroll no existe.", "" + elif '<li class="error">Captcha' in data: + return False, "Es necesario resolver un captcha. 
Loguéate desde un navegador y vuelve a intentarlo", "" + else: + return False, "No se ha podido realizar el login.", "" + + data = httptools.downloadpage(host).data + premium = scrapertools.find_single_match(data, ',"premium_status":"([^"]+)"') + premium = premium.replace("_", " ").replace("free trial", "Prueba Gratuita").capitalize() + locate = scrapertools.find_single_match(data, 'title="Your detected location is (.*?)."') + if locate: + premium += " - %s" % locate + return True, "", premium + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + proxy_usa = config.get_setting("proxy_usa", "crunchyroll") + proxy_spain = config.get_setting("proxy_spain", "crunchyroll") + item.login = False + error_message = "" + global host + if not proxy_usa and not proxy_spain: + item.login, error_message, premium = login() + elif proxy_usa: + item.proxy = "usa" + host = proxy_u + host + elif proxy_spain: + httptools.downloadpage("http://proxyanonimo.es/") + item.proxy = "spain" + host = proxy_e + host + + if not item.login and error_message: + itemlist.append(item.clone(title=error_message, action="configuracion", folder=False, text_color=color4)) + elif item.login: + itemlist.append(item.clone(title="Tipo de cuenta: %s" % premium, action="", text_color=color4)) + elif item.proxy: + itemlist.append(item.clone(title="Usando proxy: %s" % item.proxy.capitalize(), action="", text_color=color4)) + + itemlist.append(item.clone(title="Anime", action="", text_color=color2)) + item.contentType = "tvshow" + itemlist.append( + item.clone(title=" Novedades", action="lista", url=host + "/videos/anime/updated/ajax_page?pg=0", page=0)) + itemlist.append( + item.clone(title=" Popular", action="lista", url=host + "/videos/anime/popular/ajax_page?pg=0", page=0)) + itemlist.append(item.clone(title=" Emisiones Simultáneas", action="lista", + url=host + "/videos/anime/simulcasts/ajax_page?pg=0", page=0)) + itemlist.append(item.clone(title=" Índices", action="indices")) + + itemlist.append(item.clone(title="Drama", action="", text_color=color2)) + itemlist.append( + item.clone(title=" Popular", action="lista", url=host + "/videos/drama/popular/ajax_page?pg=0", page=0)) + itemlist.append(item.clone(title=" Índice Alfabético", action="indices", + url="http://www.crunchyroll.com/videos/drama/alpha")) + + if item.proxy != "usa": + itemlist.append(item.clone(action="calendario", title="Calendario de Estrenos Anime", text_color=color4, + url=host + "/simulcastcalendar")) + itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold")) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + + user = config.get_setting("crunchyrolluser", "crunchyroll") + password = config.get_setting("crunchyrollpassword", "crunchyroll") + sub = config.get_setting("crunchyrollsub", "crunchyroll") + + config.set_setting("crunchyrolluser", user) + config.set_setting("crunchyrollpassword", password) + values = [6, 5, 4, 3, 2, 1, 0] + config.set_setting("crunchyrollsub", str(values[sub])) + platformtools.itemlist_refresh() + + return ret + + +def lista(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + next = item.url.replace("?pg=%s" % item.page, "?pg=%s" % str(item.page + 1)) + data_next = httptools.downloadpage(next).data + patron = '<li id="media_group_(\d+)".*?title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)"' \ + '.*?<span 
class="series-data.*?>\s*([^<]+)</span>' + matches = scrapertools.find_multiple_matches(data, patron) + for id, title, url, thumb, videos in matches: + if item.proxy == "spain": + url = "http://proxyanonimo.es" + url.replace("&b=12", "") + elif not item.proxy: + url = host + url + thumb = urllib.unquote(thumb.replace("/browse.php?u=", "").replace("_thumb", "_full").replace("&b=12", "")) + scrapedtitle = "%s (%s)" % (title, videos.strip()) + plot = scrapertools.find_single_match(data, '%s"\).data.*?description":"([^"]+)"' % id) + plot = unicode(plot, 'unicode-escape', "ignore") + itemlist.append(item.clone(action="episodios", url=url, title=scrapedtitle, thumbnail=thumb, + contentTitle=title, contentSerieName=title, infoLabels={'plot': plot}, + text_color=color2)) + + if '<li id="media_group' in data_next: + itemlist.append(item.clone(action="lista", url=next, title=">> Página Siguiente", page=item.page + 1, + text_color="")) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r'\n|\t|\s{2,}', '', data) + patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)" ' \ + 'style="width: (.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \ + '\s*(.*?)</p>.*?description":"([^"]+)"' + if data.count('class="season-dropdown') > 1: + bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+" title="([^"]+)"(.*?)</ul>') + for season, b in bloques: + matches = scrapertools.find_multiple_matches(b, patron) + if matches: + itemlist.append(item.clone(action="", title=season, text_color=color3)) + for url, thumb, media_id, visto, title, subt, plot in matches: + if item.proxy == "spain": + url = urllib.unquote(url.replace("/browse.php?u=", "").replace("&b=12", "")) + elif not item.proxy: + url = host + url + url = url.replace(proxy_u, "") + thumb = urllib.unquote( + thumb.replace("/browse.php?u=", "").replace("_wide.", "_full.").replace("&b=12", "")) + title = " %s - %s" % (title, subt) + if visto != "0": + title += " [COLOR %s][V][/COLOR]" % color5 + itemlist.append( + Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumb, media_id=media_id, + server="crunchyroll", text_color=item.text_color, contentTitle=item.contentTitle, + contentSerieName=item.contentSerieName, contentType="tvshow")) + else: + matches = scrapertools.find_multiple_matches(data, patron) + for url, thumb, media_id, visto, title, subt, plot in matches: + if item.proxy == "spain": + url = urllib.unquote(url.replace("/browse.php?u=", "").replace("&b=12", "")) + elif not item.proxy: + url = host + url + url = url.replace(proxy_u, "") + thumb = urllib.unquote( + thumb.replace("/browse.php?u=", "").replace("_wide.", "_full.").replace("&b=12", "")) + title = "%s - %s" % (title, subt) + if visto != "0": + title += " [COLOR %s][V][/COLOR]" % color5 + itemlist.append( + Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumb, media_id=media_id, + server="crunchyroll", text_color=item.text_color, contentTitle=item.contentTitle, + contentSerieName=item.contentSerieName, contentType="tvshow")) + + return itemlist + + +def indices(item): + logger.info() + itemlist = [] + + if not item.url: + itemlist.append(item.clone(title="Alfabético", url="http://www.crunchyroll.com/videos/anime/alpha")) + itemlist.append(item.clone(title="Géneros", url="http://www.crunchyroll.com/videos/anime")) + 
itemlist.append(item.clone(title="Temporadas", url="http://www.crunchyroll.com/videos/anime")) + + else: + data = httptools.downloadpage(item.url).data + if "Alfabético" in item.title: + bloque = scrapertools.find_single_match(data, '<div class="content-menu cf ">(.*?)</div>') + matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)".*?>([^<]+)<') + for url, title in matches: + if "todo" in title: + continue + if item.proxy == "spain": + url = proxy_e + host + url + elif item.proxy == "usa": + url = proxy_u + host + url + else: + url = host + url + + itemlist.append(item.clone(action="alpha", title=title, url=url, page=0)) + elif "Temporadas" in item.title: + bloque = scrapertools.find_single_match(data, + '<div class="season-selectors cf selectors">(.*?)<div id="container"') + matches = scrapertools.find_multiple_matches(bloque, 'href="#([^"]+)".*?title="([^"]+)"') + for url, title in matches: + url += "/ajax_page?pg=0" + if item.proxy == "spain": + url = proxy_e + host + url + elif item.proxy == "usa": + url = proxy_u + host + url + else: + url = host + url + + itemlist.append(item.clone(action="lista", title=title, url=url, page=0)) + else: + bloque = scrapertools.find_single_match(data, '<div class="genre-selectors selectors">(.*?)</div>') + matches = scrapertools.find_multiple_matches(bloque, '<input id="([^"]+)".*?title="([^"]+)"') + for url, title in matches: + url = "%s/genres/ajax_page?pg=0&tagged=%s" % (item.url, url) + if item.proxy == "spain": + url = proxy_e + url.replace("&", "%26") + elif item.proxy == "usa": + url = proxy_u + url + + itemlist.append(item.clone(action="lista", title=title, url=url, page=0)) + + return itemlist + + +def alpha(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '<div class="wrapper hover-toggle-queue.*?title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)"' \ + '.*?<span class="series-data.*?>\s*([^<]+)</span>.*?<p.*?>(.*?)</p>' + matches = scrapertools.find_multiple_matches(data, patron) + for title, url, thumb, videos, plot in matches: + if item.proxy == "spain": + url = "http://proxyanonimo.es" + url.replace("&b=12", "") + elif not item.proxy: + url = host + url + thumb = urllib.unquote(thumb.replace("/browse.php?u=", "").replace("_small", "_full").replace("&b=12", "")) + scrapedtitle = "%s (%s)" % (title, videos.strip()) + itemlist.append(item.clone(action="episodios", url=url, title=scrapedtitle, thumbnail=thumb, + contentTitle=title, contentSerieName=title, infoLabels={'plot': plot}, + text_color=color2)) + + return itemlist + + +def calendario(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '<div class="specific-date">.*?datetime="\d+-(\d+)-(\d+).*?class="day-name">.*?>\s*([^<]+)</time>(.*?)</section>' + bloques = scrapertools.find_multiple_matches(data, patron) + for mes, dia, title, b in bloques: + patron = 'class="available-time">([^<]+)<.*?<cite itemprop="name">(.*?)</cite>.*?href="([^"]+)"' \ + '.*?>\s*(.*?)\s*</a>(.*?)</article>' + matches = scrapertools.find_multiple_matches(b, patron) + if matches: + title = "%s/%s - %s" % (dia, mes, title.strip()) + itemlist.append(item.clone(action="", title=title)) + for hora, title, url, subt, datos in matches: + subt = subt.replace("Available", "Disponible").replace("Episode", "Episodio").replace("in ", "en ") + subt = re.sub(r"\s{2,}", " ", subt) + if "<time" in subt: + subt = re.sub(r"<time.*?>", "", subt).replace("</time>", "") + + scrapedtitle = " [%s] %s - %s" % (hora, 
scrapertools.htmlclean(title), subt) + scrapedtitle = re.sub(r"\[email protected\]|\[email\xc2\xa0protected\]", "Idolm@ster", scrapedtitle) + + if "Disponible" in scrapedtitle: + if item.proxy == "spain": + url = urllib.unquote(url.replace("/browse.php?u=", "").replace("&b=12", "")) + action = "play" + server = "crunchyroll" + else: + action = "" + server = "" + thumb = scrapertools.find_single_match(datos, '<img class="thumbnail" src="([^"]+)"') + if not thumb: + thumb = scrapertools.find_single_match(datos, 'src="([^"]+)"') + if thumb: + thumb = urllib.unquote(thumb.replace("/browse.php?u=", "").replace("_thumb", "_full") \ + .replace("&b=12", "").replace("_large", "_full")) + itemlist.append(item.clone(action=action, url=url, title=scrapedtitle, contentTitle=title, thumbnail=thumb, + text_color=color2, contentSerieName=title, server=server)) + + next = scrapertools.find_single_match(data, 'js-pagination-next"\s*href="([^"]+)"') + if next: + if item.proxy == "spain": + next = "http://proxyanonimo.es" + next.replace("&b=12", "") + else: + next = host + next + itemlist.append(item.clone(action="calendario", url=next, title=">> Siguiente Semana")) + prev = scrapertools.find_single_match(data, 'js-pagination-last"\s*href="([^"]+)"') + if prev: + if item.proxy == "spain": + prev = "http://proxyanonimo.es" + prev.replace("&b=12", "") + else: + prev = host + prev + itemlist.append(item.clone(action="calendario", url=prev, title="<< Semana Anterior")) + + return itemlist + + +def play(item): + logger.info() + if item.login and not "[V]" in item.title: + post = "cbelapsed=60&h=&media_id=%s" % item.media_id + "&req=RpcApiVideo_VideoView&cbcallcount=1&ht=0" \ + "&media_type=1&video_encode_id=0&playhead=10000" + httptools.downloadpage("http://www.crunchyroll.com/ajax/", post) + + return [item] diff --git a/plugin.video.alfa/channels/cuelgame.json b/plugin.video.alfa/channels/cuelgame.json new file mode 100755 index 00000000..805701b7 --- /dev/null +++ b/plugin.video.alfa/channels/cuelgame.json @@ -0,0 +1,41 @@ +{ + "id": "cuelgame", + "name": "Cuelgame", + "active": true, + "adult": false, + "language": "es", + "version": 1, + "changes": [ + { + "date": "10/12/2016", + "description": "Reparado fanart y thumbs y corrección código. Adaptado a Infoplus" + }, + { + "date": "04/04/2017", + "description": "Migración a Httptools" + }, + { + "date": "28/06/2017", + "description": "Correcciones código. Algunas mejoras" + } + ], + "thumbnail": "cuelgame.png", + "banner": "cuelgame.png", + "categories": [ + "torrent", + "movie", + "tvshow", + "documentary", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/cuelgame.py b/plugin.video.alfa/channels/cuelgame.py new file mode 100755 index 00000000..acd732e8 --- /dev/null +++ b/plugin.video.alfa/channels/cuelgame.py @@ -0,0 +1,1225 @@ +# -*- coding: utf-8 -*- + +import re +import unicodedata +import urlparse + +import xbmc +import xbmcgui +from core import logger +from core import scrapertools, httptools +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +api_key = 
"2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Cine[/COLOR]", action="scraper", + url="http://cuelgame.net/?category=4", + thumbnail="http://img5a.flixcart.com/image/poster/q/t/d/vintage-camera-collage-sr148-medium-400x400-imadkbnrnbpggqyz.jpeg", + fanart="http://imgur.com/7frGoPL.jpg")) + itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Series[/COLOR]", action="scraper", + url="http://cuelgame.net/?category=8", thumbnail="http://imgur.com/OjP42lL.jpg", + fanart="http://imgur.com/Xm49VbL.jpg")) + itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]TV[/COLOR]", action="scraper", + url="http://cuelgame.net/?category=67", thumbnail="http://imgur.com/C4VDnTo.png", + fanart="http://imgur.com/LDoJrAf.jpg")) + itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Documentales[/COLOR]", action="scraper", + url="http://cuelgame.net/?category=68", thumbnail="http://imgur.com/nofNYjy.jpg", + fanart="http://imgur.com/upB1jL8.jpg")) + itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Música[/COLOR]", action="scraper", + url="http://cuelgame.net/?category=13", thumbnail="http://imgur.com/DPrOlme.jpg", + fanart="http://imgur.com/FxM6xGY.jpg")) + + itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Buscar[/COLOR]", action="search", url="", + thumbnail="http://images2.alphacoders.com/846/84682.jpg", + fanart="http://imgur.com/1sIHN1r.jpg")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://cuelgame.net/search.php?q=%s" % (texto) + + try: + return scraper(item) + # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def scraper(item): + logger.info() + itemlist = [] + check_search = item.url + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |CET", "", data) + + # corrige la falta de imagen + # data = re.sub(r" </div><p>","</div><img src='http://ampaenriquealonso.files.wordpress.com/2011/09/logocineenlacalle.png' texto ><p>",data) + + ''' + <h2> <a href="magnet:?xt=urn:btih:4E7192D0885DDB9219699BBFFD72E709006BF9F2&dn=automata+2014+hdrip+xvid+sam+etrg&tr=udp%3A%2F%2Fopen.demonii.com%3A1337%2Fannounce" class="l:12087"onmousedown="return clk(this, 12087)" >Automata (2014) [HDRip] [VO] </a><img src="http://cuelgame.net/img/common/is-magnet.png" class="media-icon" width="18" height="15" alt="magnet" title="magnet" /> <a href="magnet:?xt=urn:btih:4E7192D0885DDB9219699BBFFD72E709006BF9F2&dn=automata+2014+hdrip+xvid+sam+etrg&tr=udp%3A%2F%2Fopen.demonii.com%3A1337%2Fannounce" title="Direct link" rel="nofollow, noindex"><img src="http://cuelgame.net/img/common/link-02.png" class="media-icon" width="18" height="15" alt="Enlace directo" title="Enlace directo" /></a> </h2> <div class="news-submitted"><a href="/user/Dios" class="tooltip u:1179"><img src="http://cuelgame.net/cache/00/04/1179-1328100564-25.jpg" width="25" height="25" alt=""/></a><strong>magnet:?xt=urn:btih:4E7192D0885DDB9219699BBFFD72E709006BF...</strong><br /> por<a href="/user/Dios/history">Dios</a> hace7 horaspublicado hace5 horas 58 minutos</div><img src='http://cuelgame.net/cache/00/2f/thumb-12087.jpg' width='70' height='70' alt='' class='thumbnail'/><p> 
En un futuro no lejano, en el que el planeta Tierra sufre una creciente desertización, Jacq Vaucan (Antonio Banderas), un agente de seguros de una compañía de robótica, investiga un caso en apariencia rutinario cuando descubre algo que podría tener consecuencias decisivas para el futuro de la humanidad. Banderas produce y protagoniza este thriller futurista, que especula sobre lo que ocurriría si la inteligencia artificial superase a la humana.|<i> Más info. en comentarios.</i></p> + + ''' + # id_torrent = scrapertools.get_match(item.url,"(\d+)-") + patron = '<h2> <a href="([^"]+)".*?class="l:\d+".*?>(.*?)<\/a>(.*?)<p>([^<]+)<.*?p>.*?title="meta.*?href=".*?amp;(.*?)"' + ''' + patron += '<a href="([^"]+)".*?' + patron += 'class="l:\d+".*? >([^<]+)</a>.*?' + patron += '<img src=\'([^\']+)\'.*?' + patron += '<p>([^<]+)<.*?p>' + patron += '.*?class="counter">.*?<a href="\/\?meta=multimedia&(.*?)"' + ''' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitle, check_thumb, scrapedplot, check_multimedia in matches: + scrapedtitle = re.sub(r'\.', ' ', scrapedtitle) + try: + scrapedthumbnail = scrapertools.get_match(check_thumb, "</div><img src=\'([^\']+)\'") + except: + scrapedthumbnail = "http://ampaenriquealonso.files.wordpress.com/2011/09/logocineenlacalle.png" + item.url = check_search + title_year = re.sub(r"(\d+)p", "", scrapedtitle) + if "search" in item.url: + if "category=4" in check_multimedia or "category=8" in check_multimedia: + item.url = check_multimedia + if "category=4" in item.url: + try: + year = scrapertools.find_single_match(title_year, '.*?(\d\d\d\d)') + except: + year = "" + else: + year = "" + + title_fan = re.sub( + r"End Part \d|\||\[.*?\].*|\(.*?\).*|\d+x\d+.*?Final|-\d+|\d+x\d+|Temporada.*?Completa| ;|V.O|\d.*?GB|\+|subs|s\d+e\d+p.*|s\d+e\d+i.*|s\d+e\d+.*|S\d+E\d+[^<]+|VO|Serie.*|S\d+E\d+p.*|S\d+E\d+720p.*", + "", scrapedtitle) + + # No deja pasar items de la mula + if not scrapedurl.startswith("ed2k:"): + scrapedtitle = scrapedtitle.strip() + scrapedplot = re.sub(r"\|<i> Más info.
en comentarios.</i>", "", scrapedplot) + scrapedplot = re.sub(r"<.*?>", "", scrapedplot).strip() + + scrapedtitle = "[COLOR greenyellow]" + scrapedtitle + "[/COLOR]" + + extra = title_fan + "|" + year + "|" + scrapedplot + "|" + scrapedurl + "|" + item.url + + if "category=4" in item.url or "category=8" in item.url: + + itemlist.append(Item(channel=item.channel, title=scrapedtitle, url="", action="fanart", server="torrent", + thumbnail=scrapedthumbnail, extra=extra, folder=True)) + else: + + itemlist.append( + Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent", + thumbnail=scrapedthumbnail, folder=False)) + + # Extrae el paginador + + + patronvideos = '<a href="([^"]+)" rel="next">siguiente »</a>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + if len(matches) > 0: + # corrige "&" para la paginación + next_page = matches[0].replace("amp;", "") + + if "search" in check_search: + scrapedurl = urlparse.urljoin(check_search, next_page) + else: + scrapedurl = urlparse.urljoin(item.url, next_page) + itemlist.append(Item(channel=item.channel, action="scraper", title="Página siguiente >>", url=scrapedurl, + thumbnail="http://imgur.com/ycPgVVO.png", folder=True)) + + return itemlist + + +def fanart(item): + logger.info() + itemlist = [] + + check_sp = item.extra.split("|")[4] + title_fan = item.extra.split("|")[0] + fulltitle = item.title + fulltitle = re.sub(r"720p|1080.*", "", fulltitle) + title_fan = re.sub(r"H264.*|Netflix.*|Mitos Griegos|HDTV.*|\d\d\d\d", "", title_fan).strip() + item.title = title_fan.upper() + item.title = "[COLOR springgreen][B]" + item.title + "[/B][/COLOR]" + title = title_fan.replace(' ', '%20') + title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if + unicodedata.category(c) != 'Mn')).encode("ascii", "ignore") + item.url = item.extra.split("|")[3] + + try: + sinopsis = item.extra.split("|")[2] + except: + sinopsis = "" + + if "category=4" in check_sp: + id_tmdb = "" + # filmafinity + year = item.extra.split("|")[1] + + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url).data + + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf).data + else: + + try: + url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year) + data = browser(url_bing) + data = re.sub(r'\n|\r|\t|\s{2}| ', '', data) + + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + except: + pass + + if sinopsis == " ": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = 
sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]" + print "ozuu" + print critica + + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = re.sub(r":.*|\(.*?\)", "", title) + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false" + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica + show = item.fanart + "|" + "" + "|" + sinopsis + posterdb = item.thumbnail + fanart_info = item.fanart + fanart_3 = "" + fanart_2 = item.fanart + category = item.thumbnail + id_scraper = "" + + itemlist.append( + Item(channel=item.channel, title=item.title, url=item.url, action="play", server="torrent", + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show, category=category, + folder=False)) + + for id, fan in matches: + + fan = re.sub(r'\\|"', '', fan) + + try: + rating = scrapertools.find_single_match(data, '"vote_average":(.*?),') + except: + rating = "Sin puntuación" + + id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica + try: + posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"') + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + except: + posterdb = item.thumbnail + + if "null" in fan: + fanart = item.fanart + else: + fanart = "https://image.tmdb.org/t/p/original" + fan + item.extra = fanart + + url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=" + api_key + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, 
fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + + # clearart, fanart_2 y logo + url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"hdmovielogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if '"moviedisc"' in data: + disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"') + if '"movieposter"' in data: + poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"') + if '"moviethumb"' in data: + thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"') + if '"moviebanner"' in data: + banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"') + + if len(matches) == 0: + extra = posterdb + # "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png" + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + category = posterdb + + itemlist.append( + Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", + thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category, + folder=False)) + for logo in matches: + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + if '"moviebackground"' in data: + + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append( + Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", + thumbnail=logo, fanart=item.extra, extra=extra, show=show, category=category, + folder=False)) + else: + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append( + Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", + thumbnail=logo, fanart=item.extra, extra=extra, show=show, category=category, + folder=False)) + + if '"moviebackground"' in data: + + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + else: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = logo + + itemlist.append( + Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", + thumbnail=logo, fanart=item.extra, extra=extra, show=show, category=category, + folder=False)) + + if not '"hdmovieclearart"' in data and not '"moviebackground"' in data: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = item.extra + itemlist.append( + Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", + thumbnail=logo, fanart=item.extra, extra=extra, show=show, category=category, + folder=False)) + + + else: + # Busca la temporada y capitulo de la serie. 
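+        # Tries the usual episode tags in turn (S01E02, s01e02, 1x02); a hit leaves temp_epi empty, which later sends the item to findvideos, while "No capitulos" marks a release without episode numbering that is played directly as a torrent.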
+ + try: + temp, epi = scrapertools.find_single_match(fulltitle, 'S(\d+)E(\d+)') + temp_epi = "" + except: + try: + temp, epi = scrapertools.find_single_match(fulltitle, 's(\d+)e(\d+)') + temp_epi = "" + except: + try: + temp, epi = scrapertools.find_single_match(fulltitle, '(\d+)x(\d+)') + temp_epi = "" + except: + temp_epi = "No capitulos" + + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % (title.replace(' ', '+')) + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + try: + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + except: + pass + + try: + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + year = scrapertools.get_match(data, '<dt>Año</dt>.*?>(.*?)</dd>') + except: + year = "" + + if sinopsis == " ": + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuación" + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta serie no tiene críticas[/B][/COLOR]" + + ### Search TMDB + + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false&first_air_date_year=" + year + data_tmdb = httptools.downloadpage(url_tmdb).data + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + + ### Use Bing to find the show's IMDb id + if len(matches) == 0: + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es" + data_tmdb = httptools.downloadpage(url_tmdb).data + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + if len(matches) == 0: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + try: + subdata_imdb = 
scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + except: + pass + + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + imdb_id = "" + ### Look up the TVDB and TMDB ids from the IMDb id + + urlremotetbdb = "https://api.themoviedb.org/3/find/" + imdb_id + "?api_key=" + api_key + "&external_source=imdb_id&language=es" + data_tmdb = httptools.downloadpage(urlremotetbdb).data + matches = scrapertools.find_multiple_matches(data_tmdb, + '"tv_results":.*?"id":(.*?),.*?"poster_path":(.*?),"popularity"') + + if len(matches) == 0: + id_tmdb = "" + fanart_3 = "" + extra = item.thumbnail + "|" + year + "|" + "no data" + "|" + "no data" + "|" + "Sin puntuación" + "|" + "" + "|" + "" + "|" + id_tmdb + show = item.fanart + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + item.thumbnail + "|" + id_tmdb + fanart_info = item.fanart + fanart_2 = item.fanart + id_scraper = "" + category = "" + posterdb = item.thumbnail + if temp_epi: + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="play", + thumbnail=item.thumbnail, fanart=item.fanart, server="torrent", + extra=extra, category=category, show=show, folder=False)) + else: + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart=item.fanart, fulltitle=fulltitle, + extra=extra, category=category, show=show, folder=True)) + + for id_tmdb, fan in matches: + ### Look up the TVDB id + urlid_tvdb = "https://api.themoviedb.org/3/tv/" + id_tmdb + "/external_ids?api_key=" + api_key + "&language=es" + data_tvdb = httptools.downloadpage(urlid_tvdb).data + id = scrapertools.find_single_match(data_tvdb, 'tvdb_id":(.*?),"tvrage_id"') + if id == "null": + id = "" + category = id + ### Fetch number of episodes, seasons and status + url_status = "http://api.themoviedb.org/3/tv/" + id_tmdb + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_status = httptools.downloadpage(url_status).data + season_episodes = scrapertools.find_single_match(data_status, + '"(number_of_episodes":\d+,"number_of_seasons":\d+,)"') + season_episodes = re.sub(r'"', '', season_episodes) + season_episodes = re.sub(r'number_of_episodes', 'Episodios ', season_episodes) + season_episodes = re.sub(r'number_of_seasons', 'Temporadas', season_episodes) + season_episodes = re.sub(r'_', ' ', season_episodes) + status = scrapertools.find_single_match(data_status, '"status":"(.*?)"') + if status == "Ended": + status = "Finalizada" + else: + status = "En emisión" + status = status + " (" + season_episodes + ")" + status = re.sub(r',', '.', status) + ####### + + fan = re.sub(r'\\|"', '', fan) + try: + # TheTVDB rating + url_rating_tvdb = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id + "/es.xml" + data = httptools.downloadpage(url_rating_tvdb).data + rating = scrapertools.find_single_match(data, '<Rating>(.*?)<') + except: + rating = "" + try: + rating = scrapertools.get_match(data, '"vote_average":(.*?),') + except: + + rating = "Sin puntuación" + + id_scraper = id_tmdb + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + rating + "|" + status # +"|"+emision + + posterdb = scrapertools.find_single_match(data_tmdb, '"poster_path":(.*?)",') + + if "null" in posterdb: + posterdb = item.thumbnail + else: + posterdb = re.sub(r'\\|"', '', posterdb) + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + + if "null" in fan: + 
fanart = item.fanart + else: + fanart = "https://image.tmdb.org/t/p/original" + fan + + item.extra = fanart + + url = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/images?api_key=" + api_key + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + if fanart == item.fanart: + fanart = fanart_info + url = "http://webservice.fanart.tv/v3/tv/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"clearlogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"tvbanner"' in data: + tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') + tfv = tvbanner + elif '"tvposter"' in data: + tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + tfv = tvposter + else: + tfv = posterdb + if '"tvthumb"' in data: + tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') + if '"hdtvlogo"' in data: + hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') + if '"hdclearart"' in data: + hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') + if len(matches) == 0: + if '"hdtvlogo"' in data: + if "showbackground" in data: + + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + if temp_epi: + itemlist.append(Item(channel=item.channel, title=item.title, action="play", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, + category=category, extra=extra, show=show, folder=False)) + else: + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + fulltitle=temp + "|" + epi, thumbnail=thumbnail, fanart=item.extra, + category=category, extra=extra, show=show, folder=True)) + + + else: + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + if temp_epi: + itemlist.append(Item(channel=item.channel, title=item.title, action="play", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, + category=category, extra=extra, show=show, folder=False)) + else: + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + fulltitle=temp + "|" + epi, thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, 
category=category, folder=True)) + else: + extra = "" + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + if temp_epi: + itemlist.append( + Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", + thumbnail=posterdb, fanart=fanart, extra=extra, show=show, category=category, + folder=False)) + else: + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + fulltitle=temp + "|" + epi, thumbnail=posterdb, fanart=fanart, extra=extra, + show=show, category=category, folder=True)) + + for logo in matches: + if '"hdtvlogo"' in data: + thumbnail = hdtvlogo + elif not '"hdtvlogo"' in data: + if '"clearlogo"' in data: + thumbnail = logo + else: + thumbnail = item.thumbnail + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + if "showbackground" in data: + + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + if temp_epi: + itemlist.append(Item(channel=item.channel, title=item.title, action="play", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=False)) + else: + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + fulltitle=temp + "|" + epi, thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + else: + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + if temp_epi: + itemlist.append(Item(channel=item.channel, title=item.title, action="play", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=False)) + else: + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + fulltitle=temp + "|" + epi, thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if "showbackground" in data: + + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = logo + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + if temp_epi: + itemlist.append(Item(channel=item.channel, title=item.title, action="play", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=False)) + else: + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + fulltitle=temp + "|" + epi, thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not '"clearart"' in data and not '"showbackground"' in data: + if '"hdclearart"' in data: + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + if temp_epi: + itemlist.append( + Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", + thumbnail=thumbnail, fanart=item.extra, extra=extra, 
show=show, category=category, + folder=False)) + else: + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + fulltitle=temp + "|" + epi, thumbnail=thumbnail, fanart=item.extra, + extra=extra, show=show, category=category, folder=True)) + title_info = "[COLOR olivedrab][B]Info[/B][/COLOR]" + if not "serie" in item.url: + thumbnail = posterdb + + if "serie" in item.url: + + if '"tvposter"' in data: + thumbnail = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + else: + thumbnail = item.thumbnail + + if "tvbanner" in data: + category = tvbanner + else: + category = show + if '"tvthumb"' in data: + plot = item.plot + "|" + tvthumb + else: + plot = item.plot + "|" + item.thumbnail + if '"tvbanner"' in data: + plot = plot + "|" + tvbanner + elif '"tvthumb"' in data: + plot = plot + "|" + tvthumb + else: + plot = plot + "|" + item.thumbnail + else: + if '"moviethumb"' in data: + plot = item.plot + "|" + thumb + else: + plot = item.plot + "|" + posterdb + + if '"moviebanner"' in data: + plot = plot + "|" + banner + else: + if '"hdmovieclearart"' in data: + plot = plot + "|" + clear + + else: + plot = plot + "|" + posterdb + + id = id_scraper + + extra = extra + "|" + id + "|" + title.encode('utf8') + "|" + check_sp + + itemlist.append( + Item(channel=item.channel, action="info", title=title_info, plot=plot, url=item.url, thumbnail=thumbnail, + fanart=fanart_info, extra=extra, category=category, show=show, viewmode="movie_with_plot", folder=False)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + temp = item.fulltitle.split("|")[0] + epi = item.fulltitle.split("|")[1] + + url_temp = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/season/" + temp + "/images?api_key=" + api_key + "" + data = httptools.downloadpage(url_temp).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + for thumtemp in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + thumtemp + title = item.show.split("|")[3] + " " + temp + "x" + epi + title = "[COLOR lightgreen]" + title + "[/COLOR]" + itemlist.append(Item(channel=item.channel, title=title, action="play", url=item.url, server="torrent", + thumbnail=item.show.split("|")[4], extra=item.extra, show=item.show, + fanart=item.show.split("|")[0], fulltitle=title, folder=False)) + extra = item.extra + "|" + temp + "|" + epi + title_info = " Info" + title_info = "[COLOR darkseagreen]" + title_info + "[/COLOR]" + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url, thumbnail=thumbnail, + fanart=item.show.split("|")[1], extra=extra, show=item.show, category=item.category, folder=False)) + return itemlist + + +def info(item): + logger.info() + itemlist = [] + url = item.url + id = item.extra + + if "serie" in item.extra.split("|")[3]: + try: + rating_tmdba_tvdb = item.extra.split("|")[6] + if item.extra.split("|")[6] == "": + rating_tmdba_tvdb = "Sin puntuación" + except: + rating_tmdba_tvdb = "Sin puntuación" + else: + rating_tmdba_tvdb = item.extra.split("|")[3] + rating_filma = item.extra.split("|")[4] + + filma = "http://s6.postimg.org/6yhe5fgy9/filma.png" + + try: + if "serie" in item.extra.split("|")[3]: + title = item.extra.split("|")[8] + + else: + title = item.extra.split("|")[6] + title = title.replace("%20", " ") + title = "[COLOR green][B]" + title + "[/B][/COLOR]" + except: + title = item.title + + try: + if "." in rating_tmdba_tvdb: + check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).') + else: + check_rat_tmdba = rating_tmdba_tvdb + if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8: + rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10: + rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + else: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + except: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + try: + check_rat_filma = scrapertools.get_match(rating_filma, '(\d)') + if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8: + rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]" + elif int(check_rat_filma) >= 8: + rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]" + else: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + except: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + + try: + if not "serie" in item.extra.split("|")[3]: + url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_plot = httptools.downloadpage(url_plot).data + plot, tagline = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",.*?"tagline":(".*?")') + if plot == "": + plot = item.show.split("|")[2] + + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\", "", plot) + + else: + plot = item.show.split("|")[2] + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\", "", plot) + + if item.extra.split("|")[7] != "": + tagline = item.extra.split("|")[7] + # tagline= re.sub(r',','.',tagline) + else: + tagline = "" + except: + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Esta película no tiene información..." + plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + info = "" + + if "serie" in item.extra.split("|")[3]: + check2 = "serie" + icon = "http://s6.postimg.org/hzcjag975/tvdb.png" + foto = item.show.split("|")[1] + if item.extra.split("|")[5] != "": + critica = item.extra.split("|")[5] + else: + critica = "Esta serie no tiene críticas..." 
+ + photo = item.extra.split("|")[0].replace(" ", "%20") + try: + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + except: + tagline = "" + + else: + + critica = item.extra.split("|")[5] + if "%20" in critica: + critica = "No hay críticas" + icon = "http://imgur.com/SenkyxF.png" + photo = item.extra.split("|")[0].replace(" ", "%20") + foto = item.show.split("|")[1] + + try: + if tagline == "\"\"": + tagline = " " + except: + tagline = " " + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + check2 = "pelicula" + # Tambien te puede interesar + peliculas = [] + if "serie" in item.extra.split("|")[3]: + + url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = httptools.downloadpage(url_tpi).data + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),"popularity"') + + else: + url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = httptools.downloadpage(url_tpi).data + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),"popularity"') + + for idp, peli, thumb in tpi: + + thumb = re.sub(r'"|}', '', thumb) + if "null" in thumb: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + else: + thumb = "https://image.tmdb.org/t/p/original" + thumb + peliculas.append([idp, peli, thumb]) + + check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow") + infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline, + 'rating': rating} + item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma, + critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/HqI3JnO.png") + from channels import infoplus + infoplus.start(item_info, peliculas) + + +def info_capitulos(item): + logger.info() + url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" + item.extra.split("|")[ + 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es" + + if "/0" in url: + url = url.replace("/0", "/") + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[ + 2] + "/" + item.extra.split("|")[3] + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?<Overview>(.*?)</Overview>.*?<Rating>(.*?)</Rating>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." 
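+            # Fallback shown when neither TMDB nor TheTVDB returned episode info; the lines below wrap it in Kodi colour markup.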
+ plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + + else: + + for name_epi, info, rating in matches: + if "<filename>episodes" in data: + foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') + fanart = "http://thetvdb.com/banners/" + foto + else: + fanart = item.extra.split("|")[1] + plot = info + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/wSIln04.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + else: + for name_epi, info, fanart, rating in matches: + if info == "" or info == "\\": + info = "Sin informacion del capítulo aún..." + plot = info + plot = re.sub(r'/n', '', plot) + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + image = re.sub(r'"|}', '', image) + if "null" in image: + image = "http://imgur.com/ZiEAVOD.png" + else: + image = "https://image.tmdb.org/t/p/original" + image + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/X5Xy4ip.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." 
in rating: + rating = re.sub(r'10\.\d+', '10', rating) + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/btby9SG.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +# Para la busqueda en bing evitando baneos + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + 
br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) + br.addheaders = [('User-agent', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + r = br.open(url) + response = r.read() + if "img,divreturn" in response: + # Bing served its bot-check page; retry the request through the ssl-proxy mirror + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + response = r.read() + + return response diff --git a/plugin.video.alfa/channels/cumlouder.json b/plugin.video.alfa/channels/cumlouder.json new file mode 100755 index 00000000..8c41c900 --- /dev/null +++ b/plugin.video.alfa/channels/cumlouder.json @@ -0,0 +1,23 @@ +{ + "id": "cumlouder", + "name": "Cumlouder", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "cumlouder.png", + "banner": "cumlouder.png", + "version": 1, + "changes": [ + { + "date": "04/05/17", + "description": "Corregido, usa proxy en caso de error con https" + }, + { + "date": "13/01/17", + "description": "First version" + } + ], + "categories": [ + "adult" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/cumlouder.py b/plugin.video.alfa/channels/cumlouder.py new file mode 100755 index 00000000..a3a2d007 --- /dev/null +++ b/plugin.video.alfa/channels/cumlouder.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + + config.set_setting("url_error", False, "cumlouder") + itemlist.append(item.clone(title="Últimos videos", action="videos", url="https://www.cumlouder.com/")) + itemlist.append(item.clone(title="Categorías", action="categorias", url="https://www.cumlouder.com/categories/")) + itemlist.append(item.clone(title="Pornstars", action="pornstars_list", url="https://www.cumlouder.com/girls/")) + itemlist.append(item.clone(title="Buscar", action="search", url="https://www.cumlouder.com/search?q=%s")) + + return itemlist + + +def search(item, texto): + logger.info() + + item.url = item.url % texto + item.action = "videos" + try: + return videos(item) + except: + import traceback + logger.error(traceback.format_exc()) + return [] + + +def pornstars_list(item): + logger.info() + itemlist = [] + for letra in "abcdefghijklmnopqrstuvwxyz": + itemlist.append(item.clone(title=letra.upper(), url=urlparse.urljoin(item.url, letra), action="pornstars")) + + return itemlist + + +def pornstars(item): + logger.info() + itemlist = [] + + data = get_data(item.url) + patron = '<a girl-url="[^"]+" class="[^"]+" href="([^"]+)" title="([^"]+)">[^<]+' + patron += '<img class="thumb" src="([^"]+)" [^<]+<h2[^<]+<span[^<]+</span[^<]+</h2[^<]+' + patron += '<span[^<]+<span[^<]+<span[^<]+</span>([^<]+)</span>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for url, 
title, thumbnail, count in matches: + if "go.php?" in url: + url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) + thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0]) + else: + url = urlparse.urljoin(item.url, url) + if not thumbnail.startswith("https"): + thumbnail = "https:%s" % thumbnail + itemlist.append(item.clone(title="%s (%s)" % (title, count), url=url, action="videos", thumbnail=thumbnail)) + + # Paginador + matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data) + if matches: + if "go.php?" in matches[0]: + url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0]) + else: + url = urlparse.urljoin(item.url, matches[0]) + itemlist.append(item.clone(title="Pagina Siguiente", url=url)) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + data = get_data(item.url) + # logger.info("channels.cumlouder data="+data) + patron = '<a tag-url="[^"]+" class="[^"]+" href="([^"]+)" title="([^"]+)">[^<]+' + patron += '<img class="thumb" src="([^"]+)".*?<span class="cantidad">([^"]+)</span>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for url, title, thumbnail, count in matches: + if "go.php?" in url: + url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) + thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0]) + else: + url = urlparse.urljoin(item.url, url) + if not thumbnail.startswith("https"): + thumbnail = "https:%s" % thumbnail + itemlist.append( + item.clone(title="%s (%s videos)" % (title, count), url=url, action="videos", thumbnail=thumbnail)) + + # Paginador + matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data) + if matches: + if "go.php?" in matches[0]: + url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0]) + else: + url = urlparse.urljoin(item.url, matches[0]) + itemlist.append(item.clone(title="Pagina Siguiente", url=url)) + + return itemlist + + +def videos(item): + logger.info() + itemlist = [] + + data = get_data(item.url) + patron = '<a class="muestra-escena" href="([^"]+)" title="([^"]+)"[^<]+<img class="thumb" src="([^"]+)".*?<span class="minutos"> <span class="ico-minutos sprite"></span> ([^<]+)</span>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for url, title, thumbnail, duration in matches: + if "go.php?" in url: + url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) + thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0]) + else: + url = urlparse.urljoin("https://www.cumlouder.com", url) + if not thumbnail.startswith("https"): + thumbnail = "https:%s" % thumbnail + itemlist.append(item.clone(title="%s (%s)" % (title, duration), url=urlparse.urljoin(item.url, url), + action="play", thumbnail=thumbnail, contentThumbnail=thumbnail, + contentType="movie", contentTitle=title)) + + # Paginador + nextpage = scrapertools.find_single_match(data, '<ul class="paginador"(.*?)</ul>') + matches = re.compile('<a href="([^"]+)" rel="nofollow">Next »</a>', re.DOTALL).findall(nextpage) + if not matches: + matches = re.compile('<li[^<]+<a href="([^"]+)">Next »</a[^<]+</li>', re.DOTALL).findall(nextpage) + if matches: + if "go.php?" 
in matches[0]: + url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0]) + else: + url = urlparse.urljoin(item.url, matches[0]) + + itemlist.append(item.clone(title="Pagina Siguiente", url=url)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + data = get_data(item.url) + patron = '<source src="([^"]+)" type=\'video/([^\']+)\' label=\'[^\']+\' res=\'([^\']+)\' />' + url, type, res = re.compile(patron, re.DOTALL).findall(data)[0] + if "go.php?" in url: + url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) + elif not url.startswith("http"): + url = "http:" + url.replace("&", "&") + itemlist.append( + Item(channel='cumlouder', action="play", title='Video' + res, fulltitle=type.upper() + ' ' + res, url=url, + server="directo", folder=False)) + + return itemlist + + +def get_data(url_orig): + try: + if config.get_setting("url_error", "cumlouder"): + raise Exception + response = httptools.downloadpage(url_orig) + if not response.data or "urlopen error [Errno 1]" in str(response.code): + raise Exception + except: + config.set_setting("url_error", True, "cumlouder") + import random + server_random = ['nl', 'de', 'us'] + server = server_random[random.randint(0, 2)] + url = "https://%s.hideproxy.me/includes/process.php?action=update" % server + post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \ + % (urllib.quote(url_orig), server) + while True: + response = httptools.downloadpage(url, post, follow_redirects=False) + if response.headers.get("location"): + url = response.headers["location"] + post = "" + else: + break + + return response.data diff --git a/plugin.video.alfa/channels/datoporn.json b/plugin.video.alfa/channels/datoporn.json new file mode 100755 index 00000000..ca5919ce --- /dev/null +++ b/plugin.video.alfa/channels/datoporn.json @@ -0,0 +1,23 @@ +{ + "id": "datoporn", + "name": "DatoPorn", + "language": "es", + "active": true, + "adult": true, + "changes": [ + { + "date": "28/05/2017", + "description": "Reparado por cambios en la página" + }, + { + "date": "21/02/2017", + "description": "Primera versión" + } + ], + "version": 1, + "thumbnail": "http://i.imgur.com/tBSWudd.png?1", + "banner": "datoporn.png", + "categories": [ + "adult" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/datoporn.py b/plugin.video.alfa/channels/datoporn.py new file mode 100755 index 00000000..ca62c977 --- /dev/null +++ b/plugin.video.alfa/channels/datoporn.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- + +from core import httptools +from core import logger +from core import scrapertools + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(action="categorias", title="Categorías", url="http://dato.porn/categories_all")) + itemlist.append(item.clone(title="Buscar...", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + item.url = "http://dato.porn/?k=%s&op=search" % texto.replace(" ", "+") + return lista(item) + + +def lista(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + # Extrae las entradas + patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\(\'([^\']+)\'.*?<span>(.*?)</span>.*?<b>(.*?)</b>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, duration, scrapedtitle in matches: + if "/embed-" not in scrapedurl: + scrapedurl = scrapedurl.replace("datoporn.com/", "datoporn.com/embed-") + ".html" 
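+            # Prefix the running time to the title whenever the scraper captured one.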
+ if duration: + scrapedtitle = "%s - %s" % (duration, scrapedtitle) + + itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg"))) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, "<a href='([^']+)'>Next") + if next_page and itemlist: + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + # Extrae las entradas (carpetas) + patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\((.*?)\).*?<span>(.*?)</span>.*?<b>(.*?)</b>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, numero, scrapedtitle in matches: + if numero: + scrapedtitle = "%s (%s)" % (scrapedtitle, numero) + + itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail)) + + return itemlist diff --git a/plugin.video.alfa/channels/descargacineclasico.json b/plugin.video.alfa/channels/descargacineclasico.json new file mode 100755 index 00000000..3bfc0e52 --- /dev/null +++ b/plugin.video.alfa/channels/descargacineclasico.json @@ -0,0 +1,23 @@ +{ + "id": "descargacineclasico", + "name": "descargacineclasico", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "banner": "descargacineclasico2.png", + "thumbnail": "descargacineclasico2.png", + "categories": [ + "movie" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/descargacineclasico.py b/plugin.video.alfa/channels/descargacineclasico.py new file mode 100755 index 00000000..181cb5c0 --- /dev/null +++ b/plugin.video.alfa/channels/descargacineclasico.py @@ -0,0 +1,178 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from channelselector import get_thumbnail_path +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item +from core.tmdb import Tmdb +from servers.decrypters import expurl + + +def agrupa_datos(data): + ## Agrupa los datos + data = re.sub(r'\n|\r|\t| |<br>|<!--.*?-->', '', data) + data = re.sub(r'\s+', ' ', data) + data = re.sub(r'>\s<', '><', data) + return data + + +def mainlist(item): + logger.info() + + thumb_buscar = get_thumbnail_path() + "thumb_search.png" + + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Últimas agregadas", action="agregadas", + url="http://www.descargacineclasico.net/", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Listado por género", action="porGenero", + url="http://www.descargacineclasico.net/")) + itemlist.append( + Item(channel=item.channel, title="Buscar", action="search", url="http://www.descargacineclasico.net/", + thumbnail=thumb_buscar)) + + return itemlist + + +def porGenero(item): + logger.info() + + itemlist = [] + data = scrapertools.cache_page(item.url) + logger.info("data=" + data) + + patron = '<ul class="columnas">(.*?)</ul>' + data = re.compile(patron, re.DOTALL).findall(data) + patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data[0]) + + for url, genero in matches: + itemlist.append( + 
Item(channel=item.channel, action="agregadas", title=genero, url=url, viewmode="movie_with_plot")) + + return itemlist + + +def search(item, texto): + logger.info() + + ''' + texto_get = texto.replace(" ","%20") + texto_post = texto.replace(" ","+") + item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post) + ''' + + texto = texto.replace(" ", "+") + item.url = "http://www.descargacineclasico.net/?s=" + texto + + try: + return agregadas(item) + # Se captura la excepci?n, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def agregadas(item): + logger.info() + itemlist = [] + ''' + # Descarga la pagina + if "?search=" in item.url: + url_search = item.url.split("?search=") + data = scrapertools.cache_page(url_search[0], url_search[1]) + else: + data = scrapertools.cache_page(item.url) + logger.info("data="+data) + ''' + + data = scrapertools.cache_page(item.url) + logger.info("data=" + data) + + # Extrae las entradas + fichas = re.sub(r"\n|\s{2}", "", scrapertools.get_match(data, '<div class="review-box-container">(.*?)wp-pagenavi')) + + # <a href="http://www.descargacineclasico.net/ciencia-ficcion/quatermass-2-1957/" + # title="Quatermass II (Quatermass 2) (1957) Descargar y ver Online"> + # <img style="border-radius:6px;" + # src="//www.descargacineclasico.net/wp-content/uploads/2015/12/Quatermass-II-2-1957.jpg" + # alt="Quatermass II (Quatermass 2) (1957) Descargar y ver Online Gratis" height="240" width="160"> + + + patron = '<div class="post-thumbnail"><a href="([^"]+)".*?' # url + patron += 'title="([^"]+)".*?' # title + patron += 'src="([^"]+).*?' # thumbnail + patron += '<p>([^<]+)' # plot + + matches = re.compile(patron, re.DOTALL).findall(fichas) + for url, title, thumbnail, plot in matches: + title = title[0:title.find("Descargar y ver Online")] + url = urlparse.urljoin(item.url, url) + thumbnail = urlparse.urljoin(url, thumbnail) + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url, + thumbnail=thumbnail, plot=plot, show=title)) + + # Paginación + try: + + # <ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li><li><span><a href="3">3</a></span></li><li><span><a href="4">4</a></span></li><li><span><a href="5">5</a></span></li><li><span><a href="6">6</a></span></li></ul> + + patron_nextpage = r'<a class="nextpostslink" rel="next" href="([^"]+)' + next_page = re.compile(patron_nextpage, re.DOTALL).findall(data) + itemlist.append(Item(channel=item.channel, action="agregadas", title="Página siguiente >>", url=next_page[0], + viewmode="movie_with_plot")) + except: + pass + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + + data = scrapertools.cache_page(item.url) + + data = scrapertools.unescape(data) + + titulo = item.title + titulo_tmdb = re.sub("([0-9+])", "", titulo.strip()) + + oTmdb = Tmdb(texto_buscado=titulo_tmdb, idioma_busqueda="es") + item.fanart = oTmdb.get_backdrop() + + # Descarga la pagina + # data = scrapertools.cache_page(item.url) + patron = '#div_\d_\D.+?<img id="([^"]+).*?<span>.*?</span>.*?<span>(.*?)</span>.*?imgdes.*?imgdes/([^\.]+).*?<a href=([^\s]+)' # Añado calidad + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches: + title = titulo + "_" + scrapedidioma + "_" + scrapedserver + "_" + scrapedcalidad + 
itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl, + thumbnail=item.thumbnail, plot=item.plot, show=item.show, fanart=item.fanart)) + + return itemlist + + +def play(item): + logger.info() + + video = expurl.expand_url(item.url) + + itemlist = [] + + itemlist = servertools.find_video_items(data=video) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/descargasmix.json b/plugin.video.alfa/channels/descargasmix.json new file mode 100755 index 00000000..646514c2 --- /dev/null +++ b/plugin.video.alfa/channels/descargasmix.json @@ -0,0 +1,64 @@ +{ + "id": "descargasmix", + "name": "DescargasMIX", + "language": "es", + "active": true, + "version": 1, + "adult": false, + "changes": [ + { + "date": "06/05/17", + "description": "Cambio de dominio" + }, + { + "date": "17/04/17", + "description": "Mejorado en la deteccion del dominio para futuros cambios" + }, + { + "date": "09/04/17", + "description": "Arreglado por cambios en la página" + }, + { + "date": "27/01/17", + "description": "Sección online en películas modificada" + }, + { + "date": "08/07/16", + "description": "Adaptado el canal a las nuevas funciones" + } + ], + "thumbnail": "descargasmix.png", + "banner": "descargasmix.png", + "categories": [ + "movie", + "latino", + "vos", + "torrent", + "documentary", + "anime", + "tvshow" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/descargasmix.py b/plugin.video.alfa/channels/descargasmix.py new file mode 100755 index 00000000..996ef66a --- /dev/null +++ b/plugin.video.alfa/channels/descargasmix.py @@ -0,0 +1,537 @@ +# -*- coding: utf-8 -*- + +import re +import urllib + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +__modo_grafico__ = config.get_setting("modo_grafico", "descargasmix") +__perfil__ = config.get_setting("perfil", "descargasmix") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] +host = config.get_setting("host", "descargasmix") + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + # Resetear host y comprobacion de error en https (por si se actualiza Kodi) + config.set_setting("url_error", False, "descargasmix") + host = config.set_setting("host", "https://ddmix.net", "descargasmix") + host_check = get_data(host, True) + if host_check and host_check.startswith("http"): + config.set_setting("host", host_check, "descargasmix") + + itemlist.append(item.clone(title="Películas", action="lista", fanart="http://i.imgur.com/c3HS8kj.png")) + itemlist.append(item.clone(title="Series", action="lista_series", fanart="http://i.imgur.com/9loVksV.png")) + itemlist.append(item.clone(title="Documentales", action="entradas", url="%s/documentales/" % host, + 
fanart="http://i.imgur.com/Q7fsFI6.png")) + itemlist.append(item.clone(title="Anime", action="entradas", url="%s/anime/" % host, + fanart="http://i.imgur.com/whhzo8f.png")) + itemlist.append(item.clone(title="Deportes", action="entradas", url="%s/deportes/" % host, + fanart="http://i.imgur.com/ggFFR8o.png")) + itemlist.append(item.clone(title="", action="")) + itemlist.append(item.clone(title="Buscar...", action="search")) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + try: + item.url = "%s/?s=%s" % (host, texto) + return busqueda(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def busqueda(item): + logger.info() + itemlist = [] + + data = get_data(item.url) + + contenido = ['Películas', 'Series', 'Documentales', 'Anime', 'Deportes', 'Miniseries', 'Vídeos'] + bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" ' + 'role="complementary">') + patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \ + '.*?<span class="overlay.*?>(.*?)<.*?<p class="stats">(.*?)</p>' + matches = scrapertools.find_multiple_matches(bloque, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, info, scrapedcat in matches: + if not [True for c in contenido if c in scrapedcat]: + continue + scrapedurl = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedurl)) + scrapedthumbnail = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedthumbnail)) + if not scrapedthumbnail.startswith("http"): + scrapedthumbnail = "http:" + scrapedthumbnail + scrapedthumbnail = scrapedthumbnail.replace("-129x180", "") + if ("Películas" in scrapedcat or "Documentales" in scrapedcat) and "Series" not in scrapedcat: + titulo = scrapedtitle.split("[")[0] + if info: + scrapedtitle += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8") + itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, contentTitle=titulo, + thumbnail=scrapedthumbnail, fulltitle=titulo, contentType="movie")) + else: + itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, contentTitle=scrapedtitle, + show=scrapedtitle, contentType="tvshow")) + + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"') + if next_page: + next_page = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', next_page)) + itemlist.append(item.clone(action="busqueda", title=">> Siguiente", url=next_page)) + + return itemlist + + +def lista(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/peliculas" % host)) + itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host)) + itemlist.append(item.clone(title="Dvdrip", action="entradas", url="%s/peliculas/dvdrip" % host)) + itemlist.append(item.clone(title="HD (720p/1080p)", action="entradas", url="%s/peliculas/hd" % host)) + itemlist.append(item.clone(title="HDRIP", action="entradas", url="%s/peliculas/hdrip" % host)) + itemlist.append(item.clone(title="Latino", 
action="entradas", + url="%s/peliculas/latino-peliculas" % host)) + itemlist.append(item.clone(title="VOSE", action="entradas", url="%s/peliculas/subtituladas" % host)) + itemlist.append(item.clone(title="3D", action="entradas", url="%s/peliculas/3d" % host)) + + return itemlist + + +def lista_series(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/series/" % host)) + itemlist.append(item.clone(title="Miniseries", action="entradas", url="%s/series/miniseries" % host)) + + return itemlist + + +def entradas(item): + logger.info() + itemlist = [] + item.text_color = color2 + + data = get_data(item.url) + bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" ' + 'role="complementary">') + contenido = ["series", "deportes", "anime", 'miniseries'] + c_match = [True for match in contenido if match in item.url] + # Patron dependiendo del contenido + if True in c_match: + patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \ + '.*?<span class="overlay(|[^"]+)">' + matches = scrapertools.find_multiple_matches(bloque, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedinfo in matches: + scrapedurl = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedurl)) + if scrapedinfo != "": + scrapedinfo = scrapedinfo.replace(" ", "").replace("-", " ") + + scrapedinfo = " [%s]" % unicode(scrapedinfo, "utf-8").capitalize().encode("utf-8") + titulo = scrapedtitle + scrapedinfo + titulo = scrapertools.decodeHtmlentities(titulo) + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) + scrapedthumbnail = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedthumbnail)) + if not scrapedthumbnail.startswith("http"): + scrapedthumbnail = "http:" + scrapedthumbnail + scrapedthumbnail = scrapedthumbnail.replace("-129x180", "") + scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \ + urllib.quote(scrapedthumbnail.rsplit("/", 1)[1]) + if "series" in item.url or "anime" in item.url: + item.show = scrapedtitle + itemlist.append(item.clone(action="episodios", title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail, + fulltitle=scrapedtitle, contentTitle=scrapedtitle, contentType="tvshow")) + else: + patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \ + '.*?<span class="overlay.*?>(.*?)<.*?<p class="stats">(.*?)</p>' + matches = scrapertools.find_multiple_matches(bloque, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, info, categoria in matches: + scrapedurl = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedurl)) + titulo = scrapertools.decodeHtmlentities(scrapedtitle) + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.split("[")[0]) + action = "findvideos" + show = "" + if "Series" in categoria: + action = "episodios" + show = scrapedtitle + elif categoria and categoria != "Películas" and categoria != "Documentales": + try: + titulo += " [%s]" % categoria.rsplit(", ", 1)[1] + except: + titulo += " [%s]" % categoria + if 'l-espmini' in info: + titulo += " [ESP]" + if 'l-latmini' in info: + titulo += " [LAT]" + if 'l-vosemini' in info: + titulo += " [VOSE]" + + if info: + titulo += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8") + + scrapedthumbnail = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedthumbnail)) + if not scrapedthumbnail.startswith("http"): + scrapedthumbnail = "http:" + scrapedthumbnail + scrapedthumbnail = scrapedthumbnail.replace("-129x180", "") + 
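+ # The filename part of the thumbnail URL is re-encoded below with urllib.quote, + # so spaces or accented characters do not break Kodi's artwork loading + # (e.g. a hypothetical ".../Pelicula 2.jpg" becomes ".../Pelicula%202.jpg").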
scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \ + urllib.quote(scrapedthumbnail.rsplit("/", 1)[1]) + + itemlist.append(item.clone(action=action, title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail, + fulltitle=scrapedtitle, contentTitle=scrapedtitle, viewmode="movie_with_plot", + show=show, contentType="movie")) + + # Paginación + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"') + if next_page: + next_page = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', next_page)) + itemlist.append(item.clone(title=">> Siguiente", url=next_page, text_color=color3)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = get_data(item.url) + patron = '(<ul class="menu" id="seasons-list">.*?<div class="section-box related-posts">)' + bloque = scrapertools.find_single_match(data, patron) + matches = scrapertools.find_multiple_matches(bloque, '<div class="polo".*?>(.*?)</div>') + for scrapedtitle in matches: + scrapedtitle = scrapedtitle.strip() + new_item = item.clone() + new_item.infoLabels['season'] = scrapedtitle.split(" ", 1)[0].split("x")[0] + new_item.infoLabels['episode'] = scrapedtitle.split(" ", 1)[0].split("x")[1] + if item.fulltitle != "Añadir esta serie a la videoteca": + title = item.fulltitle + " " + scrapedtitle.strip() + else: + title = scrapedtitle.strip() + itemlist.append(new_item.clone(action="findvideos", title=title, extra=scrapedtitle, fulltitle=title, + contentType="episode")) + + itemlist.sort(key=lambda it: it.title, reverse=True) + item.plot = scrapertools.find_single_match(data, '<strong>SINOPSIS</strong>:(.*?)</p>') + if item.show != "" and item.extra == "": + itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", + text_color="magenta")) + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, + text_color="green")) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist[:-2], __modo_grafico__) + except: + pass + + return itemlist + + +def epienlaces(item): + logger.info() + itemlist = [] + item.text_color = color3 + + data = get_data(item.url) + data = data.replace("\n", "").replace("\t", "") + # Bloque de enlaces + patron = '<div class="polo".*?>%s(.*?)(?:<div class="polo"|</li>)' % item.extra.strip() + bloque = scrapertools.find_single_match(data, patron) + + patron = '<div class="episode-server">.*?data-sourcelk="([^"]+)"' \ + '.*?data-server="([^"]+)"' \ + '.*?<div class="caliycola">(.*?)</div>' + matches = scrapertools.find_multiple_matches(bloque, patron) + + itemlist.append(item.clone(action="", title="Enlaces Online/Descarga", text_color=color1)) + lista_enlaces = [] + for scrapedurl, scrapedserver, scrapedcalidad in matches: + if scrapedserver == "ul": + scrapedserver = "uploadedto" + if scrapedserver == "streamin": + scrapedserver = "streaminto" + titulo = " %s [%s]" % (unicode(scrapedserver, "utf-8").capitalize().encode("utf-8"), scrapedcalidad) + # Enlaces descarga + if scrapedserver == "magnet": + itemlist.insert(0, + item.clone(action="play", title=titulo, server="torrent", url=scrapedurl, extra=item.url)) + else: + if servertools.is_server_enabled(scrapedserver): + try: + servers_module = __import__("servers." 
+ scrapedserver) + lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl, + extra=item.url)) + except: + pass + lista_enlaces.reverse() + itemlist.extend(lista_enlaces) + + if itemlist[0].server == "torrent": + itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1)) + + return itemlist + + +def findvideos(item): + logger.info() + if (item.extra and item.extra != "findvideos") or item.path: + return epienlaces(item) + + itemlist = [] + item.text_color = color3 + + data = get_data(item.url) + item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>') + year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)') + if year: + try: + from core import tmdb + item.infoLabels['year'] = year + tmdb.set_infoLabels_item(item, __modo_grafico__) + except: + pass + + old_format = False + # Patron torrent antiguo formato + if "Enlaces de descarga</div>" in data: + old_format = True + matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"') + for scrapedurl in matches: + scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)') + scrapedurl = urllib.unquote(re.sub(r'&b=4', '', scrapedurl)) + title = "[Torrent] " + title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix')) + itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, + text_color="green")) + + # Patron online + data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-posts">') + if data_online: + title = "Enlaces Online" + if '"l-latino2"' in data_online: + title += " [LAT]" + elif '"l-esp2"' in data_online: + title += " [ESP]" + elif '"l-vose2"' in data_online: + title += " [VOSE]" + + patron = 'make_links.*?,[\'"]([^"\']+)["\']' + matches = scrapertools.find_multiple_matches(data_online, patron) + for i, code in enumerate(matches): + enlace = mostrar_enlaces(code) + enlaces = servertools.findvideos(data=enlace[0]) + if enlaces and "peliculas.nu" not in enlaces: + if i == 0: + extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>') + size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip() + + if size: + title += " [%s]" % size + new_item = item.clone(title=title, action="", text_color=color1) + if extra_info: + extra_info = scrapertools.htmlclean(extra_info) + new_item.infoLabels["plot"] = extra_info + new_item.title += " +INFO" + itemlist.append(new_item) + + title = " Ver vídeo en " + enlaces[0][2] + itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1])) + scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'") + if scriptg: + gvideo = urllib.unquote_plus(scriptg.replace("@", "%")) + url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"') + if url: + itemlist.append(item.clone(action="play", server="directo", url=url, extra=item.url, + title=" Ver vídeo en Googlevideo (Máxima calidad)")) + + # Patron descarga + patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \ + '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-' \ + 'posts">)' + bloques_descarga = scrapertools.find_multiple_matches(data, patron) + for title_bloque, bloque in bloques_descarga: + if title_bloque == "Ver online": + 
continue + if '"l-latino2"' in bloque: + title_bloque += " [LAT]" + elif '"l-esp2"' in bloque: + title_bloque += " [ESP]" + elif '"l-vose2"' in bloque: + title_bloque += " [VOSE]" + + extra_info = scrapertools.find_single_match(bloque, '<span class="tooltiptext">(.*?)</span>') + size = scrapertools.find_single_match(bloque, '(?i)TAMAÑO:\s*(.*?)<').strip() + + if size: + title_bloque += " [%s]" % size + new_item = item.clone(title=title_bloque, action="", text_color=color1) + if extra_info: + extra_info = scrapertools.htmlclean(extra_info) + new_item.infoLabels["plot"] = extra_info + new_item.title += " +INFO" + itemlist.append(new_item) + + if '<div class="subiendo">' in bloque: + itemlist.append(item.clone(title=" Los enlaces se están subiendo", action="")) + continue + patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']' + matches = scrapertools.find_multiple_matches(bloque, patron) + for scrapedserver, scrapedurl in matches: + if (scrapedserver == "ul") | (scrapedserver == "uploaded"): + scrapedserver = "uploadedto" + titulo = unicode(scrapedserver, "utf-8").capitalize().encode("utf-8") + if titulo == "Magnet" and old_format: + continue + elif titulo == "Magnet" and not old_format: + title = " Enlace Torrent" + scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)') + scrapedurl = urllib.unquote(re.sub(r'&b=4', '', scrapedurl)) + itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, + text_color="green")) + continue + if servertools.is_server_enabled(scrapedserver): + try: + servers_module = __import__("servers." + scrapedserver) + # Saca numero de enlaces + urls = mostrar_enlaces(scrapedurl) + numero = str(len(urls)) + titulo = " %s - Nº enlaces: %s" % (titulo, numero) + itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver)) + except: + pass + + itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", + text_color="magenta")) + if item.extra != "findvideos" and config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", action="add_pelicula_to_library", + extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle}, + fulltitle=item.fulltitle, text_color="green")) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + if not item.url.startswith("http") and not item.url.startswith("magnet"): + post = "source=%s&action=obtenerurl" % urllib.quote(item.url) + headers = {'X-Requested-With': 'XMLHttpRequest'} + data = httptools.downloadpage("%s/wp-admin/admin-ajax.php" % host.replace("https", "http"), post=post, + headers=headers, follow_redirects=False).data + + url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace("\\", "") + if "enlacesmix" in url: + data = httptools.downloadpage(url, headers={'Referer': item.extra}, follow_redirects=False).data + url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"') + enlaces = servertools.findvideosbyserver(url, item.server) + if enlaces: + itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1])) + else: + itemlist.append(item.clone()) + + return itemlist + + +def enlaces(item): + logger.info() + itemlist = [] + + urls = mostrar_enlaces(item.extra) + numero = len(urls) + for enlace in urls: + enlaces = servertools.findvideos(data=enlace) + if enlaces: + for link in enlaces: + if "/folder/" in enlace: + titulo = link[0] + else: + titulo = 
"%s - Enlace %s" % (item.title.split("-")[0], str(numero)) + numero -= 1 + itemlist.append(item.clone(action="play", server=link[2], title=titulo, url=link[1])) + + itemlist.sort(key=lambda it: it.title) + return itemlist + + +def mostrar_enlaces(data): + import base64 + data = data.split(",") + len_data = len(data) + urls = [] + for i in range(0, len_data): + url = [] + value1 = base64.b64decode(data[i]) + value2 = value1.split("-") + for j in range(0, len(value2)): + url.append(chr(int(value2[j]))) + + urls.append("".join(url)) + + return urls + + +def get_data(url_orig, get_host=False): + try: + if config.get_setting("url_error", "descargasmix"): + raise Exception + response = httptools.downloadpage(url_orig) + if not response.data or "urlopen error [Errno 1]" in str(response.code): + raise Exception + if get_host: + if response.url.endswith("/"): + response.url = response.url[:-1] + return response.url + except: + config.set_setting("url_error", True, "descargasmix") + import random + server_random = ['nl', 'de', 'us'] + server = server_random[random.randint(0, 2)] + url = "https://%s.hideproxy.me/includes/process.php?action=update" % server + post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \ + % (url_orig, server) + while True: + response = httptools.downloadpage(url, post, follow_redirects=False) + if response.headers.get("location"): + url = response.headers["location"] + post = "" + else: + if get_host: + target = urllib.unquote(scrapertools.find_single_match(url, 'u=([^&]+)&')) + if target.endswith("/"): + target = target[:-1] + if target and target != host: + return target + else: + return "" + break + + return response.data diff --git a/plugin.video.alfa/channels/discoverymx.json b/plugin.video.alfa/channels/discoverymx.json new file mode 100755 index 00000000..0f3cedc0 --- /dev/null +++ b/plugin.video.alfa/channels/discoverymx.json @@ -0,0 +1,24 @@ +{ + "id": "discoverymx", + "name": "Discoverymx", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "discoverymx.png", + "banner": "discoverymx.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "latino", + "documentary" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/discoverymx.py b/plugin.video.alfa/channels/discoverymx.py new file mode 100755 index 00000000..5c44fbe9 --- /dev/null +++ b/plugin.video.alfa/channels/discoverymx.py @@ -0,0 +1,173 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="listvideos", + url="http://discoverymx.blogspot.com/")) + itemlist.append(Item(channel=item.channel, title="Documentales - Series Disponibles", action="DocuSeries", + url="http://discoverymx.blogspot.com/")) + itemlist.append(Item(channel=item.channel, title="Documentales - Tag", action="DocuTag", + url="http://discoverymx.blogspot.com/")) + itemlist.append(Item(channel=item.channel, title="Documentales - Archivo por meses", action="DocuARCHIVO", + url="http://discoverymx.blogspot.com/")) + + return itemlist + + +def DocuSeries(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cache_page(item.url) + + # Extrae las entradas (carpetas) + patronvideos = '<li><b><a href="([^"]+)" target="_blank">([^<]+)</a></b></li>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for match in matches: + scrapedurl = match[0] + scrapedtitle = match[1] + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + +def DocuTag(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cache_page(item.url) + patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span class='label-count' dir='ltr'>(.+?)</span>" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for match in matches: + scrapedurl = match[0] + scrapedtitle = match[1] + " " + match[2] + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + +def DocuARCHIVO(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cache_page(item.url) + patronvideos = "<a class='post-count-link' href='([^']+)'>([^<]+)</a>[^<]+" + patronvideos += "<span class='post-count' dir='ltr'>(.+?)</span>" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for match in matches: + scrapedurl = match[0] + scrapedtitle = match[1] + " " + match[2] + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + +def listvideos(item): + logger.info() + itemlist = [] + + scrapedthumbnail 
= "" + scrapedplot = "" + + # Descarga la página + data = scrapertools.cache_page(item.url) + patronvideos = "<h3 class='post-title entry-title'[^<]+" + patronvideos += "<a href='([^']+)'>([^<]+)</a>.*?" + patronvideos += "<div class='post-body entry-content'(.*?)<div class='post-footer'>" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for match in matches: + scrapedtitle = match[1] + scrapedtitle = re.sub("<[^>]+>", " ", scrapedtitle) + scrapedtitle = scrapertools.unescape(scrapedtitle) + scrapedurl = match[0] + regexp = re.compile(r'src="(http[^"]+)"') + + matchthumb = regexp.search(match[2]) + if matchthumb is not None: + scrapedthumbnail = matchthumb.group(1) + matchplot = re.compile('<div align="center">(<img.*?)</span></div>', re.DOTALL).findall(match[2]) + + if len(matchplot) > 0: + scrapedplot = matchplot[0] + # print matchplot + else: + scrapedplot = "" + + scrapedplot = re.sub("<[^>]+>", " ", scrapedplot) + scrapedplot = scrapertools.unescape(scrapedplot) + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + # Añade al listado de XBMC + itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + # Extrae la marca de siguiente página + patronvideos = "<a class='blog-pager-older-link' href='([^']+)'" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + if len(matches) > 0: + scrapedtitle = "Página siguiente" + scrapedurl = urlparse.urljoin(item.url, matches[0]) + scrapedthumbnail = "" + scrapedplot = "" + itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cachePage(item.url) + data = scrapertools.get_match(data, "<div class='post-body entry-content'(.*?)<div class='post-footer'>") + + # Busca los enlaces a los videos + listavideos = servertools.findvideos(data) + + for video in listavideos: + videotitle = scrapertools.unescape(video[0]) + url = video[1] + server = video[2] + # xbmctools.addnewvideo( item.channel , "play" , category , server , , url , thumbnail , plot ) + itemlist.append(Item(channel=item.channel, action="play", server=server, title=videotitle, url=url, + thumbnail=item.thumbnail, plot=item.plot, fulltitle=item.title, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/divxatope.json b/plugin.video.alfa/channels/divxatope.json new file mode 100755 index 00000000..8690adca --- /dev/null +++ b/plugin.video.alfa/channels/divxatope.json @@ -0,0 +1,63 @@ +{ + "id": "divxatope", + "name": "Divxatope", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "divxatope.png", + "banner": "divxatope.png", + "version": 1, + "changes": [ + { + "date": "17/04/17", + "description": "Reparados torrents, añadidas nuevas secciones" + }, + { + "date": "13/01/17", + "description": "Reparados torrents y paginacion. Añadida seccion Peliculas 4K ultraHD" + }, + { + "date": "31/12/16", + "description": "Adaptado, por cambios en la web" + }, + { + "date": "01/07/16", + "description": "Eliminado código innecesario." 
+ }, + { + "date": "29/04/16", + "description": "Adaptar a Buscador global y Novedades Peliculas y Series" + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/divxatope.py b/plugin.video.alfa/channels/divxatope.py new file mode 100755 index 00000000..79855355 --- /dev/null +++ b/plugin.video.alfa/channels/divxatope.py @@ -0,0 +1,347 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="menu", title="Películas", url="http://www.divxatope1.com/", + extra="Peliculas", folder=True)) + itemlist.append( + Item(channel=item.channel, action="menu", title="Series", url="http://www.divxatope1.com", extra="Series", + folder=True)) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...")) + return itemlist + + +def menu(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + # logger.info("data="+data) + + data = scrapertools.find_single_match(data, item.extra + "</a[^<]+<ul(.*?)</ul>") + # logger.info("data="+data) + + patron = "<li><a.*?href='([^']+)'[^>]+>([^<]+)</a></li>" + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url, thumbnail=thumbnail, plot=plot, + folder=True)) + if title != "Todas las Peliculas": + itemlist.append( + Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail, + plot=plot, folder=True)) + + if item.extra == "Peliculas": + title = "4k UltraHD" + url = "http://divxatope1.com/peliculas-hd/4kultrahd/" + thumbnail = "" + plot = "" + itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url, thumbnail=thumbnail, plot=plot, + folder=True)) + itemlist.append( + Item(channel=item.channel, action="alfabetico", title=title + " [A-Z]", url=url, thumbnail=thumbnail, + plot=plot, + folder=True)) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://www.divxatope1.com/buscar/descargas" + item.extra = urllib.urlencode({'q': texto}) + + try: + itemlist = lista(item) + + # Esta pagina coloca a veces contenido duplicado, intentamos descartarlo + dict_aux = {} + for i in itemlist: + if not i.url in dict_aux: + dict_aux[i.url] = i + else: + itemlist.remove(i) + + return itemlist + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + itemlist = [] + item =
Item() + try: + if categoria == 'peliculas': + item.url = "http://www.divxatope1.com/peliculas" + + elif categoria == 'series': + item.url = "http://www.divxatope1.com/series" + + else: + return [] + + itemlist = lista(item) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + # Esta pagina coloca a veces contenido duplicado, intentamos descartarlo + dict_aux = {} + for i in itemlist: + if not i.url in dict_aux: + dict_aux[i.url] = i + else: + itemlist.remove(i) + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + # return dict_aux.values() + return itemlist + + +def alfabetico(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + patron = '<ul class="alfabeto">(.*?)</ul>' + data = scrapertools.get_match(data, patron) + + patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.upper() + url = scrapedurl + + itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url)) + + return itemlist + + +def lista(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url, post=item.extra).data + # logger.info("data="+data) + + bloque = scrapertools.find_single_match(data, '(?:<ul class="pelilist">|<ul class="buscar-list">)(.*?)</ul>') + patron = '<li[^<]+' + patron += '<a href="([^"]+)".*?' + patron += 'src="([^"]+)".*?' + patron += '<h2[^>]*>(.*?)</h2.*?' + patron += '(?:<strong[^>]*>|<span[^>]*>)(.*?)(?:</strong>|</span>)' + + matches = re.compile(patron, re.DOTALL).findall(bloque) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedthumbnail, scrapedtitle, calidad in matches: + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + title = scrapedtitle.strip() + if scrapertools.htmlclean(calidad): + title += " (" + scrapertools.htmlclean(calidad) + ")" + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + contentTitle = scrapertools.htmlclean(scrapedtitle).strip() + patron = '([^<]+)<br>' + matches = re.compile(patron, re.DOTALL).findall(calidad + '<br>') + idioma = '' + + if "divxatope1.com/serie" in url: + contentTitle = re.sub('\s+-|\.{3}$', '', contentTitle) + capitulo = '' + temporada = 0 + episodio = 0 + + if len(matches) == 3: + calidad = matches[0].strip() + idioma = matches[1].strip() + capitulo = matches[2].replace('Cap', 'x').replace('Temp', '').replace(' ', '') + temporada, episodio = capitulo.strip().split('x') + + itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url, + thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle, + language=idioma, contentSeason=int(temporada), + contentEpisodeNumber=int(episodio), contentQuality=calidad)) + + else: + if len(matches) == 2: + calidad = matches[0].strip() + idioma = matches[1].strip() + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, + thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle, + language=idioma, contentThumbnail=thumbnail, contentQuality=calidad)) 
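+ # Pagination: the site exposes either a plain "Next" anchor, e.g. + # <li><a href="http://www.divxatope1.com/peliculas/page/2">Next</a></li> (illustrative + # markup, not taken from the site), or a JavaScript "paginar" button that posts the + # page number; both variants are handled below, the second reusing item.extra as POST data.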
+ + next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>') + if next_page_url != "": + itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", + url=urlparse.urljoin(item.url, next_page_url), folder=True)) + else: + next_page_url = scrapertools.find_single_match(data, + '<li><input type="button" class="btn-submit" value="Siguiente" onClick="paginar..(\d+)') + if next_page_url != "": + itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=item.url, + extra=item.extra + "&pg=" + next_page_url, folder=True)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url, post=item.extra).data + # logger.info("data="+data) + + patron = '<div class="chap-desc"[^<]+' + patron += '<a class="chap-title".*?href="([^"]+)" title="([^"]+)"[^<]+' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail, + plot=plot, folder=True)) + + next_page_url = scrapertools.find_single_match(data, "<a class='active' href=[^<]+</a><a\s*href='([^']+)'") + if next_page_url != "": + itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente", + url=urlparse.urljoin(item.url, next_page_url), folder=True)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>') + item.plot = scrapertools.htmlclean(item.plot).strip() + item.contentPlot = item.plot + + link = scrapertools.find_single_match(data, 'href="http://(?:tumejorserie|tumejorjuego).*?link=([^"]+)"') + if link != "": + link = "http://www.divxatope1.com/" + link + logger.info("torrent=" + link) + itemlist.append( + Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title, + url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False, + parentContent=item)) + + patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+" + patron += '<div class="box2">([^<]+)</div[^<]+' + patron += '<div class="box3">([^<]+)</div[^<]+' + patron += '<div class="box4">([^<]+)</div[^<]+' + patron += '<div class="box5">(.*?)</div[^<]+' + patron += '<div class="box6">([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + itemlist_ver = [] + itemlist_descargar = [] + + for servername, idioma, calidad, scrapedurl, comentarios in matches: + title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")" + servername = servername.replace("uploaded", "uploadedto").replace("1fichier", "onefichier") + if comentarios.strip() != "": + title = title + " (" + comentarios.strip() + ")" + url = urlparse.urljoin(item.url, scrapedurl) + mostrar_server = servertools.is_server_enabled(servername) + if mostrar_server: + thumbnail = servertools.guess_server_thumbnail(title) + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], 
thumbnail=[" + thumbnail + "]") + action = "play" + if "partes" in title: + action = "extract_url" + new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url, + thumbnail=thumbnail, plot=plot, parentContent=item) + if comentarios.startswith("Ver en"): + itemlist_ver.append(new_item) + else: + itemlist_descargar.append(new_item) + + for new_item in itemlist_ver: + itemlist.append(new_item) + + for new_item in itemlist_descargar: + itemlist.append(new_item) + + return itemlist + + +def extract_url(item): + logger.info() + + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url( + videoitem.url) + ")" + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist + + +def play(item): + logger.info() + + if item.server != "torrent": + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url( + videoitem.url) + ")" + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + else: + itemlist = [item] + + return itemlist diff --git a/plugin.video.alfa/channels/divxtotal.json b/plugin.video.alfa/channels/divxtotal.json new file mode 100755 index 00000000..3803aa1d --- /dev/null +++ b/plugin.video.alfa/channels/divxtotal.json @@ -0,0 +1,42 @@ +{ + "id": "divxtotal", + "name": "Divxtotal", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://imgur.com/Madj03A.jpg", + "version": 1, + "changes": [ + { + "date": "26/04/2017", + "description": "Release" + }, + { + "date": "28/06/2017", + "description": "Correcciones código por cambios web" + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/divxtotal.py b/plugin.video.alfa/channels/divxtotal.py new file mode 100755 index 00000000..fa4477ab --- /dev/null +++ b/plugin.video.alfa/channels/divxtotal.py @@ -0,0 +1,1025 @@ +# -*- coding: utf-8 -*- + +import os +import re +import urllib +from threading import Thread + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'} + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +__modo_grafico__ = config.get_setting('modo_grafico', "divxtotal") + + +# Para la busqueda en bing evitando baneos + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) +
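+ # The handlers below make mechanize behave like a regular browser (gzip and + # redirects enabled, robots.txt/referer processing disabled) so that Bing + # does not reject the scripted search request.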
br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) + # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(item.clone(title="[COLOR orange][B]Películas[/B][/COLOR]", action="scraper", + url="http://www.divxtotal.com/peliculas/", thumbnail="http://imgur.com/A4zN3OP.png", + fanart="http://imgur.com/fdntKsy.jpg", contentType="movie")) + itemlist.append(item.clone(title="[COLOR orange][B] Películas HD[/B][/COLOR]", action="scraper", + url="http://www.divxtotal.com/peliculas-hd/", thumbnail="http://imgur.com/A4zN3OP.png", + fanart="http://imgur.com/fdntKsy.jpg", contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR orange][B]Series[/B][/COLOR]", action="scraper", + url="http://www.divxtotal.com/series/", thumbnail="http://imgur.com/GPX2wLt.png", + contentType="tvshow")) + + itemlist.append(itemlist[-1].clone(title="[COLOR orangered][B]Buscar[/B][/COLOR]", action="search", + thumbnail="http://imgur.com/aiJmi3Z.png")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://www.divxtotal.com/?s=" + texto + item.extra = "search" + try: + return buscador(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscador(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url, headers=header, cookies=False).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = scrapertools.find_multiple_matches(data, + '<tr><td class="text-left"><a href="([^"]+)" title="([^"]+)">.*?-left">(.*?)</td>') + + for url, title, check in patron: + + if "N/A" in check: + checkmt = "tvshow" + + else: + checkmt = "movie" + + titulo = title + title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*\[.*?]\]", "", title) + title = re.sub(r"’|PRE-Estreno", "'", title) + + if checkmt == "movie": + new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title, + contentType="movie", library=True) + else: + if item.extra == "search": + new_item = item.clone(action="findtemporadas", title=titulo, url=url, fulltitle=title, + contentTitle=title, show=title, contentType="tvshow", library=True) + else: + new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title, + show=title, contentType="tvshow", library=True) + new_item.infoLabels['year'] = get_year(url) + 
itemlist.append(new_item) + ## Paginación + next = scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'") + if len(next) > 0: + url = next + + itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url)) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + if not "Siguiente >>" in item.title: + if "0." in str(item.infoLabels['rating']): + item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]" + else: + item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]" + item.title = item.title + " " + str(item.infoLabels['rating']) + except: + pass + + return itemlist + + +def scraper(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url, headers=header, cookies=False).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + if item.contentType == "movie": + patron = scrapertools.find_multiple_matches(data, + '<tr><td><a href="([^"]+)" title="([^"]+)".*?\d+-\d+-([^"]+)</td><td>') + + for url, title, year in patron: + titulo = re.sub(r"\d+\d+\d+\d+|\(.*?\).*", "", title) + title = re.sub(r"!|¡|HD|\d+\d+\d+\d+|\(.*?\).*", "", title) + title = title.replace("Autosia", "Autopsia") + title = re.sub(r"’|PRE-Estreno", "'", title) + new_item = item.clone(action="findvideos", title="[COLOR orange]" + titulo + "[/COLOR]", url=url, + fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True) + new_item.infoLabels['year'] = get_year(url) + itemlist.append(new_item) + + + else: + + patron = scrapertools.find_multiple_matches(data, + '<p class="secconimagen"><a href="([^"]+)" title="[^"]+"><img src="([^"]+)".*?title="[^"]+">([^"]+)</a>') + + for url, thumb, title in patron: + titulo = title.strip() + title = re.sub(r"\d+x.*|\(.*?\)", "", title) + new_item = item.clone(action="findvideos", title="[COLOR orange]" + titulo + "[/COLOR]", url=url, + thumbnail=thumb, + fulltitle=title, contentTitle=title, show=title, contentType="tvshow", library=True) + new_item.infoLabels['year'] = get_year(url) + itemlist.append(new_item) + + ## Paginación + next = scrapertools.find_single_match(data, "<ul class=\"pagination\">.*?\(current\).*?href='([^']+)'") + if len(next) > 0: + url = next + + itemlist.append( + item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png", + url=url)) + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + if not "Siguiente >>" in item.title: + if "0."
in str(item.infoLabels['rating']): + item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]" + else: + item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]" + item.title = item.title + " " + str(item.infoLabels['rating']) + + except: + pass + + for item_tmdb in itemlist: + logger.info(str(item_tmdb.infoLabels['tmdb_id'])) + + return itemlist + + +def findtemporadas(item): + logger.info() + itemlist = [] + + if item.extra == "search": + th = Thread(target=get_art(item)) + th.setDaemon(True) + th.start() + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + if len(item.extra.split("|")): + if len(item.extra.split("|")) >= 4: + fanart = item.extra.split("|")[2] + extra = item.extra.split("|")[3] + try: + fanart_extra = item.extra.split("|")[4] + except: + fanart_extra = item.extra.split("|")[3] + try: + fanart_info = item.extra.split("|")[5] + except: + fanart_info = item.extra.split("|")[3] + elif len(item.extra.split("|")) == 3: + fanart = item.extra.split("|")[2] + extra = item.extra.split("|")[0] + fanart_extra = item.extra.split("|")[0] + fanart_info = item.extra.split("|")[1] + elif len(item.extra.split("|")) == 2: + fanart = item.extra.split("|")[1] + extra = item.extra.split("|")[0] + fanart_extra = item.extra.split("|")[0] + fanart_info = item.extra.split("|")[1] + else: + extra = item.extra + fanart_extra = item.extra + fanart_info = item.extra + try: + logger.info(fanart_extra) + logger.info(fanart_info) + except: + fanart_extra = item.fanart + fanart_info = item.fanart + + bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) </a>(.*?)</table>') + for temporada, bloque_epis in bloque_episodios: + item.infoLabels = item.InfoLabels + item.infoLabels['season'] = temporada + itemlist.append(item.clone(action="epis", + title="[COLOR saddlebrown][B]Temporada [/B][/COLOR]" + "[COLOR sandybrown][B]" + temporada + "[/B][/COLOR]", + url=bloque_epis, contentType=item.contentType, contentTitle=item.contentTitle, + show=item.show, extra=item.extra, fanart_extra=fanart_extra, fanart_info=fanart_info, + datalibrary=data, folder=True)) + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + item.fanart = fanart + item.extra = extra + if config.get_videolibrary_support() and itemlist: + if len(bloque_episodios) == 1: + extra = "epis" + else: + extra = "epis###serie_add" + + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'], + 'imdb_id': item.infoLabels['imdb_id']} + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc", + action="add_serie_to_library", extra=extra, url=item.url, + contentSerieName=item.fulltitle, infoLabels=infoLabels, + thumbnail='http://imgur.com/xQNTqqy.png', datalibrary=data)) + return itemlist + + +def epis(item): + logger.info() + itemlist = [] + if item.extra == "serie_add": + item.url = item.datalibrary + + patron = scrapertools.find_multiple_matches(item.url, + '<td><img src=".*?images/(.*?)\.png.*?<a href="([^"]+)" title="">.*?(\d+x\d+).*?td>') + for idioma, url, epi in patron: + episodio = scrapertools.find_single_match(epi, '\d+x(\d+)') + item.infoLabels['episode'] = episodio + itemlist.append( + item.clone(title="[COLOR orange]" + epi + "[/COLOR]" + "[COLOR sandybrown] " + idioma + "[/COLOR]", url=url, + action="findvideos", show=item.show, fanart=item.extra, extra=item.extra, + fanart_extra=item.fanart_extra,
fanart_info=item.fanart_info, folder=True)) + if item.extra != "serie_add": + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + item.fanart = item.extra + if item.infoLabels['title']: title = "[COLOR burlywood]" + item.infoLabels['title'] + "[/COLOR]" + item.title = item.title + " -- \"" + title + "\"" + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + + if not item.infoLabels['episode']: + th = Thread(target=get_art(item)) + th.setDaemon(True) + th.start() + + if item.contentType != "movie": + + if not item.infoLabels['episode']: + capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)') + url_capitulo = scrapertools.find_single_match(data, + '<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)') + + if len(item.extra.split("|")) >= 2: + extra = item.extra + else: + extra = item.fanart + else: + capitulo = item.title + url_capitulo = item.url + + ext_v, size = ext_size(url_capitulo) + try: + fanart = item.fanart_extra + except: + fanart = item.extra.split("|")[0] + itemlist.append(Item(channel=item.channel, + title="[COLOR chocolate][B]Ver capítulo " + capitulo + "[/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]", + url=url_capitulo, action="play", server="torrent", fanart=fanart, thumbnail=item.thumbnail, + extra=item.extra, fulltitle=item.fulltitle, folder=False)) + + if item.infoLabels['episode'] and item.library: + thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg') + if thumbnail == "": + thumbnail = item.thumbnail + if not "assets.fanart" in item.fanart_info: + fanart = item.fanart_info + else: + fanart = item.fanart + itemlist.append(Item(channel=item.channel, title="[COLOR darksalmon][B] info[/B][/COLOR]", + action="info_capitulos", fanart=fanart, thumbnail=item.thumb_art, + thumb_info=item.thumb_info, extra=item.extra, show=item.show, + InfoLabels=item.infoLabels, folder=False)) + + if not item.infoLabels['episode']: + itemlist.append( + Item(channel=item.channel, title="[COLOR moccasin][B]Todos los episodios[/B][/COLOR]", url=item.url, + action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1], + thumbnail=item.thumbnail, extra=item.extra + "|" + item.thumbnail, contentType=item.contentType, + contentTitle=item.contentTitle, InfoLabels=item.infoLabels, thumb_art=item.thumb_art, + thumb_info=item.thumbnail, fulltitle=item.fulltitle, library=item.library, folder=True)) + + else: + url = scrapertools.find_single_match(data, '<h3 class="orange text-center">.*?href="([^"]+)"') + item.infoLabels['year'] = None + ext_v, size = ext_size(url) + itemlist.append(Item(channel=item.channel, + title="[COLOR saddlebrown][B]Torrent [/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]", + url=url, action="play", server="torrent", fanart=item.fanart, thumbnail=item.thumbnail, + extra=item.extra, InfoLabels=item.infoLabels, folder=False)) + + if item.library and config.get_videolibrary_support() and len(itemlist) > 0: + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], + 'title': item.infoLabels['title']} + itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", + action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, + 
text_color="0xFFe5ffcc", + thumbnail='http://imgur.com/xQNTqqy.png')) + + return itemlist + + +def info_capitulos(item, images={}): + logger.info() + try: + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + str(item.InfoLabels['tvdb_id']) + "/default/" + str( + item.InfoLabels['season']) + "/" + str(item.InfoLabels['episode']) + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + from core import jsontools + data = httptools.downloadpage(url).data + + if "<filename>episodes" in data: + image = scrapertools.find_single_match(data, '<Data>.*?<filename>(.*?)</filename>') + image = "http://thetvdb.com/banners/" + image + else: + try: + image = item.InfoLabels['episodio_imagen'] + except: + image = "http://imgur.com/ZiEAVOD.png" + + foto = item.thumb_info + if not ".png" in foto: + foto = "http://imgur.com/PRiEW1D.png" + try: + title = item.InfoLabels['episodio_titulo'] + except: + title = "" + title = "[COLOR red][B]" + title + "[/B][/COLOR]" + + try: + plot = "[COLOR peachpuff]" + str(item.InfoLabels['episodio_sinopsis']) + "[/COLOR]" + except: + plot = scrapertools.find_single_match(data, '<Overview>(.*?)</Overview>') + if plot == "": + plot = "Sin información todavia" + try: + rating = item.InfoLabels['episodio_vote_average'] + except: + rating = 0 + try: + + if rating >= 5 and rating < 8: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]" + elif rating >= 8 and rating < 10: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]" + elif rating == 10: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]" + else: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" + except: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + + + except: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." 
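+ # Fallback texts and placeholder images used when no episode data could be + # retrieved from thetvdb; the "noinfo" PNGs are generic artwork.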
+ plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/K6wduMe.png') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): + i = 0 + while i < len(text): + m = match(text, i) + s = 
m.group(m.lastindex) + i = m.end() + if m.lastindex == 2: + yield "s" + yield text[i:i + int(s)] + i = i + int(s) + else: + yield s + + +def decode_item(next, token): + if token == "i": + # integer: "i" value "e" + data = int(next()) + if next() != "e": + raise ValueError + elif token == "s": + # string: "s" value (virtual tokens) + data = next() + elif token == "l" or token == "d": + # container: "l" (or "d") values "e" + data = [] + tok = next() + while tok != "e": + data.append(decode_item(next, tok)) + tok = next() + if token == "d": + data = dict(zip(data[0::2], data[1::2])) + else: + raise ValueError + return data + + +def decode(text): + try: + src = tokenize(text) + data = decode_item(src.next, src.next()) + for token in src: # look for more tokens + raise SyntaxError("trailing junk") + except (AttributeError, ValueError, StopIteration): + try: + data = data + except: + data = src + + return data + + +def convert_size(size): + import math + if (size == 0): + return '0B' + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return '%s %s' % (s, size_name[i]) + + +def fanartv(item, id_tvdb, id, images={}): + headers = [['Content-Type', 'application/json']] + from core import jsontools + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % id + + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb + try: + data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) + if data and not "error message" in data: + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + images[key] = value + else: + images = [] + + except: + images = [] + return images + + +def filmaffinity(item, infoLabels): + title = infoLabels["title"].replace(" ", "+") + try: + year = infoLabels["year"] + except: + year = "" + sinopsis = infoLabels["sinopsis"] + + if year == "": + if item.contentType != "movie": + tipo = "serie" + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title + else: + tipo = "película" + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title + try: + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + try: + data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data + except: + data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data + else: + try: + data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data + except: + data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + except: + pass + else: + tipo = "Pelicula" + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url, cookies=False).data + 
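+        # The advanced-search results page lists matching posters; the first
+        # "mc-poster" link points at the film's own page. If nothing matches,
+        # a Bing "site:filmaffinity.com" search is used as a fallback.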
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf, cookies=False).data + else: + if item.contentType != "movie": + tipo = "serie" + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title + else: + tipo = "película" + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title + try: + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma, cookies=False).data + else: + data = httptools.downloadpage(url_filma, cookies=False).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + except: + pass + sinopsis_f = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis_f = sinopsis_f.replace("<br><br />", "\n") + sinopsis_f = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis_f) + try: + year_f = scrapertools.get_match(data, '<dt>Año</dt>.*?>(\d+)</dd>') + except: + year_f = "" + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta %s no tiene críticas todavía...[/B][/COLOR]" % tipo + + return critica, rating_filma, year_f, sinopsis_f + + +def get_art(item): + logger.info() + id = item.infoLabels['tmdb_id'] + check_fanart = item.infoLabels['fanart'] + if item.contentType != "movie": + tipo_ps = "tv" + else: + tipo_ps = "movie" + if not id: + year = item.extra + otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps) + id = otmdb.result.get("id") + + if id == None: + otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps) + id = otmdb.result.get("id") + if id == None: + if item.contentType == "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + item.fulltitle.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + item.fulltitle.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = 
re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es") + id = otmdb.result.get("id") + + if id == None: + if "(" in item.fulltitle: + title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)') + if item.contentType != "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", + data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", + data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, + idioma_busqueda="es") + id = otmdb.result.get("id") + + if not id: + fanart = item.fanart + + imagenes = [] + itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps) + images = itmdb.result.get("images") + if images: + for key, value in images.iteritems(): + for detail in value: + imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"]) + + if item.contentType == "movie": + if len(imagenes) >= 4: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + + else: + + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + else: + item.extra = imagenes[3] + "|" + imagenes[3] + elif len(imagenes) == 3: + + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + elif imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + item.extra = imagenes[1] + "|" + imagenes[1] + elif len(imagenes) == 2: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + else: + item.extra = imagenes[1] + "|" + imagenes[0] + elif len(imagenes) == 1: + item.extra = imagenes[0] + "|" + imagenes[0] + else: 
+ item.extra = item.fanart + "|" + item.fanart + id_tvdb = "" + else: + + if itmdb.result.get("external_ids").get("tvdb_id"): + id_tvdb = itmdb.result.get("external_ids").get("tvdb_id") + else: + id_tvdb = "" + + if len(imagenes) >= 6: + + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + \ + imagenes[5] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \ + imagenes[2] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \ + imagenes[1] + else: + item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + imagenes[2] + "|" + \ + imagenes[1] + elif len(imagenes) == 5: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[1] + else: + item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + "|" + imagenes[1] + elif len(imagenes) == 4: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[2] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[1] + else: + item.extra = imagenes[3] + "|" + imagenes[2] + "|" + imagenes[1] + + elif len(imagenes) == 3: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + elif imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + item.extra = imagenes[1] + "|" + imagenes[1] + elif len(imagenes) == 2: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + else: + item.extra = imagenes[1] + "|" + imagenes[0] + elif len(imagenes) == 1: + item.extra = imagenes[0] + "|" + imagenes[0] + else: + item.extra = item.fanart + "|" + item.fanart + item.extra = item.extra + images_fanarttv = fanartv(item, id_tvdb, id) + if images_fanarttv: + if item.contentType == "movie": + if images_fanarttv.get("moviedisc"): + item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url") + elif images_fanarttv.get("hdmovielogo"): + item.thumbnail = 
images_fanarttv.get("hdmovielogo")[0].get("url") + elif images_fanarttv.get("moviethumb"): + item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url") + elif images_fanarttv.get("moviebanner"): + item.thumbnail_ = images_fanarttv.get("moviebanner")[0].get("url") + else: + item.thumbnail = item.thumbnail + else: + if images_fanarttv.get("hdtvlogo"): + item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url") + elif images_fanarttv.get("clearlogo"): + item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url") + item.thumb_info = item.thumbnail + if images_fanarttv.get("tvbanner"): + item.thumb_art = images_fanarttv.get("tvbanner")[0].get("url") + elif images_fanarttv.get("tvthumb"): + item.thumb_art = images_fanarttv.get("tvthumb")[0].get("url") + else: + item.thumb_art = item.thumbnail + + else: + item.extra = item.extra + "|" + item.thumbnail + + +def get_year(url): + data = httptools.downloadpage(url, headers=header, cookies=False).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + year = scrapertools.find_single_match(data, '<h3>.*?(\d+\d+\d+\d+)') + if year == "": + year = " " + return year + + +def ext_size(url): + torrents_path = config.get_videolibrary_path() + '/torrents' + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) + try: + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url, torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + + torrent = decode(pepe) + try: + name = torrent["info"]["name"] + sizet = torrent["info"]['length'] + sizet = convert_size(sizet) + except: + name = "no disponible" + try: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}") + size = max([int(i) for i in check_video]) + for file in torrent["info"]["files"]: + manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) + if str(size) in manolo: + video = manolo + size = convert_size(size) + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + try: + size = sizet + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", name) + except: + size = "NO REPRODUCIBLE" + ext_v = "" + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + size = "" + return ext_v, size diff --git a/plugin.video.alfa/channels/documaniatv.json b/plugin.video.alfa/channels/documaniatv.json new file mode 100755 index 00000000..5338cc1c --- /dev/null +++ b/plugin.video.alfa/channels/documaniatv.json @@ -0,0 +1,58 @@ +{ + "id": "documaniatv", + "name": "DocumaniaTV", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/qMR9sg9.png", + "banner": "documaniatv.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "11/07/2016", + "description": "Reparadas cuentas de usuario." 
+ } + ], + "categories": [ + "documentary" + ], + "settings": [ + { + "id": "include_in_newest_documentales", + "type": "bool", + "label": "Incluir en Novedades - Documentales", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "documaniatvaccount", + "type": "bool", + "label": "Usar cuenta de documaniatv", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "documaniatvuser", + "type": "text", + "label": "Usuario", + "color": "0xFFd50b0b", + "enabled": "eq(-1,true)", + "visible": true + }, + { + "id": "documaniatvpassword", + "type": "text", + "label": "Contraseña", + "color": "0xFFd50b0b", + "enabled": "!eq(-1,)+eq(-2,true)", + "visible": true, + "hidden": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/documaniatv.py b/plugin.video.alfa/channels/documaniatv.py new file mode 100755 index 00000000..194467bd --- /dev/null +++ b/plugin.video.alfa/channels/documaniatv.py @@ -0,0 +1,374 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from core import config +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item + +host = "http://www.documaniatv.com/" +account = config.get_setting("documaniatvaccount", "documaniatv") + +headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + + +def login(): + logger.info() + + user = config.get_setting("documaniatvuser", "documaniatv") + password = config.get_setting("documaniatvpassword", "documaniatv") + if user == "" or password == "": + return True, "" + + data = scrapertools.cachePage(host, headers=headers) + if "http://www.documaniatv.com/user/" + user in data: + return False, user + + post = "username=%s&pass=%s&Login=Iniciar Sesión" % (user, password) + data = scrapertools.cachePage("http://www.documaniatv.com/login.php", headers=headers, post=post) + + if "Nombre de usuario o contraseña incorrectas" in data: + logger.error("login erróneo") + return True, "" + + return False, user + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(item.clone(action="novedades", title="Novedades", url="http://www.documaniatv.com/newvideos.html")) + itemlist.append( + item.clone(action="categorias", title="Categorías y Canales", url="http://www.documaniatv.com/browse.html")) + itemlist.append(item.clone(action="novedades", title="Top", url="http://www.documaniatv.com/topvideos.html")) + itemlist.append(item.clone(action="categorias", title="Series Documentales", + url="http://www.documaniatv.com/top-series-documentales-html")) + itemlist.append(item.clone(action="viendo", title="Viendo ahora", url="http://www.documaniatv.com")) + itemlist.append(item.clone(action="", title="")) + itemlist.append(item.clone(action="search", title="Buscar")) + + folder = False + action = "" + if account: + error, user = login() + if error: + title = "Playlists Personales (Error en usuario y/o contraseña)" + else: + title = "Playlists Personales (Logueado)" + action = "usuario" + folder = True + + else: + title = "Playlists Personales (Sin cuenta configurada)" + user = "" + + url = "http://www.documaniatv.com/user/%s" % user + itemlist.append(item.clone(title=title, action=action, url=url, folder=folder)) + itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", + folder=False)) + return itemlist + + +def configuracion(item): + from platformcode import platformtools + 
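+    # show_channel_settings() opens this channel's settings dialog; once it
+    # closes, the Kodi container is refreshed so new credentials or options
+    # take effect immediately.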
platformtools.show_channel_settings() + if config.is_xbmc(): + import xbmc + xbmc.executebuiltin("Container.Refresh") + + +def newest(categoria): + itemlist = [] + item = Item() + try: + if categoria == 'documentales': + item.url = "http://www.documaniatv.com/newvideos.html" + itemlist = novedades(item) + + if itemlist[-1].action == "novedades": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def search(item, texto): + logger.info() + data = scrapertools.cachePage(host, headers=headers) + item.url = scrapertools.find_single_match(data, 'form action="([^"]+)"') + "?keywords=%s&video-id=" + texto = texto.replace(" ", "+") + item.url = item.url % texto + try: + return novedades(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def novedades(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = scrapertools.cachePage(item.url, headers=headers) + # Saca el plot si lo tuviese + scrapedplot = scrapertools.find_single_match(data, '<div class="pm-section-head">(.*?)</div>') + if "<div" in scrapedplot: + scrapedplot = "" + else: + scrapedplot = scrapertools.htmlclean(scrapedplot) + bloque = scrapertools.find_multiple_matches(data, '<li class="col-xs-[\d] col-sm-[\d] col-md-[\d]">(.*?)</li>') + + if "Registrarse" in data or not account: + for match in bloque: + patron = '<span class="pm-label-duration">(.*?)</span>.*?<a href="([^"]+)"' \ + '.*?title="([^"]+)".*?data-echo="([^"]+)"' + matches = scrapertools.find_multiple_matches(match, patron) + for duracion, scrapedurl, scrapedtitle, scrapedthumbnail in matches: + contentTitle = scrapedtitle[:] + scrapedtitle += " [" + duracion + "]" + if not scrapedthumbnail.startswith("data:image"): + scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1] + else: + scrapedthumbnail = item.thumbnail + logger.debug( + "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot, + fulltitle=scrapedtitle, contentTitle=contentTitle, folder=False)) + else: + for match in bloque: + patron = '<span class="pm-label-duration">(.*?)</span>.*?onclick="watch_later_add\(([\d]+)\)' \ + '.*?<a href="([^"]+)".*?title="([^"]+)".*?data-echo="([^"]+)"' + matches = scrapertools.find_multiple_matches(match, patron) + for duracion, video_id, scrapedurl, scrapedtitle, scrapedthumbnail in matches: + contentTitle = scrapedtitle[:] + scrapedtitle += " [" + duracion + "]" + if not scrapedthumbnail.startswith("data:image"): + scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1] + else: + scrapedthumbnail = item.thumbnail + logger.debug( + "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot, + id=video_id, + fulltitle=scrapedtitle, contentTitle=contentTitle)) + + # Busca enlaces de paginas siguientes... 
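+    # The "»" arrow carries a relative href, so it is resolved against the
+    # host with urlparse.urljoin before being queued as the next page.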
+ try: + next_page_url = scrapertools.get_match(data, '<a href="([^"]+)">»</a>') + next_page_url = urlparse.urljoin(host, next_page_url) + itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url)) + except: + logger.error("Siguiente pagina no encontrada") + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url, headers=headers) + + patron = '<div class="pm-li-category">.*?<a href="([^"]+)"' \ + '.*?<img src="([^"]+)".*?<h3>(?:<a.*?><span.*?>|)(.*?)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + if not scrapedthumbnail.startswith("data:image"): + scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1] + else: + scrapedthumbnail = item.thumbnail + itemlist.append(item.clone(action="novedades", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Busca enlaces de paginas siguientes... + next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)"><i class="fa fa-arrow-right">') + if next_page_url != "": + itemlist.append(item.clone(action="categorias", title=">> Página siguiente", url=next_page_url)) + + return itemlist + + +def viendo(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = scrapertools.cachePage(item.url, headers=headers) + bloque = scrapertools.find_single_match(data, '<ul class="pm-ul-carousel-videos list-inline"(.*?)</ul>') + patron = '<span class="pm-label-duration">(.*?)</span>.*?<a href="([^"]+)"' \ + '.*?title="([^"]+)".*?data-echo="([^"]+)"' + matches = scrapertools.find_multiple_matches(bloque, patron) + for duracion, scrapedurl, scrapedtitle, scrapedthumbnail in matches: + scrapedtitle += " [" + duracion + "]" + if not scrapedthumbnail.startswith("data:image"): + scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1] + else: + scrapedthumbnail = item.thumbnail + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail, fulltitle=scrapedtitle)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + # Se comprueba si el vídeo está ya en favoritos/ver más tarde + url = "http://www.documaniatv.com/ajax.php?p=playlists&do=video-watch-load-my-playlists&video-id=%s" % item.id + data = scrapertools.cachePage(url, headers=headers) + data = jsontools.load(data) + data = re.sub(r"\n|\r|\t", '', data['html']) + + itemlist.append(item.clone(action="play_", title=">> Reproducir vídeo", folder=False)) + if "kodi" in config.get_platform(): + folder = False + else: + folder = True + patron = '<li data-playlist-id="([^"]+)".*?onclick="playlist_(\w+)_item' \ + '.*?<span class="pm-playlists-name">(.*?)</span>.*?' 
\ + '<span class="pm-playlists-video-count">(.*?)</span>' + matches = scrapertools.find_multiple_matches(data, patron) + for playlist_id, playlist_action, playlist_title, video_count in matches: + scrapedtitle = playlist_action.replace('remove', 'Eliminar de ').replace('add', 'Añadir a ') + scrapedtitle += playlist_title + " (" + video_count + ")" + itemlist.append(item.clone(action="acciones_playlist", title=scrapedtitle, list_id=playlist_id, + url="http://www.documaniatv.com/ajax.php", folder=folder)) + + if "kodi" in config.get_platform(): + itemlist.append(item.clone(action="acciones_playlist", title="Crear una nueva playlist y añadir el documental", + id=item.id, url="http://www.documaniatv.com/ajax.php", folder=folder)) + itemlist.append(item.clone(action="acciones_playlist", title="Me gusta", url="http://www.documaniatv.com/ajax.php", + folder=folder)) + + return itemlist + + +def play_(item): + logger.info() + itemlist = [] + + try: + import xbmc + if not xbmc.getCondVisibility('System.HasAddon(script.cnubis)'): + from platformcode import platformtools + platformtools.dialog_ok("Addon no encontrado", + "Para ver vídeos alojados en cnubis necesitas tener su instalado su add-on", + line3="Descárgalo en http://cnubis.com/kodi-pelisalacarta.html") + return itemlist + except: + pass + + # Descarga la pagina + data = scrapertools.cachePage(item.url, headers=headers) + # Busca enlace directo + video_url = scrapertools.find_single_match(data, 'class="embedded-video"[^<]+<iframe.*?src="([^"]+)"') + if config.get_platform() == "plex" or config.get_platform() == "mediaserver": + code = scrapertools.find_single_match(video_url, 'u=([A-z0-9]+)') + url = "http://cnubis.com/plugins/mediaplayer/embeder/_embedkodi.php?u=%s" % code + data = scrapertools.downloadpage(url, headers=headers) + video_url = scrapertools.find_single_match(data, 'file\s*:\s*"([^"]+)"') + itemlist.append(item.clone(action="play", url=video_url, server="directo")) + return itemlist + + cnubis_script = xbmc.translatePath("special://home/addons/script.cnubis/default.py") + xbmc.executebuiltin("XBMC.RunScript(%s, url=%s&referer=%s&title=%s)" + % (cnubis_script, urllib.quote_plus(video_url), urllib.quote_plus(item.url), + item.fulltitle)) + + return itemlist + + +def usuario(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url, headers=headers) + profile_id = scrapertools.find_single_match(data, 'data-profile-id="([^"]+)"') + url = "http://www.documaniatv.com/ajax.php?p=profile&do=profile-load-playlists&uid=%s" % profile_id + + data = scrapertools.cachePage(url, headers=headers) + data = jsontools.load(data) + data = data['html'] + + patron = '<div class="pm-video-thumb">.*?src="([^"]+)".*?' 
\ + '<span class="pm-pl-items">(.*?)</span>(.*?)</div>' \ + '.*?<h3.*?href="([^"]+)".*?title="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedthumbnail, items, videos, scrapedurl, scrapedtitle in matches: + scrapedtitle = scrapedtitle.replace("Historia", 'Historial') + scrapedtitle += " (" + items + videos + ")" + if "no-thumbnail" in scrapedthumbnail: + scrapedthumbnail = "" + else: + scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1] + itemlist.append(item.clone(action="playlist", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, fanart=scrapedthumbnail)) + + return itemlist + + +def acciones_playlist(item): + logger.info() + itemlist = [] + if item.title == "Crear una nueva playlist y añadir el documental": + from platformcode import platformtools + texto = platformtools.dialog_input(heading="Introduce el título de la nueva playlist") + if texto is not None: + post = "p=playlists&do=create-playlist&title=%s&visibility=1&video-id=%s&ui=video-watch" % (texto, item.id) + data = scrapertools.cachePage(item.url, headers=headers, post=post) + else: + return + + elif item.title != "Me gusta": + if "Eliminar" in item.title: + action = "remove-from-playlist" + else: + action = "add-to-playlist" + post = "p=playlists&do=%s&playlist-id=%s&video-id=%s" % (action, item.list_id, item.id) + data = scrapertools.cachePage(item.url, headers=headers, post=post) + else: + item.url = "http://www.documaniatv.com/ajax.php?vid=%s&p=video&do=like" % item.id + data = scrapertools.cachePage(item.url, headers=headers) + + try: + import xbmc + from platformcode import platformtools + platformtools.dialog_notification(item.title, "Se ha añadido/eliminado correctamente") + xbmc.executebuiltin("Container.Refresh") + except: + itemlist.append(item.clone(action="", title="Se ha añadido/eliminado correctamente")) + return itemlist + + +def playlist(item): + logger.info() + itemlist = [] + data = scrapertools.cachePage(item.url, headers=headers) + patron = '<div class="pm-pl-list-index.*?src="([^"]+)".*?' 
\ + '<a href="([^"]+)".*?>(.*?)</a>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1] + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail, fulltitle=scrapedtitle, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/documentalesonline.json b/plugin.video.alfa/channels/documentalesonline.json new file mode 100755 index 00000000..fe8308a3 --- /dev/null +++ b/plugin.video.alfa/channels/documentalesonline.json @@ -0,0 +1,32 @@ +{ + "id": "documentalesonline", + "name": "Documentales Online", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/fsrnC4m.jpg", + "version": 1, + "changes": [ + { + "date": "15/04/2017", + "description": "fix novedades" + }, + { + "date": "09/03/2017", + "description": "nueva web" + } + ], + "categories": [ + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/documentalesonline.py b/plugin.video.alfa/channels/documentalesonline.py new file mode 100755 index 00000000..65b7a8f2 --- /dev/null +++ b/plugin.video.alfa/channels/documentalesonline.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- + +import re + +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +HOST = "http://documentales-online.com/" + + +def mainlist(item): + logger.info() + + itemlist = list() + + itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=HOST)) + itemlist.append(Item(channel=item.channel, title="Destacados", action="seccion", url=HOST, extra="destacados")) + itemlist.append(Item(channel=item.channel, title="Series Destacadas", action="seccion", url=HOST, extra="series")) + # itemlist.append(Item(channel=item.channel, title="Top 100", action="categorias", url=HOST)) + # itemlist.append(Item(channel=item.channel, title="Populares", action="categorias", url=HOST)) + + itemlist.append(Item(channel=item.channel, title="Buscar por:")) + itemlist.append(Item(channel=item.channel, title=" Título", action="search")) + itemlist.append(Item(channel=item.channel, title=" Categorías", action="categorias", url=HOST)) + # itemlist.append(Item(channel=item.channel, title=" Series y Temas", action="categorias", url=HOST)) + + return itemlist + + +def seccion(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + + if item.extra == "destacados": + patron_seccion = '<h4 class="widget-title">Destacados</h4><div class="textwidget"><ul>(.*?)</ul>' + action = "findvideos" + else: + patron_seccion = '<h4 class="widget-title">Series destacadas</h4><div class="textwidget"><ul>(.*?)</ul>' + action = "listado" + + data = scrapertools.find_single_match(data, patron_seccion) + + matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data) + + aux_action = action + for url, title in matches: + if item.extra != "destacados" and "Cosmos (Carl Sagan)" in title: + action = "findvideos" + else: + action = aux_action + 
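+        # "Cosmos (Carl Sagan)" (special-cased above) embeds its episodes
+        # directly on its own page, hence the jump straight to findvideos
+        # instead of listing it like the other series.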
itemlist.append(item.clone(title=title, url=url, action=action, fulltitle=title)) + + return itemlist + + +def listado(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + + pagination = scrapertools.find_single_match(data, '<div class="older"><a href="([^"]+)"') + if not pagination: + pagination = scrapertools.find_single_match(data, '<span class=\'current\'>\d</span>' + '<a class="page larger" href="([^"]+)">') + + patron = '<ul class="sp-grid">(.*?)</ul>' + data = scrapertools.find_single_match(data, patron) + + matches = re.compile('<a href="([^"]+)">(.*?)</a>.*?<img.*?src="([^"]+)"', re.DOTALL).findall(data) + + for url, title, thumb in matches: + itemlist.append(item.clone(title=title, url=url, action="findvideos", fulltitle=title, thumbnail=thumb)) + + if pagination: + itemlist.append(item.clone(title=">> Página siguiente", url=pagination)) + + return itemlist + + +def categorias(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + + data = scrapertools.find_single_match(data, 'a href="#">Categorías</a><ul class="sub-menu">(.*?)</ul>') + matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data) + + for url, title in matches: + itemlist.append(item.clone(title=title, url=url, action="listado", fulltitle=title)) + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + + try: + item.url = HOST + "?s=%s" % texto + return listado(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def findvideos(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + + if item.fulltitle == "Cosmos (Carl Sagan)": + + matches = scrapertools.find_multiple_matches(data, + '<p><strong>(.*?)</strong><br /><iframe.+?src="(https://www\.youtube\.com/[^?]+)') + + for title, url in matches: + new_item = item.clone(title=title, url=url) + + from core import servertools + aux_itemlist = servertools.find_video_items(new_item) + for videoitem in aux_itemlist: + videoitem.title = new_item.title + videoitem.fulltitle = new_item.title + videoitem.channel = item.channel + # videoitem.thumbnail = item.thumbnail + itemlist.extend(aux_itemlist) + + else: + data = scrapertools.find_multiple_matches(data, '<iframe.+?src="(https://www\.youtube\.com/[^?]+)') + + from core import servertools + itemlist.extend(servertools.find_video_items(data=",".join(data))) + for videoitem in itemlist: + videoitem.fulltitle = item.fulltitle + videoitem.channel = item.channel + # videoitem.thumbnail = item.thumbnail + + return itemlist diff --git a/plugin.video.alfa/channels/doomtv.json b/plugin.video.alfa/channels/doomtv.json new file mode 100755 index 00000000..7e45d32d --- /dev/null +++ b/plugin.video.alfa/channels/doomtv.json @@ -0,0 +1,77 @@ +{ + "id": "doomtv", + "name": "doomtv", + "compatible": { + "addon_version": "4.3" + }, + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s2.postimg.org/jivgi4ak9/doomtv.png", + "banner": "https://s32.postimg.org/6gxyripvp/doomtv_banner.png", + "version": 1, + "changes": [ + { + "date": "24/06/2017", + "description": "Cambios para autoplay" + }, + { + "date": "06/06/2017", + "description": "COmpatibilida con AutoPlay" + 
}, + { + "date": "12/05/2017", + "description": "Fix generos y enlaces" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/02/2017", + "description": "Release." + } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino" + ] + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/doomtv.py b/plugin.video.alfa/channels/doomtv.py new file mode 100755 index 00000000..a9a15e86 --- /dev/null +++ b/plugin.video.alfa/channels/doomtv.py @@ -0,0 +1,412 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from channels import autoplay +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +IDIOMAS = {'latino': 'Latino'} +list_language = IDIOMAS.values() + +CALIDADES = {'1080p': '1080p', '720p': '720p', '480p': '480p', '360p': '360p'} +list_quality = CALIDADES.values() +list_servers = ['directo'] + +host = 'http://doomtv.net/' +headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0 Chrome/58.0.3029.110', + 'Referer': host} + +tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "Suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "Drama": "https://s16.postimg.org/94sia332d/drama.png", + "Acción": "https://s3.postimg.org/y6o9puflv/accion.png", + "Aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "Animación": "https://s13.postimg.org/5on877l87/animacion.png", + "Ciencia Ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "Documentales": "https://s16.postimg.org/7xjj4bmol/documental.png", + "Musical": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "Bélico Guerra": "https://s23.postimg.org/71itp9hcr/belica.png", + "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png", + "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png", + "Biográfia": "https://s15.postimg.org/5lrpbx323/biografia.png", + "Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png", + "Familiar": "https://s7.postimg.org/6s7vdhqrf/familiar.png", + "Intriga": "https://s27.postimg.org/v9og43u2b/intriga.png", + "Thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png", + "Guerra": "https://s4.postimg.org/n1h2jp2jh/guerra.png", + "Estrenos": "https://s21.postimg.org/fy69wzm93/estrenos.png", + "Peleas": "https://s14.postimg.org/we1oyg05t/peleas.png", + "Policiales": "https://s21.postimg.org/n9e0ci31z/policial.png", + "Uncategorized": "https://s30.postimg.org/uj5tslenl/otros.png", + "LGBT": 
"https://s30.postimg.org/uj5tslenl/otros.png"} + + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append( + item.clone(title="Todas", + action="lista", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + url=host + )) + + itemlist.append( + item.clone(title="Generos", + action="seccion", + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + url=host, + extra='generos' + )) + + itemlist.append( + item.clone(title="Mas vistas", + action="seccion", + thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png', + fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', + url=host, + extra='masvistas' + )) + + itemlist.append( + item.clone(title="Recomendadas", + action="lista", + thumbnail='https://s12.postimg.org/s881laywd/recomendadas.png', + fanart='https://s12.postimg.org/s881laywd/recomendadas.png', + url=host, + extra='recomendadas' + )) + + itemlist.append( + item.clone(title="Por año", + action="seccion", + thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png', + fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png', + url=host, extra='poraño' + )) + + itemlist.append( + item.clone(title="Buscar", + action="search", + url='http://doomtv.net/?s=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + max_items = 20 + next_page_url = '' + + data = httptools.downloadpage(item.url).data + + if item.extra == 'recomendadas': + patron = '<a href="(.*?)">.*?' + patron += '<div class="imgss">.*?' + patron += '<img src="(.*?)" alt="(.*?)(?:–.*?|\(.*?|–|").*?' + patron += '<div class="imdb">.*?' + patron += '<\/a>.*?' + patron += '<span class="ttps">.*?<\/span>.*?' + patron += '<span class="ytps">(.*?)<\/span><\/div>' + elif item.extra in ['generos', 'poraño', 'buscar']: + patron = '<div class=movie>.*?<img src=(.*?) alt=(.*?)(?:\s|\/)><a href=(.*?)>.*?' + patron += '<h2>.*?<\/h2>.*?(?:<span class=year>(.*?)<\/span>)?.*?<\/div>' + else: + patron = '<div class="imagen">.*?' + patron += '<img src="(.*?)" alt="(.*?)(?:–.*?|\(.*?|–|").*?' + patron += '<a href="([^"]+)"><(?:span) class="player"><\/span><\/a>.*?' + patron += 'h2>\s*.*?(?:year)">(.*?)<\/span>.*?<\/div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + if item.next_page != 'b': + if len(matches) > max_items: + next_page_url = item.url + matches = matches[:max_items] + next_page = 'b' + else: + matches = matches[max_items:] + next_page = 'a' + patron_next_page = '<div class="siguiente"><a href="(.*?)"|\/\?' 
+ matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data) + if len(matches_next_page) > 0: + next_page_url = urlparse.urljoin(item.url, matches_next_page[0]) + + for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches: + if item.extra == 'recomendadas': + url = scrapedthumbnail + title = scrapedurl + thumbnail = scrapedtitle + else: + url = scrapedurl + thumbnail = scrapedthumbnail + title = scrapedtitle + year = scrapedyear + fanart = '' + plot = '' + + if 'serie' not in url: + itemlist.append( + Item(channel=item.channel, + action='findvideos', + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentTitle=title, + infoLabels={'year': year}, + context=autoplay.context + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Paginacion + if next_page_url != '': + itemlist.append( + Item(channel=item.channel, + action="lista", + title='Siguiente >>>', + url=next_page_url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', + extra=item.extra, + next_page=next_page + )) + return itemlist + + +def seccion(item): + logger.info() + + itemlist = [] + duplicado = [] + data = httptools.downloadpage(item.url).data + + if item.extra == 'generos': + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + accion = 'lista' + if item.extra == 'masvistas': + patron = '<b>\d*<\/b>\s*<a href="(.*?)">(.*?<\/a>\s*<span>.*?<\/span>\s*<i>.*?<\/i><\/li>)' + accion = 'findvideos' + elif item.extra == 'poraño': + patron = '<li><a class="ito" HREF="(.*?)">(.*?)<\/a><\/li>' + else: + patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)<\/i>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = scrapedurl + title = scrapedtitle + thumbnail = '' + fanart = '' + plot = '' + year = '' + contentTitle = '' + if item.extra == 'masvistas': + year = re.findall(r'\b\d{4}\b', scrapedtitle) + title = re.sub(r'<\/a>\s*<span>.*?<\/span>\s*<i>.*?<\/i><\/li>', '', scrapedtitle) + contentTitle = title + title = title + ' (' + year[0] + ')' + + elif item.extra == 'generos': + title = re.sub(r'<\/a> <i>\d+', '', scrapedtitle) + cantidad = re.findall(r'.*?<\/a> <i>(\d+)', scrapedtitle) + th_title = title + title = title + ' (' + cantidad[0] + ')' + thumbnail = tgenero[th_title] + fanart = thumbnail + + if url not in duplicado: + itemlist.append( + Item(channel=item.channel, + action=accion, + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentTitle=contentTitle, + infoLabels={'year': year} + )) + duplicado.append(url) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + + +def unpack(packed): + p, c, k = re.search("}\('(.*)', *\d+, *(\d+), *'(.*)'\.", packed, re.DOTALL).groups() + for c in reversed(range(int(c))): + if k.split('|')[c]: p = re.sub(r'(\b%s\b)' % c, k.split('|')[c], p) + p = p.replace('\\', '') + p = p.decode('string_escape') + return p + + +def getinfo(page_url): + info = () + logger.info() + data = httptools.downloadpage(page_url).data + thumbnail = scrapertools.find_single_match(data, '<div class="cover" style="background-image: url\((.*?)\);') + plot = scrapertools.find_single_match(data, '<h2>Synopsis<\/h2>\s*<p>(.*?)<\/p>') + info = (plot, thumbnail) + + return info + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + if texto != '': + return lista(item) + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + # 
categoria='peliculas' + try: + if categoria == 'peliculas': + item.url = host + elif categoria == 'infantiles': + item.url = host + 'category/animacion/' + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def get_url(item): + logger.info() + itemlist = [] + duplicado = [] + patrones = ["{'label':(.*?),.*?'file':'(.*?)'}", "{file:'(.*?redirector.*?),label:'(.*?)'}"] + data = httptools.downloadpage(item.url, headers=headers, cookies=False).data + patron = 'class="player-content"><iframe src="(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for option in matches: + if 'allplayer' in option: + url = 'http:/' + option.replace('//', '/') + data = httptools.downloadpage(url, headers=headers, cookies=False).data + packed = scrapertools.find_single_match(data, "<div id='allplayer'>.*?(eval\(function\(p,a,c,k.*?\)\)\))") + if packed: + unpacked = unpack(packed) + video_urls = [] + if "vimeocdn" in unpacked: + + streams = scrapertools.find_multiple_matches(unpacked, + "{file:'(.*?)',type:'video/.*?',label:'(.*?)'") + for video_url, quality in streams: + video_urls.append([video_url, quality]) + else: + doc_id = scrapertools.find_single_match(unpacked, 'driveid=(.*?)&') + doc_url = "http://docs.google.com/get_video_info?docid=%s" % doc_id + response = httptools.downloadpage(doc_url, cookies=False) + cookies = "" + cookie = response.headers["set-cookie"].split("HttpOnly, ") + for c in cookie: + cookies += c.split(";", 1)[0] + "; " + + data = response.data.decode('unicode-escape') + data = urllib.unquote_plus(urllib.unquote_plus(data)) + headers_string = "|Cookie=" + cookies + + url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)') + streams = scrapertools.find_multiple_matches(url_streams, + 'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))') + + itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '59': '480p'} + for itag, video_url in streams: + video_url += headers_string + video_urls.append([video_url, itags[itag]]) + + for video_item in video_urls: + calidad = video_item[1] + title = '%s [%s]' % (item.contentTitle, calidad) + url = video_item[0] + + if url not in duplicado: + itemlist.append( + Item(channel=item.channel, + action='play', + title=title, + url=url, + thumbnail=item.thumbnail, + plot=item.plot, + fanart=item.fanart, + contentTitle=item.contentTitle, + language=IDIOMAS['latino'], + server='directo', + quality=CALIDADES[calidad], + context=item.context + )) + duplicado.append(url) + else: + itemlist.extend(servertools.find_video_items(data=option)) + + for videoitem in itemlist: + + if 'Enlace' in videoitem.title: + videoitem.channel = item.channel + videoitem.title = item.contentTitle + ' (' + videoitem.server + ')' + videoitem.language = 'latino' + videoitem.quality = 'default' + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + itemlist = get_url(item) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + 
contentTitle=item.contentTitle, + )) + + return itemlist diff --git a/plugin.video.alfa/channels/doramastv.json b/plugin.video.alfa/channels/doramastv.json new file mode 100755 index 00000000..767edadc --- /dev/null +++ b/plugin.video.alfa/channels/doramastv.json @@ -0,0 +1,33 @@ +{ + "id": "doramastv", + "name": "DoramasTV", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "doramastv.png", + "banner": "doramastv.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/doramastv.py b/plugin.video.alfa/channels/doramastv.py new file mode 100755 index 00000000..1e16a3ab --- /dev/null +++ b/plugin.video.alfa/channels/doramastv.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + +host = "http://doramastv.com/" +DEFAULT_HEADERS = [] +DEFAULT_HEADERS.append( + ["User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"]) + + +def mainlist(item): + logger.info() + + itemlist = list([]) + itemlist.append( + Item(channel=item.channel, action="pagina_", title="En emision", url=urlparse.urljoin(host, "drama/emision"))) + itemlist.append(Item(channel=item.channel, action="letras", title="Listado alfabetico", + url=urlparse.urljoin(host, "lista-numeros"))) + itemlist.append( + Item(channel=item.channel, action="generos", title="Generos", url=urlparse.urljoin(host, "genero/accion"))) + itemlist.append(Item(channel=item.channel, action="pagina_", title="Ultimos agregados", + url=urlparse.urljoin(host, "dramas/ultimos"))) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar", + url=urlparse.urljoin(host, "buscar/anime/ajax/?title="))) + + return itemlist + + +def letras(item): + logger.info() + + itemlist = [] + headers = DEFAULT_HEADERS[:] + data = scrapertools.cache_page(item.url, headers=headers) + + patron = ' <a href="(\/lista-.+?)">(.+?)<' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.entityunescape(scrapedtitle) + url = urlparse.urljoin(host, scrapedurl) + thumbnail = "" + plot = "" + + logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail)) + + itemlist.append( + Item(channel=item.channel, action="pagina_", title=title, url=url, thumbnail=thumbnail, plot=plot)) + + return itemlist + + +def pagina_(item): + logger.info() + itemlist = [] + headers = DEFAULT_HEADERS[:] + data = scrapertools.cache_page(item.url, headers=headers) + data1 = scrapertools.get_match(data, '<div class="animes-bot">(.+?)<!-- fin -->') + data1 = data1.replace('\n', '') + data1 = data1.replace('\r', '') + patron = 'href="(\/drama.+?)".+?<\/div>(.+?)<\/div>.+?src="(.+?)".+?titulo">(.+?)<' + matches = re.compile(patron, re.DOTALL).findall(data1) + for scrapedurl, scrapedplot, scrapedthumbnail, scrapedtitle in matches: + title = scrapertools.unescape(scrapedtitle).strip() + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(host, scrapedthumbnail) + plot = 
scrapertools.decodeHtmlentities(scrapedplot) + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title)) + + patron = 'href="([^"]+)" class="next"' + matches = re.compile(patron, re.DOTALL).findall(data) + for match in matches: + if len(matches) > 0: + scrapedurl = urlparse.urljoin(item.url, match) + scrapedtitle = "Pagina Siguiente >>" + scrapedthumbnail = "" + scrapedplot = "" + itemlist.append(Item(channel=item.channel, action="pagina_", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + headers = DEFAULT_HEADERS[:] + data = scrapertools.cache_page(item.url, headers=headers) + data = data.replace('\n', '') + data = data.replace('\r', '') + data1 = scrapertools.get_match(data, '<ul id="lcholder">(.+?)</ul>') + patron = '<a href="(.+?)".+?>(.+?)<' + matches = re.compile(patron, re.DOTALL).findall(data1) + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.htmlclean(scrapedtitle).strip() + thumbnail = "" + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + show = item.show + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, show=show)) + return itemlist + + +def findvideos(item): + logger.info() + + headers = DEFAULT_HEADERS[:] + data = scrapertools.cache_page(item.url, headers=headers) + data = data.replace('\n', '') + data = data.replace('\r', '') + patron = '<iframe src="(.+?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + data1 = '' + for match in matches: + data1 += match + '\n' + data = data1 + data = data.replace('%26', '&') + data = data.replace('http://ozhe.larata.in/repro-d/mvk?v=', 'http://vk.com/video_ext.php?oid=') + data = data.replace('http://ozhe.larata.in/repro-d/send?v=', 'http://sendvid.com/embed/') + data = data.replace('http://ozhe.larata.in/repro-d/msend?v=', 'http://sendvid.com/embed/') + data = data.replace('http://ozhe.larata.in/repro-d/vidweed?v=', 'http://www.videoweed.es/file/') + data = data.replace('http://ozhe.larata.in/repro-d/nowv?v=', 'http://www.nowvideo.sx/video/') + data = data.replace('http://ozhe.larata.in/repro-d/nov?v=', 'http://www.novamov.com/video/') + itemlist = [] + + from core import servertools + itemlist.extend(servertools.find_video_items(data=data)) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.folder = False + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + headers = DEFAULT_HEADERS[:] + data = scrapertools.cache_page(item.url, headers=headers) + data = data.replace('\n', '') + data = data.replace('\r', '') + + data = scrapertools.get_match(data, '<!-- Lista de Generos -->(.+?)<\/div>') + patron = '<a href="(.+?)".+?>(.+?)<' + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedtitle in matches: + title = scrapertools.entityunescape(scrapedtitle) + url = urlparse.urljoin(host, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail)) + + itemlist.append( + Item(channel=item.channel, action="pagina_", title=title, url=url, thumbnail=thumbnail, plot=plot)) + + return itemlist + + +def search(item, texto): + logger.info() + item.url = urlparse.urljoin(host, item.url) + texto = texto.replace(" ", "+") + headers = DEFAULT_HEADERS[:] + data = 
scrapertools.cache_page(item.url + texto, headers=headers) + data = data.replace('\n', '') + data = data.replace('\r', '') + patron = '<a href="(.+?)".+?src="(.+?)".+?titulo">(.+?)<' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + title = scrapertools.unescape(scrapedtitle).strip() + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(host, scrapedthumbnail) + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot="", + show=title)) + return itemlist diff --git a/plugin.video.alfa/channels/downloads.json b/plugin.video.alfa/channels/downloads.json new file mode 100755 index 00000000..bed761f0 --- /dev/null +++ b/plugin.video.alfa/channels/downloads.json @@ -0,0 +1,189 @@ +{ + "id": "downloads", + "name": "Descargas", + "active": false, + "adult": false, + "language": "es", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "12/03/17", + "description": "Añadidas mas opciones de configuracion y corregidos fallos" + }, + { + "date": "12/01/17", + "description": "release" + } + ], + "categories": [ + "movie" + ], + "settings": [ + { + "type": "label", + "label": "Ubicacion de archivos", + "enabled": true, + "visible": true + }, + { + "id": "library_add", + "type": "bool", + "label": " - Añadir descargas completadas a la videoteca", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "library_move", + "type": "bool", + "label": " - Mover el archivo descargado a la videoteca", + "default": true, + "enabled": "eq(-1,true)", + "visible": true + }, + { + "id": "browser", + "type": "bool", + "label": " - Visualizar archivos descargados desde descargas", + "default": false, + "enabled": true, + "visible": true + }, + { + "type": "label", + "label": "Descarga", + "enabled": true, + "visible": true + }, + { + "id": "block_size", + "type": "list", + "label": " - Tamaño por bloque", + "lvalues": [ + "128 KB", + "256 KB", + "512 KB", + "1 MB", + "2 MB" + ], + "default": 1, + "enabled": true, + "visible": true + }, + { + "id": "part_size", + "type": "list", + "label": " - Tamaño por parte", + "lvalues": [ + "1 MB", + "2 MB", + "4 MB", + "8 MB", + "16 MB", + "32 MB" + ], + "default": 1, + "enabled": true, + "visible": true + }, + { + "id": "max_connections", + "type": "list", + "label": " - Numero máximo de conexiones simultaneas", + "lvalues": [ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10" + ], + "default": 4, + "enabled": true, + "visible": true + }, + { + "id": "max_buffer", + "type": "list", + "label": " - Numero máximo de partes en memoria", + "lvalues": [ + "0", + "2", + "4", + "6", + "8", + "10", + "12", + "14", + "16", + "18", + "20" + ], + "default": 5, + "enabled": true, + "visible": true + }, + { + "type": "label", + "label": "Elección del servidor", + "enabled": true, + "visible": true + }, + { + "id": "server_reorder", + "type": "list", + "label": " - Orden de servidores", + "lvalues": [ + "Mantener", + "Reordenar" + ], + "default": 1, + "enabled": true, + "visible": true + }, + { + "id": "language", + "type": "list", + "label": " - Idioma preferido", + "lvalues": [ + "Esp, Lat, Sub, Eng, Vose", + "Esp, Sub, Lat, Eng, Vose", + "Eng, Sub, Vose, Esp, Lat", + "Vose, Eng, Sub, Esp, Lat" + ], + "default": 0, + "enabled": "eq(-1,'Reordenar')", + "visible": true + }, + { + "id": "quality", + "type": "list", + "label": " - 
Calidad preferida", + "lvalues": [ + "La mas alta", + "HD 1080", + "HD 720", + "SD" + ], + "default": 0, + "enabled": "eq(-2,'Reordenar')", + "visible": true + }, + { + "id": "server_speed", + "type": "bool", + "label": " - Elegir los servidores mas rapidos", + "default": true, + "enabled": "eq(-3,'Reordenar')", + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/downloads.py b/plugin.video.alfa/channels/downloads.py new file mode 100755 index 00000000..16fddcbd --- /dev/null +++ b/plugin.video.alfa/channels/downloads.py @@ -0,0 +1,869 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Gestor de descargas +# ------------------------------------------------------------ + +import os +import re +import time + +from core import config +from core import filetools +from core import videolibrarytools +from core import logger +from core import scraper +from core import scrapertools +from core import servertools +from core.downloader import Downloader +from core.item import Item +from platformcode import platformtools + +STATUS_COLORS = {0: "orange", 1: "orange", 2: "green", 3: "red"} +STATUS_CODES = type("StatusCode", (), {"stoped": 0, "canceled": 1, "completed": 2, "error": 3}) +DOWNLOAD_LIST_PATH = config.get_setting("downloadlistpath") +DOWNLOAD_PATH = config.get_setting("downloadpath") +STATS_FILE = os.path.join(config.get_data_path(), "servers.json") + +TITLE_FILE = "[COLOR %s][%i%%][/COLOR] %s" +TITLE_TVSHOW = "[COLOR %s][%i%%][/COLOR] %s [%s]" + + +def mainlist(item): + logger.info() + itemlist = [] + + # Lista de archivos + for file in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): + # Saltamos todos los que no sean JSON + if not file.endswith(".json"): continue + + # cargamos el item + file = os.path.join(DOWNLOAD_LIST_PATH, file) + i = Item(path=file).fromjson(filetools.read(file)) + i.thumbnail = i.contentThumbnail + + # Listado principal + if not item.contentType == "tvshow": + # Series + if i.contentType == "episode": + # Comprobamos que la serie no este ya en el itemlist + if not filter( + lambda x: x.contentSerieName == i.contentSerieName and x.contentChannel == i.contentChannel, + itemlist): + + title = TITLE_TVSHOW % ( + STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentSerieName, i.contentChannel) + + itemlist.append(Item(title=title, channel="descargas", action="mainlist", contentType="tvshow", + contentSerieName=i.contentSerieName, contentChannel=i.contentChannel, + downloadStatus=i.downloadStatus, downloadProgress=[i.downloadProgress], + fanart=i.fanart, thumbnail=i.thumbnail)) + + else: + s = \ + filter(lambda x: x.contentSerieName == i.contentSerieName and x.contentChannel == i.contentChannel, + itemlist)[0] + s.downloadProgress.append(i.downloadProgress) + downloadProgress = sum(s.downloadProgress) / len(s.downloadProgress) + + if not s.downloadStatus in [STATUS_CODES.error, STATUS_CODES.canceled] and not i.downloadStatus in [ + STATUS_CODES.completed, STATUS_CODES.stoped]: + s.downloadStatus = i.downloadStatus + + s.title = TITLE_TVSHOW % ( + STATUS_COLORS[s.downloadStatus], downloadProgress, i.contentSerieName, i.contentChannel) + + # Peliculas + elif i.contentType == "movie" or i.contentType == "video": + i.title = TITLE_FILE % (STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentTitle) + itemlist.append(i) + + # Listado dentro de una serie + else: + if i.contentType == "episode" and i.contentSerieName == item.contentSerieName and i.contentChannel == 
item.contentChannel: + i.title = TITLE_FILE % (STATUS_COLORS[i.downloadStatus], i.downloadProgress, + "%dx%0.2d: %s" % (i.contentSeason, i.contentEpisodeNumber, i.contentTitle)) + itemlist.append(i) + + estados = [i.downloadStatus for i in itemlist] + + # Si hay alguno completado + if 2 in estados: + itemlist.insert(0, Item(channel=item.channel, action="clean_ready", title="Eliminar descargas completadas", + contentType=item.contentType, contentChannel=item.contentChannel, + contentSerieName=item.contentSerieName, text_color="sandybrown")) + + # Si hay alguno con error + if 3 in estados: + itemlist.insert(0, Item(channel=item.channel, action="restart_error", title="Reiniciar descargas con error", + contentType=item.contentType, contentChannel=item.contentChannel, + contentSerieName=item.contentSerieName, text_color="orange")) + + # Si hay alguno pendiente + if 1 in estados or 0 in estados: + itemlist.insert(0, Item(channel=item.channel, action="download_all", title="Descargar todo", + contentType=item.contentType, contentChannel=item.contentChannel, + contentSerieName=item.contentSerieName, text_color="green")) + + if len(itemlist): + itemlist.insert(0, Item(channel=item.channel, action="clean_all", title="Eliminar todo", + contentType=item.contentType, contentChannel=item.contentChannel, + contentSerieName=item.contentSerieName, text_color="red")) + + if not item.contentType == "tvshow" and config.get_setting("browser", "downloads") == True: + itemlist.insert(0, Item(channel=item.channel, action="browser", title="Ver archivos descargados", + url=DOWNLOAD_PATH, text_color="yellow")) + + if not item.contentType == "tvshow": + itemlist.insert(0, Item(channel=item.channel, action="settings", title="Configuración descargas...", + text_color="blue")) + + return itemlist + + +def settings(item): + ret = platformtools.show_channel_settings(caption="configuración -- Descargas") + platformtools.itemlist_refresh() + return ret + + +def browser(item): + logger.info() + itemlist = [] + + for file in filetools.listdir(item.url): + if file == "list": continue + if filetools.isdir(filetools.join(item.url, file)): + itemlist.append( + Item(channel=item.channel, title=file, action=item.action, url=filetools.join(item.url, file))) + else: + itemlist.append(Item(channel=item.channel, title=file, action="play", url=filetools.join(item.url, file))) + + return itemlist + + +def clean_all(item): + logger.info() + + for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): + if fichero.endswith(".json"): + download_item = Item().fromjson(filetools.read(os.path.join(DOWNLOAD_LIST_PATH, fichero))) + if not item.contentType == "tvshow" or ( + item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel): + filetools.remove(os.path.join(DOWNLOAD_LIST_PATH, fichero)) + + platformtools.itemlist_refresh() + + +def clean_ready(item): + logger.info() + for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): + if fichero.endswith(".json"): + download_item = Item().fromjson(filetools.read(os.path.join(DOWNLOAD_LIST_PATH, fichero))) + if not item.contentType == "tvshow" or ( + item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel): + if download_item.downloadStatus == STATUS_CODES.completed: + filetools.remove(os.path.join(DOWNLOAD_LIST_PATH, fichero)) + + platformtools.itemlist_refresh() + + +def restart_error(item): + logger.info() + for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): + if 
fichero.endswith(".json"): + download_item = Item().fromjson(filetools.read(os.path.join(DOWNLOAD_LIST_PATH, fichero))) + + if not item.contentType == "tvshow" or ( + item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel): + if download_item.downloadStatus == STATUS_CODES.error: + if filetools.isfile( + os.path.join(config.get_setting("downloadpath"), download_item.downloadFilename)): + filetools.remove( + os.path.join(config.get_setting("downloadpath"), download_item.downloadFilename)) + + update_json(item.path, + {"downloadStatus": STATUS_CODES.stoped, "downloadComplete": 0, "downloadProgress": 0}) + + platformtools.itemlist_refresh() + + +def download_all(item): + time.sleep(0.5) + for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)): + if fichero.endswith(".json"): + download_item = Item(path=os.path.join(DOWNLOAD_LIST_PATH, fichero)).fromjson( + filetools.read(os.path.join(DOWNLOAD_LIST_PATH, fichero))) + + if not item.contentType == "tvshow" or ( + item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel): + if download_item.downloadStatus in [STATUS_CODES.stoped, STATUS_CODES.canceled]: + res = start_download(download_item) + platformtools.itemlist_refresh() + # Si se ha cancelado paramos + if res == STATUS_CODES.canceled: break + + +def menu(item): + logger.info() + if item.downloadServer: + servidor = item.downloadServer.get("server", "Auto") + else: + servidor = "Auto" + # Opciones disponibles para el menu + op = ["Descargar", "Eliminar de la lista", "Reiniciar descarga y eliminar datos", + "Modificar servidor: %s" % (servidor.capitalize())] + + opciones = [] + + # Opciones para el menu + if item.downloadStatus == 0: # Sin descargar + opciones.append(op[0]) # Descargar + if not item.server: opciones.append(op[3]) # Elegir Servidor + opciones.append(op[1]) # Eliminar de la lista + + if item.downloadStatus == 1: # descarga parcial + opciones.append(op[0]) # Descargar + if not item.server: opciones.append(op[3]) # Elegir Servidor + opciones.append(op[2]) # Reiniciar descarga + opciones.append(op[1]) # Eliminar de la lista + + if item.downloadStatus == 2: # descarga completada + opciones.append(op[1]) # Eliminar de la lista + opciones.append(op[2]) # Reiniciar descarga + + if item.downloadStatus == 3: # descarga con error + opciones.append(op[2]) # Reiniciar descarga + opciones.append(op[1]) # Eliminar de la lista + + # Mostramos el dialogo + seleccion = platformtools.dialog_select("Elige una opción", opciones) + + # -1 es cancelar + if seleccion == -1: return + + logger.info("opcion=%s" % (opciones[seleccion])) + # Opcion Eliminar + if opciones[seleccion] == op[1]: + filetools.remove(item.path) + + # Opcion inicaiar descarga + if opciones[seleccion] == op[0]: + start_download(item) + + # Elegir Servidor + if opciones[seleccion] == op[3]: + select_server(item) + + # Reiniciar descarga + if opciones[seleccion] == op[2]: + if filetools.isfile(os.path.join(config.get_setting("downloadpath"), item.downloadFilename)): + filetools.remove(os.path.join(config.get_setting("downloadpath"), item.downloadFilename)) + + update_json(item.path, {"downloadStatus": STATUS_CODES.stoped, "downloadComplete": 0, "downloadProgress": 0, + "downloadServer": {}}) + + platformtools.itemlist_refresh() + + +def move_to_libray(item): + download_path = filetools.join(config.get_setting("downloadpath"), item.downloadFilename) + library_path = filetools.join(config.get_videolibrary_path(), 
*filetools.split(item.downloadFilename)) + final_path = download_path + + if config.get_setting("library_add", "downloads") == True and config.get_setting("library_move", + "downloads") == True: + if not filetools.isdir(filetools.dirname(library_path)): + filetools.mkdir(filetools.dirname(library_path)) + + if filetools.isfile(library_path) and filetools.isfile(download_path): + filetools.remove(library_path) + + if filetools.isfile(download_path): + if filetools.move(download_path, library_path): + final_path = library_path + + if len(filetools.listdir(filetools.dirname(download_path))) == 0: + filetools.rmdir(filetools.dirname(download_path)) + + if config.get_setting("library_add", "downloads") == True: + if filetools.isfile(final_path): + if item.contentType == "movie" and item.infoLabels["tmdb_id"]: + library_item = Item(title="Descargado: %s" % item.downloadFilename, channel="downloads", + action="findvideos", infoLabels=item.infoLabels, url=final_path) + videolibrarytools.save_movie(library_item) + + elif item.contentType == "episode" and item.infoLabels["tmdb_id"]: + library_item = Item(title="Descargado: %s" % item.downloadFilename, channel="downloads", + action="findvideos", infoLabels=item.infoLabels, url=final_path) + tvshow = Item(channel="downloads", contentType="tvshow", + infoLabels={"tmdb_id": item.infoLabels["tmdb_id"]}) + videolibrarytools.save_tvshow(tvshow, [library_item]) + + +def update_json(path, params): + item = Item().fromjson(filetools.read(path)) + item.__dict__.update(params) + filetools.write(path, item.tojson()) + + +def save_server_statistics(server, speed, success): + from core import jsontools + if os.path.isfile(STATS_FILE): + servers = jsontools.load(open(STATS_FILE, "rb").read()) + else: + servers = {} + + if not server in servers: + servers[server] = {"success": [], "count": 0, "speeds": [], "last": 0} + + servers[server]["count"] += 1 + servers[server]["success"].append(bool(success)) + servers[server]["success"] = servers[server]["success"][-5:] + servers[server]["last"] = time.time() + if success: + servers[server]["speeds"].append(speed) + servers[server]["speeds"] = servers[server]["speeds"][-5:] + + open(STATS_FILE, "wb").write(jsontools.dump(servers)) + return + + +def get_server_position(server): + from core import jsontools + if os.path.isfile(STATS_FILE): + servers = jsontools.load(open(STATS_FILE, "rb").read()) + else: + servers = {} + + if server in servers: + pos = [s for s in sorted(servers, key=lambda x: (sum(servers[x]["speeds"]) / (len(servers[x]["speeds"]) or 1), + float(sum(servers[x]["success"])) / ( + len(servers[x]["success"]) or 1)), reverse=True)] + return pos.index(server) + 1 + else: + return 0 + + +def get_match_list(data, match_list, order_list=None, only_ascii=False, ignorecase=False): + """ + Busca coincidencias en una cadena de texto, con un diccionario de "ID" / "Listado de cadenas de busqueda": + { "ID1" : ["Cadena 1", "Cadena 2", "Cadena 3"], + "ID2" : ["Cadena 4", "Cadena 5", "Cadena 6"] + } + + El diccionario no pude contener una misma cadena de busqueda en varías IDs. 
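+
+    Ejemplo (hipotético, con datos inventados):
+
+        res = get_match_list("Pablo sabe hablar el Idioma Español",
+                             {"IDIOMA": ["Idioma Español"], "ES": ["Español"]},
+                             order_list=["IDIOMA", "ES"])
+        # res.key == "IDIOMA" y res.index == 0: "Idioma Español" gana por ser la cadena
+        # más larga y, una vez descontada del texto, "Español" ya no coincide.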
+
+    La búsqueda se realiza por orden de tamaño de cadena de búsqueda (de más larga a más corta): si una cadena coincide,
+    se elimina de la cadena a buscar para las siguientes, para que no se detecten dos categorías si una cadena es parte de otra.
+    Por ejemplo, con "Idioma Español" y "Español": si la primera aparece en la cadena "Pablo sabe hablar el Idioma Español",
+    coincidirá con "Idioma Español" pero no con "Español", ya que la coincidencia más larga tiene prioridad.
+
+    """
+    import unicodedata
+    match_dict = dict()
+    matches = []
+
+    # Pasamos la cadena a unicode
+    data = unicode(data, "utf8")
+
+    # Aplanamos el diccionario a {"Cadena 1": "ID1", "Cadena 2": "ID1", "Cadena 4": "ID2"} y pasamos las claves a unicode
+    for key in match_list:
+        if order_list and key not in order_list:
+            raise Exception("key '%s' not in order_list" % key)
+        for value in match_list[key]:
+            if value in match_dict:
+                raise Exception("Duplicate word in list: '%s'" % value)
+            match_dict[unicode(value, "utf8")] = key
+
+    # Si ignorecase = True, lo pasamos todo a mayúsculas
+    if ignorecase:
+        data = data.upper()
+        match_dict = dict((key.upper(), match_dict[key]) for key in match_dict)
+
+    # Si only_ascii = True, eliminamos todos los acentos y la Ñ
+    if only_ascii:
+        data = ''.join((c for c in unicodedata.normalize('NFD', data) if unicodedata.category(c) != 'Mn'))
+        match_dict = dict((''.join((c for c in unicodedata.normalize('NFD', key) if unicodedata.category(c) != 'Mn')),
+                           match_dict[key]) for key in match_dict)
+
+    # Ordenamos el listado de mayor a menor tamaño y buscamos.
+    for match in sorted(match_dict, key=lambda x: len(x), reverse=True):
+        s = data
+        for a in matches:
+            s = s.replace(a, "")
+        if match in s:
+            matches.append(match)
+    if matches:
+        if order_list:
+            return type("Mtch_list", (),
+                        {"key": match_dict[matches[-1]], "index": order_list.index(match_dict[matches[-1]])})
+        else:
+            return type("Mtch_list", (), {"key": match_dict[matches[-1]], "index": None})
+    else:
+        if order_list:
+            return type("Mtch_list", (), {"key": None, "index": len(order_list)})
+        else:
+            return type("Mtch_list", (), {"key": None, "index": None})
+
+
+def sort_method(item):
+    """
+    Puntúa cada item en función de varios parámetros:
+    @type item: item
+    @param item: elemento que se va a valorar.
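+
+    Example (hypothetical), with the default language and quality settings: an item titled
+    "[Latino] [HD 720]" scores value = (1, 2), since "LAT" sits at position 1 in
+    ["ES", "LAT", "SUB", "ENG", "VOSE"] and "HD" at position 2 in
+    ["BLURAY", "FULLHD", "HD", "480P", "360P", "240P"]; Python compares these tuples
+    element by element, so the lowest score sorts first (a server-position component
+    is appended when "server_speed" is enabled).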
+    @return: puntuación obtenida
+    @rtype: int
+    """
+    lang_orders = {}
+    lang_orders[0] = ["ES", "LAT", "SUB", "ENG", "VOSE"]
+    lang_orders[1] = ["ES", "SUB", "LAT", "ENG", "VOSE"]
+    lang_orders[2] = ["ENG", "SUB", "VOSE", "ESP", "LAT"]
+    lang_orders[3] = ["VOSE", "ENG", "SUB", "ESP", "LAT"]
+
+    quality_orders = {}
+    quality_orders[0] = ["BLURAY", "FULLHD", "HD", "480P", "360P", "240P"]
+    quality_orders[1] = ["FULLHD", "HD", "480P", "360P", "240P", "BLURAY"]
+    quality_orders[2] = ["HD", "480P", "360P", "240P", "FULLHD", "BLURAY"]
+    quality_orders[3] = ["480P", "360P", "240P", "BLURAY", "FULLHD", "HD"]
+
+    order_list_idiomas = lang_orders[int(config.get_setting("language", "downloads"))]
+    match_list_idiomas = {"ES": ["CAST", "ESP", "Castellano", "Español", "Audio Español"],
+                          "LAT": ["LAT", "Latino"],
+                          "SUB": ["Subtitulo Español", "Subtitulado", "SUB"],
+                          "ENG": ["EN", "ENG", "Inglés", "Ingles", "English"],
+                          "VOSE": ["VOSE"]}
+
+    order_list_calidad = quality_orders[int(config.get_setting("quality", "downloads"))]
+    match_list_calidad = {"BLURAY": ["BR", "BLURAY"],
+                          "FULLHD": ["FULLHD", "FULL HD", "1080", "HD1080", "HD 1080"],
+                          "HD": ["HD", "HD REAL", "HD 720", "720", "HDTV"],
+                          "480P": ["SD", "480P"],
+                          "360P": ["360P"],
+                          "240P": ["240P"]}
+
+    value = (get_match_list(item.title, match_list_idiomas, order_list_idiomas, ignorecase=True, only_ascii=True).index,
+             get_match_list(item.title, match_list_calidad, order_list_calidad, ignorecase=True, only_ascii=True).index)
+
+    if config.get_setting("server_speed", "downloads"):
+        value += tuple([get_server_position(item.server)])
+
+    return value
+
+
+def download_from_url(url, item):
+    logger.info("Intentando descargar: %s" % (url))
+    if url.lower().endswith(".m3u8") or url.lower().startswith("rtmp"):
+        save_server_statistics(item.server, 0, False)
+        return {"downloadStatus": STATUS_CODES.error}
+
+    # Obtenemos la ruta de descarga y el nombre del archivo
+    download_path = filetools.dirname(filetools.join(DOWNLOAD_PATH, item.downloadFilename))
+    file_name = filetools.basename(filetools.join(DOWNLOAD_PATH, item.downloadFilename))
+
+    # Creamos la carpeta si no existe
+    if not filetools.exists(download_path):
+        filetools.mkdir(download_path)
+
+    # Lanzamos la descarga
+    d = Downloader(url, download_path, file_name,
+                   max_connections=1 + int(config.get_setting("max_connections", "downloads")),
+                   block_size=2 ** (17 + int(config.get_setting("block_size", "downloads"))),
+                   part_size=2 ** (20 + int(config.get_setting("part_size", "downloads"))),
+                   max_buffer=2 * int(config.get_setting("max_buffer", "downloads")))
+    d.start_dialog("Descargas")
+
+    # Descarga detenida.
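+
+    # The exponents above map the list indexes stored by downloads.json to real sizes:
+    # block_size index i gives 2 ** (17 + i) bytes (128 KB .. 2 MB for i = 0..4) and
+    # part_size gives 2 ** (20 + i) bytes (1 MB .. 32 MB for i = 0..5); max_connections
+    # is 1 + i (1..10) and max_buffer is 2 * i (0..20), matching the "lvalues" labels
+    # shown in the settings dialog.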
Obtenemos el estado: + # Se ha producido un error en la descarga + if d.state == d.states.error: + logger.info("Error al intentar descargar %s" % (url)) + status = STATUS_CODES.error + + # La descarga se ha detenifdo + elif d.state == d.states.stopped: + logger.info("Descarga detenida") + status = STATUS_CODES.canceled + + # La descarga ha finalizado + elif d.state == d.states.completed: + logger.info("Descargado correctamente") + status = STATUS_CODES.completed + + if item.downloadSize and item.downloadSize != d.size[0]: + status = STATUS_CODES.error + + save_server_statistics(item.server, d.speed[0], d.state != d.states.error) + + dir = os.path.dirname(item.downloadFilename) + file = filetools.join(dir, d.filename) + + if status == STATUS_CODES.completed: + move_to_libray(item.clone(downloadFilename=file)) + + return {"downloadUrl": d.download_url, "downloadStatus": status, "downloadSize": d.size[0], + "downloadProgress": d.progress, "downloadCompleted": d.downloaded[0], "downloadFilename": file} + + +def download_from_server(item): + logger.info(item.tostring()) + unsupported_servers = ["torrent"] + + progreso = platformtools.dialog_progress("Descargas", "Probando con: %s" % item.server) + channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) + if hasattr(channel, "play") and not item.play_menu: + + progreso.update(50, "Probando con: %s" % item.server, "Conectando con %s..." % item.contentChannel) + try: + itemlist = getattr(channel, "play")(item.clone(channel=item.contentChannel, action=item.contentAction)) + except: + logger.error("Error en el canal %s" % item.contentChannel) + else: + if len(itemlist) and isinstance(itemlist[0], Item): + download_item = item.clone(**itemlist[0].__dict__) + download_item.contentAction = download_item.action + download_item.infoLabels = item.infoLabels + item = download_item + elif len(itemlist) and isinstance(itemlist[0], list): + item.video_urls = itemlist + if not item.server: item.server = "directo" + else: + logger.info("No hay nada que reproducir") + return {"downloadStatus": STATUS_CODES.error} + progreso.close() + logger.info("contentAction: %s | contentChannel: %s | server: %s | url: %s" % ( + item.contentAction, item.contentChannel, item.server, item.url)) + + if not item.server or not item.url or not item.contentAction == "play" or item.server in unsupported_servers: + logger.error("El Item no contiene los parametros necesarios.") + return {"downloadStatus": STATUS_CODES.error} + + if not item.video_urls: + video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(item.server, item.url, item.password, + True) + else: + video_urls, puedes, motivo = item.video_urls, True, "" + + # Si no esta disponible, salimos + if not puedes: + logger.info("El vídeo **NO** está disponible") + return {"downloadStatus": STATUS_CODES.error} + + else: + logger.info("El vídeo **SI** está disponible") + + result = {} + + # Recorre todas las opciones hasta que consiga descargar una correctamente + for video_url in reversed(video_urls): + + result = download_from_url(video_url[1], item) + + if result["downloadStatus"] in [STATUS_CODES.canceled, STATUS_CODES.completed]: + break + + # Error en la descarga, continuamos con la siguiente opcion + if result["downloadStatus"] == STATUS_CODES.error: + continue + + # Devolvemos el estado + return result + + +def download_from_best_server(item): + logger.info( + "contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, 
item.url)) + + result = {"downloadStatus": STATUS_CODES.error} + + progreso = platformtools.dialog_progress("Descargas", "Obteniendo lista de servidores disponibles...") + channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) + + progreso.update(50, "Obteniendo lista de servidores disponibles:", "Conectando con %s..." % item.contentChannel) + + if hasattr(channel, item.contentAction): + play_items = getattr(channel, item.contentAction)( + item.clone(action=item.contentAction, channel=item.contentChannel)) + else: + play_items = servertools.find_video_items(item.clone(action=item.contentAction, channel=item.contentChannel)) + + play_items = filter(lambda x: x.action == "play" and not "trailer" in x.title.lower(), play_items) + + progreso.update(100, "Obteniendo lista de servidores disponibles", "Servidores disponibles: %s" % len(play_items), + "Identificando servidores...") + + if config.get_setting("server_reorder", "downloads") == 1: + play_items.sort(key=sort_method) + + if progreso.iscanceled(): + return {"downloadStatus": STATUS_CODES.canceled} + + progreso.close() + + # Recorremos el listado de servers, hasta encontrar uno que funcione + for play_item in play_items: + play_item = item.clone(**play_item.__dict__) + play_item.contentAction = play_item.action + play_item.infoLabels = item.infoLabels + + result = download_from_server(play_item) + + if progreso.iscanceled(): + result["downloadStatus"] = STATUS_CODES.canceled + + # Tanto si se cancela la descarga como si se completa dejamos de probar mas opciones + if result["downloadStatus"] in [STATUS_CODES.canceled, STATUS_CODES.completed]: + result["downloadServer"] = {"url": play_item.url, "server": play_item.server} + break + + return result + + +def select_server(item): + logger.info( + "contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) + + progreso = platformtools.dialog_progress("Descargas", "Obteniendo lista de servidores disponibles...") + channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) + progreso.update(50, "Obteniendo lista de servidores disponibles:", "Conectando con %s..." 
% item.contentChannel) + + if hasattr(channel, item.contentAction): + play_items = getattr(channel, item.contentAction)( + item.clone(action=item.contentAction, channel=item.contentChannel)) + else: + play_items = servertools.find_video_items(item.clone(action=item.contentAction, channel=item.contentChannel)) + + play_items = filter(lambda x: x.action == "play" and not "trailer" in x.title.lower(), play_items) + + progreso.update(100, "Obteniendo lista de servidores disponibles", "Servidores disponibles: %s" % len(play_items), + "Identificando servidores...") + + for x, i in enumerate(play_items): + if not i.server and hasattr(channel, "play"): + play_items[x] = getattr(channel, "play")(i) + + seleccion = platformtools.dialog_select("Selecciona el servidor", ["Auto"] + [s.title for s in play_items]) + if seleccion > 1: + update_json(item.path, { + "downloadServer": {"url": play_items[seleccion - 1].url, "server": play_items[seleccion - 1].server}}) + elif seleccion == 0: + update_json(item.path, {"downloadServer": {}}) + + platformtools.itemlist_refresh() + + +def start_download(item): + logger.info( + "contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url)) + + # Ya tenemnos server, solo falta descargar + if item.contentAction == "play": + ret = download_from_server(item) + update_json(item.path, ret) + return ret["downloadStatus"] + + elif item.downloadServer and item.downloadServer.get("server"): + ret = download_from_server( + item.clone(server=item.downloadServer.get("server"), url=item.downloadServer.get("url"), + contentAction="play")) + update_json(item.path, ret) + return ret["downloadStatus"] + # No tenemos server, necesitamos buscar el mejor + else: + ret = download_from_best_server(item) + update_json(item.path, ret) + return ret["downloadStatus"] + + +def get_episodes(item): + logger.info("contentAction: %s | contentChannel: %s | contentType: %s" % ( + item.contentAction, item.contentChannel, item.contentType)) + + # El item que pretendemos descargar YA es un episodio + if item.contentType == "episode": + episodes = [item.clone()] + + # El item es uma serie o temporada + elif item.contentType in ["tvshow", "season"]: + # importamos el canal + channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel]) + # Obtenemos el listado de episodios + episodes = getattr(channel, item.contentAction)(item) + + itemlist = [] + + # Tenemos las lista, ahora vamos a comprobar + for episode in episodes: + + # Si partiamos de un item que ya era episodio estos datos ya están bien, no hay que modificarlos + if item.contentType != "episode": + episode.contentAction = episode.action + episode.contentChannel = episode.channel + + # Si el resultado es una temporada, no nos vale, tenemos que descargar los episodios de cada temporada + if episode.contentType == "season": + itemlist.extend(get_episodes(episode)) + + # Si el resultado es un episodio ya es lo que necesitamos, lo preparamos para añadirlo a la descarga + if episode.contentType == "episode": + + # Pasamos el id al episodio + if not episode.infoLabels["tmdb_id"]: + episode.infoLabels["tmdb_id"] = item.infoLabels["tmdb_id"] + + # Episodio, Temporada y Titulo + if not episode.contentSeason or not episode.contentEpisodeNumber: + season_and_episode = scrapertools.get_season_and_episode(episode.title) + if season_and_episode: + episode.contentSeason = season_and_episode.split("x")[0] + episode.contentEpisodeNumber = season_and_episode.split("x")[1] + + 
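+            # Assumption from how it is used just above: scrapertools.get_season_and_episode()
+            # extracts an "SxE" marker from a title, e.g. "2x05" from "2x05 - Pilot", which
+            # split("x") then turns into contentSeason and contentEpisodeNumber.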
# Buscamos en tmdb + if item.infoLabels["tmdb_id"]: + scraper.find_and_set_infoLabels(episode) + + # Episodio, Temporada y Titulo + if not episode.contentTitle: + episode.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)|\d*x\d*\s*-", "", episode.title).strip() + + episode.downloadFilename = filetools.validate_path(os.path.join(item.downloadFilename, "%dx%0.2d - %s" % ( + episode.contentSeason, episode.contentEpisodeNumber, episode.contentTitle.strip()))) + + itemlist.append(episode) + # Cualquier otro resultado no nos vale, lo ignoramos + else: + logger.info("Omitiendo item no válido: %s" % episode.tostring()) + + return itemlist + + +def write_json(item): + logger.info() + + item.action = "menu" + item.channel = "downloads" + item.downloadStatus = STATUS_CODES.stoped + item.downloadProgress = 0 + item.downloadSize = 0 + item.downloadCompleted = 0 + if not item.contentThumbnail: + item.contentThumbnail = item.thumbnail + + for name in ["text_bold", "text_color", "text_italic", "context", "totalItems", "viewmode", "title", "fulltitle", + "thumbnail"]: + if item.__dict__.has_key(name): + item.__dict__.pop(name) + + path = os.path.join(config.get_setting("downloadlistpath"), str(time.time()) + ".json") + filetools.write(path, item.tojson()) + item.path = path + time.sleep(0.1) + + +def save_download(item): + logger.info() + + # Menu contextual + if item.from_action and item.from_channel: + item.channel = item.from_channel + item.action = item.from_action + del item.from_action + del item.from_channel + + item.contentChannel = item.channel + item.contentAction = item.action + + if item.contentType in ["tvshow", "episode", "season"]: + save_download_tvshow(item) + + elif item.contentType == "movie": + save_download_movie(item) + + else: + save_download_video(item) + + +def save_download_video(item): + logger.info("contentAction: %s | contentChannel: %s | contentTitle: %s" % ( + item.contentAction, item.contentChannel, item.contentTitle)) + + set_movie_title(item) + + item.downloadFilename = filetools.validate_path("%s [%s]" % (item.contentTitle.strip(), item.contentChannel)) + + write_json(item) + + if not platformtools.dialog_yesno(config.get_localized_string(30101), "¿Iniciar la descarga ahora?"): + platformtools.dialog_ok(config.get_localized_string(30101), item.contentTitle, + config.get_localized_string(30109)) + else: + start_download(item) + + +def save_download_movie(item): + logger.info("contentAction: %s | contentChannel: %s | contentTitle: %s" % ( + item.contentAction, item.contentChannel, item.contentTitle)) + + progreso = platformtools.dialog_progress("Descargas", "Obteniendo datos de la pelicula") + + set_movie_title(item) + + result = scraper.find_and_set_infoLabels(item) + if not result: + progreso.close() + return save_download_video(item) + + progreso.update(0, "Añadiendo pelicula...") + + item.downloadFilename = filetools.validate_path("%s [%s]" % (item.contentTitle.strip(), item.contentChannel)) + + write_json(item) + + progreso.close() + + if not platformtools.dialog_yesno(config.get_localized_string(30101), "¿Iniciar la descarga ahora?"): + platformtools.dialog_ok(config.get_localized_string(30101), item.contentTitle, + config.get_localized_string(30109)) + else: + start_download(item) + + +def save_download_tvshow(item): + logger.info("contentAction: %s | contentChannel: %s | contentType: %s | contentSerieName: %s" % ( + item.contentAction, item.contentChannel, item.contentType, item.contentSerieName)) + + progreso = platformtools.dialog_progress("Descargas", "Obteniendo 
datos de la serie") + + scraper.find_and_set_infoLabels(item) + + item.downloadFilename = filetools.validate_path("%s [%s]" % (item.contentSerieName, item.contentChannel)) + + progreso.update(0, "Obteniendo episodios...", "conectando con %s..." % item.contentChannel) + + episodes = get_episodes(item) + + progreso.update(0, "Añadiendo capitulos...", " ") + + for x, i in enumerate(episodes): + progreso.update(x * 100 / len(episodes), + "%dx%0.2d: %s" % (i.contentSeason, i.contentEpisodeNumber, i.contentTitle)) + write_json(i) + progreso.close() + + if not platformtools.dialog_yesno(config.get_localized_string(30101), "¿Iniciar la descarga ahora?"): + platformtools.dialog_ok(config.get_localized_string(30101), + str(len(episodes)) + " capitulos de: " + item.contentSerieName, + config.get_localized_string(30109)) + else: + for i in episodes: + res = start_download(i) + if res == STATUS_CODES.canceled: + break + + +def set_movie_title(item): + if not item.contentTitle: + item.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)", "", item.fulltitle).strip() + + if not item.contentTitle: + item.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)", "", item.title).strip() diff --git a/plugin.video.alfa/channels/ecarteleratrailers.json b/plugin.video.alfa/channels/ecarteleratrailers.json new file mode 100755 index 00000000..5b66a830 --- /dev/null +++ b/plugin.video.alfa/channels/ecarteleratrailers.json @@ -0,0 +1,23 @@ +{ + "id": "ecarteleratrailers", + "name": "Trailers ecartelera", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "ecarteleratrailers.png", + "banner": "ecarteleratrailers.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "movie" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/ecarteleratrailers.py b/plugin.video.alfa/channels/ecarteleratrailers.py new file mode 100755 index 00000000..2bcfc353 --- /dev/null +++ b/plugin.video.alfa/channels/ecarteleratrailers.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + + if item.url == "": + item.url = "http://www.ecartelera.com/videos/" + + # ------------------------------------------------------ + # Descarga la página + # ------------------------------------------------------ + data = scrapertools.cachePage(item.url) + # logger.info(data) + + # ------------------------------------------------------ + # Extrae las películas + # ------------------------------------------------------ + patron = '<div class="viditem"[^<]+' + patron += '<div class="fimg"><a href="([^"]+)"><img alt="([^"]+)" src="([^"]+)"/><p class="length">([^<]+)</p></a></div[^<]+' + patron += '<div class="fcnt"[^<]+' + patron += '<h4><a[^<]+</a></h4[^<]+' + patron += '<p class="desc">([^<]+)</p>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail, duration, scrapedplot in matches: + title = scrapedtitle + " (" + duration + ")" + url = scrapedurl + thumbnail = scrapedthumbnail + plot = scrapedplot.strip() + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail, + plot=plot, server="directo", folder=False)) + + # ------------------------------------------------------ + # Extrae la página siguiente + # ------------------------------------------------------ + patron = '<a href="([^"]+)">Siguiente</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for match in matches: + scrapedtitle = "Pagina siguiente" + scrapedurl = match + scrapedthumbnail = "" + scrapeddescription = "" + + # Añade al listado de XBMC + itemlist.append(Item(channel=item.channel, action="mainlist", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, server="directo", folder=True, + viewmode="movie_with_plot")) + + return itemlist + + +# Reproducir un vídeo +def play(item): + logger.info() + itemlist = [] + # Descarga la página + data = scrapertools.cachePage(item.url) + logger.info(data) + + # Extrae las películas + patron = '<source src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) > 0: + url = urlparse.urljoin(item.url, matches[0]) + logger.info("url=" + url) + itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail, + plot=item.plot, server="directo", folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/elsenordelanillo.json b/plugin.video.alfa/channels/elsenordelanillo.json new file mode 100755 index 00000000..101f0a31 --- /dev/null +++ b/plugin.video.alfa/channels/elsenordelanillo.json @@ -0,0 +1,24 @@ +{ + "id": "elsenordelanillo", + "name": "El señor del anillo", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "elsenordelanillo.png", + "banner": "elsenordelanillo.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": 
"Eliminado código innecesario." + } + ], + "categories": [ + "latino", + "movie" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/elsenordelanillo.py b/plugin.video.alfa/channels/elsenordelanillo.py new file mode 100755 index 00000000..ef054c54 --- /dev/null +++ b/plugin.video.alfa/channels/elsenordelanillo.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", + url="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="generos", title="Por género", + url="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/")) + itemlist.append(Item(channel=item.channel, action="letras", title="Por letra", + url="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/")) + itemlist.append(Item(channel=item.channel, action="anyos", title="Por año", + url="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/")) + + return itemlist + + +def anyos(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + # logger.info("data="+data) + data = scrapertools.find_single_match(data, 'scalas por a(.*?)</ul>') + logger.info("data=" + data) + + # Extrae las entradas (carpetas) + patron = '<li><a target="[^"]+" title="[^"]+" href="([^"]+)"><strong>([^<]+)</strong>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + thumbnail = "" + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, viewmode="movie")) + + return itemlist + + +def letras(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + # logger.info("data="+data) + data = scrapertools.find_single_match(data, '<div class="bkpelsalf_ul(.*?)</ul>') + logger.info("data=" + data) + + # Extrae las entradas (carpetas) + # <li><a target="_top" href="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/letra/a.html" title="Películas que comienzan con A">A</a> + patron = '<li><a target="[^"]+" href="([^"]+)" title="[^"]+">([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + thumbnail = "" + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, viewmode="movie")) + + return itemlist + + +def generos(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + # logger.info("data="+data) + + # Extrae las entradas (carpetas) + # <a class='generos' target="_top" href='/pelisdelanillo/categoria/accion/' title='Las Mejores Películas de Acción De Todos Los Años'> Acción </a> + patron = "<a class='generos' target=\"_top\" href='([^']+)' title='[^']+'>([^<]+)</a>" + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] 
+ + for scrapedurl, scrapedtitle in matches: + title = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8").strip() + thumbnail = "" + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, viewmode="movie")) + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + # logger.info("data="+data) + + # Extrae las entradas + ''' + <!--<pelicula>--> + <li class="peli_bx br1px brdr10px ico_a"> + <h2 class="titpeli bold ico_b"><a target="_top" href="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/pelicula/1077/el-jardinero-fiel.html" title="El Jardinero Fiel">El Jardinero Fiel</a></h2> + <div class="peli_img p_relative"> + <div class="peli_img_img"> + <a target="_top" href="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/pelicula/1077/el-jardinero-fiel.html" title="El Jardinero Fiel"> + <img src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/files/uploads/1077.jpg" alt="El Jardinero Fiel" /></a> + </div> + <div> + <center><table border="5" bordercolor="#000000"><tr><td> + <img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/lat.png"> + </td><td> + <img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/sub.png"> + </td><td> + <img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/no-cam.png"> + </td><td> + <img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/dvd.png"> + </td><td> + <img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/no-hd.png"> + </td></tr></table></center> + </div> + <div class="peli_txt bgdeg8 brdr10px bxshd2 ico_b p_absolute pd15px white"> + <div class="plt_tit bold fs14px mgbot10px"><h2 class="bold d_inline fs14px"><font color="black"><b>El Jardinero Fiel</b></font></h2></div> + <div class="plt_ft clf mgtop10px"> + <div class="stars f_left pdtop10px"><strong>Genero</strong>: Suspenso, Drama, 2005</div> + <br><br> + <div class="stars f_left pdtop10px"><table><tr><td><strong>Idioma</strong>:</td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/lat.png"></td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/sub.png"></td></tr></table></div> + <br /><br /> + <div class="stars f_left pdtop10px"><table><tr><td><strong>Calidad</strong>:</td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/no-cam.png"></td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/dvd.png"></td><td><img width="26" heigth="17" src="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/Temas/default/img/idioma/no-hd.png"></td></tr></table></div> + <br /><br> + <div class="stars f_left pdtop10px"><strong>Visualizada</strong>: 629 Veces</div> + <a target="_top" class="vrfich bold ico f_right" href="http://www.xn--elseordelanillo-1qb.com/pelisdelanillo/pelicula/1077/el-jardinero-fiel.html" title=""></a> + + </div> 
+ </div> + </div> + </li> + <!--</pelicula>--> + ''' + patronbloque = "<!--<pelicula>--[^<]+<li(.*?)</li>" + bloques = re.compile(patronbloque, re.DOTALL).findall(data) + + for bloque in bloques: + scrapedurl = scrapertools.find_single_match(bloque, '<a.*?href="([^"]+)"') + scrapedtitle = scrapertools.find_single_match(bloque, '<a.*?title="([^"]+)"') + scrapedthumbnail = scrapertools.find_single_match(bloque, '<img src="([^"]+)"') + + title = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8") + title = title.strip() + title = scrapertools.htmlclean(title) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title)) + + # </b></span></a></li[^<]+<li><a href="?page=2"> + next_page = scrapertools.find_single_match(data, '</b></span></a></li[^<]+<li><a target="_top" href="([^"]+)">') + if next_page != "": + itemlist.append( + Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=item.url + next_page, + folder=True, viewmode="movie")) + + return itemlist + + +def findvideos(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + # logger.info("data="+data) + bloque = scrapertools.find_single_match(data, "function cargamos.*?window.open.'([^']+)'") + data = scrapertools.cache_page(bloque) + + from core import servertools + itemlist = servertools.find_video_items(data=data) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.folder = False + + return itemlist + + +def play(item): + logger.info("url=" + item.url) + + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/eporner.json b/plugin.video.alfa/channels/eporner.json new file mode 100755 index 00000000..4ddc9dfe --- /dev/null +++ b/plugin.video.alfa/channels/eporner.json @@ -0,0 +1,27 @@ +{ + "id": "eporner", + "name": "Eporner", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "eporner.png", + "banner": "eporner.png", + "version": 1, + "changes": [ + { + "date": "03/06/2017", + "description": "reparada seccion categorias" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "28/12/16", + "description": "First version" + } + ], + "categories": [ + "adult" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/eporner.py b/plugin.video.alfa/channels/eporner.py new file mode 100755 index 00000000..4fee0a64 --- /dev/null +++ b/plugin.video.alfa/channels/eporner.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import httptools +from core import jsontools +from core import logger + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Últimos videos", action="videos", url="http://www.eporner.com/0/")) + itemlist.append(item.clone(title="Categorias", action="categorias", url="http://www.eporner.com/categories/")) + itemlist.append(item.clone(title="Pornstars", action="pornstars_list", url="http://www.eporner.com/pornstars/")) + itemlist.append(item.clone(title="Buscar", 
action="search", url="http://www.eporner.com/search/%s/")) + + return itemlist + + +def search(item, texto): + logger.info() + + item.url = item.url % texto + item.action = "videos" + + try: + return videos(item) + except: + import traceback + logger.error(traceback.format_exc()) + return [] + + +def pornstars_list(item): + logger.info() + itemlist = [] + for letra in "ABCDEFGHIJKLMNOPQRSTUVWXYZ": + itemlist.append(item.clone(title=letra, url=urlparse.urljoin(item.url, letra), action="pornstars")) + + return itemlist + + +def pornstars(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '<div class="mbtit" itemprop="name"><a href="([^"]+)" title="([^"]+)">[^<]+</a></div> ' + patron += '<a href="[^"]+" title="[^"]+"> <img src="([^"]+)" alt="[^"]+" style="width:190px;height:152px;" /> </a> ' + patron += '<div class="mbtim"><span>Videos: </span>([^<]+)</div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for url, title, thumbnail, count in matches: + itemlist.append( + item.clone(title="%s (%s videos)" % (title, count), url=urlparse.urljoin(item.url, url), action="videos", + thumbnail=thumbnail)) + + # Paginador + patron = "<span style='color:#FFCC00;'>[^<]+</span></a> <a href='([^']+)' title='[^']+'><span>[^<]+</span></a>" + matches = re.compile(patron, re.DOTALL).findall(data) + if matches: + itemlist.append(item.clone(title="Pagina siguiente", url=urlparse.urljoin(item.url, matches[0]))) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '<div class="categoriesbox" id="[^"]+"> <div class="ctbinner"> <a href="([^"]+)" title="[^"]+"> <img src="([^"]+)" alt="[^"]+"> <h2>([^"]+)</h2> </a> </div> </div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for url, thumbnail, title in matches: + itemlist.append( + item.clone(title=title, url=urlparse.urljoin(item.url, url), action="videos", thumbnail=thumbnail)) + + return sorted(itemlist, key=lambda i: i.title) + + +def videos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '<a href="([^"]+)" title="([^"]+)" id="[^"]+">.*?<img id="[^"]+" src="([^"]+)"[^>]+>.*?<div class="mbtim">([^<]+)</div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for url, title, thumbnail, duration in matches: + itemlist.append(item.clone(title="%s (%s)" % (title, duration), url=urlparse.urljoin(item.url, url), + action="play", thumbnail=thumbnail, contentThumbnail=thumbnail, + contentType="movie", contentTitle=title)) + + # Paginador + patron = "<span style='color:#FFCC00;'>[^<]+</span></a> <a href='([^']+)' title='[^']+'><span>[^<]+</span></a>" + matches = re.compile(patron, re.DOTALL).findall(data) + if matches: + itemlist.append(item.clone(title="Página siguiente", url=urlparse.urljoin(item.url, matches[0]))) + + return itemlist + + +def int_to_base36(num): + """Converts a positive integer into a base36 string.""" + assert num >= 0 + digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'.lower() + + res = '' + while not res or num > 0: + num, i = divmod(num, 36) + res = digits[i] + res + return res + + +def play(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = "EP: { vid: '([^']+)', hash: '([^']+)'" + + vid, hash = re.compile(patron, re.DOTALL).findall(data)[0] + + hash = int_to_base36(int(hash[0:8], 16)) + int_to_base36(int(hash[8:16], 16)) + int_to_base36( + int(hash[16:24], 16)) + 
int_to_base36(int(hash[24:32], 16)) + + url = "https://www.eporner.com/xhr/video/%s?hash=%s" % (vid, hash) + data = httptools.downloadpage(url).data + jsondata = jsontools.load(data) + + for source in jsondata["sources"]["mp4"]: + url = jsondata["sources"]["mp4"][source]["src"] + title = source.split(" ")[0] + + itemlist.append(["%s %s [directo]" % (title, url[-4:]), url]) + + return sorted(itemlist, key=lambda i: int(i[0].split("p")[0])) diff --git a/plugin.video.alfa/channels/erotik.json b/plugin.video.alfa/channels/erotik.json new file mode 100755 index 00000000..661c670d --- /dev/null +++ b/plugin.video.alfa/channels/erotik.json @@ -0,0 +1,33 @@ +{ + "id": "erotik", + "name": "Erotik", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "http://www.youfreeporntube.com/uploads/custom-logo.png", + "banner": "http://www.youfreeporntube.com/uploads/custom-logo.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "26/12/2016", + "description": "Release." + } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/erotik.py b/plugin.video.alfa/channels/erotik.py new file mode 100755 index 00000000..2a213b74 --- /dev/null +++ b/plugin.video.alfa/channels/erotik.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos", + url="http://www.ero-tik.com/newvideos.html?&page=1")) + itemlist.append( + Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.ero-tik.com/browse.html")) + itemlist.append(Item(channel=item.channel, action="lista", title="Top ultima semana", + url="http://www.ero-tik.com/topvideos.html?do=recent")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar", + url="http://www.ero-tik.com/search.php?keywords=")) + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = "{0}{1}".format(item.url, texto) + try: + return lista(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + +def categorias(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}", "", data) + patron = '<div class="pm-li-category"><a href="([^"]+)">.*?.<h3>(.*?)</h3></a>' + matches = re.compile(patron, re.DOTALL).findall(data) + for url, actriz in matches: + itemlist.append(Item(channel=item.channel, action="listacategoria", title=actriz, url=url)) + + return itemlist + + +def lista(item): + logger.info() + itemlist = [] + # Descarga la página + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}", "", data) + + # Extrae las entradas de la pagina seleccionada + patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>' + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + 
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + title = scrapedtitle.strip() + + # Añade al listado + itemlist.append(Item(channel=item.channel, action="play", thumbnail=thumbnail, fanart=thumbnail, title=title, + fulltitle=title, url=url, + viewmode="movie", folder=True)) + + paginacion = scrapertools.find_single_match(data, + '<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">') + + if paginacion: + itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente", + url="http://ero-tik.com/" + paginacion)) + + return itemlist + + +def listacategoria(item): + logger.info() + itemlist = [] + # Descarga la página + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}", "", data) + + # Extrae las entradas de la pagina seleccionada + patron = '<li><div class=".*?<a href="([^"]+)".*?>.*?.img src="([^"]+)".*?alt="([^"]+)".*?>' + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + title = scrapedtitle.strip() + + # Añade al listado + itemlist.append( + Item(channel=item.channel, action="play", thumbnail=thumbnail, title=title, fulltitle=title, url=url, + viewmode="movie", folder=True)) + + paginacion = scrapertools.find_single_match(data, + '<li class="active"><a href="#" onclick="return false;">\d+</a></li><li class=""><a href="([^"]+)">') + + if paginacion: + itemlist.append( + Item(channel=item.channel, action="listacategoria", title=">> Página Siguiente", url=paginacion)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + # Descarga la página + data = scrapertools.cachePage(item.url) + data = scrapertools.unescape(data) + logger.info(data) + from core import servertools + itemlist.extend(servertools.find_video_items(data=data)) + for videoitem in itemlist: + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + videoitem.action = "play" + videoitem.folder = False + videoitem.title = item.title + + return itemlist diff --git a/plugin.video.alfa/channels/estadepelis.json b/plugin.video.alfa/channels/estadepelis.json new file mode 100755 index 00000000..5c51852b --- /dev/null +++ b/plugin.video.alfa/channels/estadepelis.json @@ -0,0 +1,78 @@ +{ + "id": "estadepelis", + "name": "Estadepelis", + "compatible": { + "addon_version": "4.3" + }, + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s24.postimg.org/nsgit7fhh/estadepelis.png", + "banner": "https://s28.postimg.org/ud0l032ul/estadepelis_banner.png", + "version": 1, + "changes": [ + { + "date": "24/06/2017", + "description": "Cambios para autoplay" + }, + { + "date": "22/06/2017", + "description": "ajustes para AutoPlay" + }, + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "07/02/2017", + "description": "Release" + } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "VOS" + ] + }, + { + "id": 
"include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/estadepelis.py b/plugin.video.alfa/channels/estadepelis.py new file mode 100755 index 00000000..f70eb948 --- /dev/null +++ b/plugin.video.alfa/channels/estadepelis.py @@ -0,0 +1,476 @@ +# -*- coding: utf-8 -*- + +import re + +from channels import autoplay +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +host = 'http://www.estadepelis.com/' +headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + +IDIOMAS = {'Latino': 'Latino', 'Sub Español': 'VOS'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = ['yourupload', 'openload', 'sendvid', ''] + +vars = { + 'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://www.estadepelis.com/', + 'b48699bb49d4550f27879deeb948d4f7d9c5949a8': 'embed', + 'JzewJkLlrvcFnLelj2ikbA': 'php?url=', + 'p889c6853a117aca83ef9d6523335dc065213ae86': 'player', + 'e20fb341325556c0fc0145ce10d08a970538987': 'http://yourupload.com/embed/' +} + +tgenero = {"acción": "https://s3.postimg.org/y6o9puflv/accion.png", + "animación": "https://s13.postimg.org/5on877l87/animacion.png", + "aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "belico": "https://s23.postimg.org/71itp9hcr/belica.png", + "ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "comedia romántica": "https://s21.postimg.org/xfsj7ua0n/romantica.png", + "cortometrajes": "https://s15.postimg.org/kluxxwg23/cortometraje.png", + "crimen": "https://s4.postimg.org/6z27zhirx/crimen.png", + "cristianas": "https://s7.postimg.org/llo852fwr/religiosa.png", + "deportivas": "https://s13.postimg.org/xuxf5h06v/deporte.png", + "drama": "https://s16.postimg.org/94sia332d/drama.png", + "familiar": "https://s7.postimg.org/6s7vdhqrf/familiar.png", + "fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "guerra": "https://s4.postimg.org/n1h2jp2jh/guerra.png", + "historia": "https://s15.postimg.org/fmc050h1n/historia.png", + "intriga": "https://s27.postimg.org/v9og43u2b/intriga.png", + "misterios": "https://s1.postimg.org/w7fdgf2vj/misterio.png", + "musical": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png"} + + +def mainlist(item): + logger.info() + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append(item.clone(title="Peliculas", + action="menupeliculas", + thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png', + fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png' + )) + + itemlist.append(item.clone(title="Series", + action="lista", + thumbnail='https://s27.postimg.org/iahczwgrn/series.png', + fanart='https://s27.postimg.org/iahczwgrn/series.png', 
+ url=host + 'lista-de-series/', + extra='series' + )) + + itemlist.append(item.clone(title="Doramas", + action="lista", + thumbnail='https://s15.postimg.org/sjcthoa6z/doramas.png', + fanart='https://s15.postimg.org/sjcthoa6z/doramas.png', + url=host + 'lista-de-doramas/', + extra='series' + )) + + itemlist.append(item.clone(title="Documentales", + action="lista", + thumbnail='https://s16.postimg.org/7xjj4bmol/documental.png', + fanart='https://s16.postimg.org/7xjj4bmol/documental.png', + url=host + 'lista-de-documentales/', + extra='peliculas' + )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url=host + 'search?q=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def menupeliculas(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Todas", + action="lista", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + url=host + 'lista-de-peliculas/', + extra='peliculas' + )) + + itemlist.append(item.clone(title="Ultimas", + action="lista", + thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + url=host, + extra='peliculas' + )) + + itemlist.append(item.clone(title="Generos", + action="generos", + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + url=host, + extra='peliculas' + )) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + contentSerieName = '' + + patron = '<div class=movie><div class=imagen><img src=(.*?) alt=(.*?) width=.*? 
height=.*?\/><a href=(.*?)><span ' + patron += 'class=player>.*?class=year>(.*?)<\/span>' + matches = re.compile(patron, re.DOTALL).findall(data) + + if item.extra == 'peliculas': + accion = 'findvideos' + else: + accion = 'temporadas' + + for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches: + + scrapedurl = scrapedurl.translate(None, '"') + scrapedurl = scrapedurl.translate(None, "'") + url = host + scrapedurl + thumbnail = scrapedthumbnail + title = scrapedtitle + year = scrapedyear + if item.extra == 'series': + contentSerieName = scrapedtitle + + itemlist.append(Item(channel=item.channel, + action=accion, + title=title, + url=url, + thumbnail=thumbnail, + contentTitle=scrapedtitle, + extra=item.extra, + contentSerieName=contentSerieName, + infoLabels={'year': year} + )) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # #Paginacion + + if itemlist != []: + actual_page_url = item.url + next_page = scrapertools.find_single_match(data, '<div class=siguiente><a href=(.*?)>') + url = host + next_page + if next_page != '': + itemlist.append(Item(channel=item.channel, + action="lista", + title='Siguiente >>>', + url=url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', + extra=item.extra + )) + return itemlist + + +def generos(item): + logger.info() + + itemlist = [] + norep = [] + data = httptools.downloadpage(item.url).data + + patron = '<li class="cat-item cat-item-.*?"><a href="([^"]+)">([^<]+)<\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + + url = host + scrapedurl + title = scrapedtitle.lower() + if title in tgenero: + thumbnail = tgenero[title.lower()] + else: + thumbnail = '' + + itemactual = Item(channel=item.channel, + action='lista', + title=title, url=url, + thumbnail=thumbnail, + extra=item.extra + ) + + if title not in norep: + itemlist.append(itemactual) + norep.append(itemactual.title) + + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<li class="has-sub"><a href="([^"]+)"><span><b class="icon-bars"><\/b> ([^<]+)<\/span><\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + temp = 1 + infoLabels = item.infoLabels + for scrapedurl, scrapedtitle in matches: + url = scrapedurl + title = scrapedtitle.strip('') + contentSeasonNumber = temp + infoLabels['season'] = contentSeasonNumber + thumbnail = item.thumbnail + plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>') + itemlist.append(Item(channel=item.channel, + action="episodiosxtemp", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + contentSerieName=item.contentSerieName, + contentSeasonNumber=contentSeasonNumber, + plot=plot, + infoLabels=infoLabels + )) + temp = temp + 1 + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra="episodios", + contentSerieName=item.contentSerieName, + extra1=item.extra1, + temp=str(temp) + )) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + temp = 'temporada-' + str(item.contentSeasonNumber) + patron = '<li>.\s*<a href="(.*?)">.\s*<span.*?datex">([^<]+)<' + + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedepisode in matches: + url 
= host + scrapedurl + title = item.contentSerieName + ' ' + scrapedepisode + thumbnail = item.thumbnail + fanart = '' + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=item.thumbnail, + plot=item.plot, + extra=item.extra, + contentSerieName=item.contentSerieName + )) + + return itemlist + + +def episodiosxtemp(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + temp = 'temporada-' + str(item.contentSeasonNumber) + patron = '<li>.\s*<a href="(.*?-' + temp + '.*?)">.\s*<span.*?datex">([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + infoLabels = item.infoLabels + for scrapedurl, scrapedepisode in matches: + url = host + scrapedurl + title = item.contentSerieName + ' ' + scrapedepisode + scrapedepisode = re.sub(r'.*?x', '', scrapedepisode) + infoLabels['episode'] = scrapedepisode + thumbnail = item.thumbnail + fanart = '' + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=item.thumbnail, + plot=item.plot, + extra=item.extra, + contentSerieName=item.contentSerieName, + infoLabels=infoLabels + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + + +def dec(encurl): + logger.info() + url = '' + encurl = encurl.translate(None, "',(,),;") + encurl = encurl.split('+') + + for cod in encurl: + if cod in vars: + url = url + vars[cod] + else: + url = url + cod + return url + + +def findvideos(item): + logger.info() + + itemlist = [] + langs = dict() + + data = httptools.downloadpage(item.url).data + logger.debug('data: %s' % data) + patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for key, value in matches: + langs[key] = value.strip() + + patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);' + matches = re.compile(patron, re.DOTALL).findall(data) + title = item.title + enlace = scrapertools.find_single_match(data, + 'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"') + + for scrapedlang, encurl in matches: + + if 'e20fb34' in encurl: + url = dec(encurl) + url = url + enlace + + else: + url = dec(encurl) + title = '' + server = '' + servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk'} + server_id = re.sub(r'.*?embed|\.php.*', '', url) + if server_id and server_id in servers: + server = servers[server_id] + logger.debug('server_id: %s' % server_id) + + if langs[scrapedlang] in list_language: + language = IDIOMAS[langs[scrapedlang]] + else: + language = 'Latino' + if langs[scrapedlang] == 'Latino': + idioma = '[COLOR limegreen]LATINO[/COLOR]' + elif langs[scrapedlang] == 'Sub Español': + idioma = '[COLOR red]SUB[/COLOR]' + + if item.extra == 'peliculas': + title = item.contentTitle + ' (' + server + ') ' + idioma + plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>') + else: + title = item.contentSerieName + ' (' + server + ') ' + idioma + plot = item.plot + + thumbnail = servertools.guess_server_thumbnail(title) + + if 'player' not in url and 'php' in url: + itemlist.append(item.clone(title=title, + url=url, + action="play", + plot=plot, + thumbnail=thumbnail, + server=server, + quality='', + language=language + )) + logger.debug('url: %s' % url) + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + 
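+    # As used across this repo (a hedged reading, not documented here): when the user has
+    # enabled AutoPlay for this channel, autoplay.start() tries the collected links ordered
+    # by the configured server/quality priorities and plays the first one that resolves;
+    # otherwise it returns with itemlist untouched so the normal server list is shown.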
autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle + )) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url, add_referer=True).data + if 'your' in item.url: + item.url = 'http://www.yourupload.com/embed/' + scrapertools.find_single_match(data, 'src=".*?code=(.*?)"') + itemlist.append(item) + else: + + itemlist = servertools.find_video_items(data=data) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle + )) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + if texto != '': + return lista(item) + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + # categoria='peliculas' + try: + if categoria == 'peliculas': + item.url = host + item.extra = 'peliculas' + elif categoria == 'infantiles': + item.url = host + 'search?q=animación' + item.extra = 'peliculas' + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/estrenosgo.json b/plugin.video.alfa/channels/estrenosgo.json new file mode 100755 index 00000000..94c0678c --- /dev/null +++ b/plugin.video.alfa/channels/estrenosgo.json @@ -0,0 +1,29 @@ +{ + "id": "estrenosgo", + "name": "EstrenosGo", + "active": true, + "adult": false, + "language": "es", + "fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/estrenosgo.png", + "thumbnail": "https://github.com/master-1970/resources/raw/master/images/squares/estrenosgo.png", + "banner": "estrenosgo.png", + "version": 1, + "changes": [ + { + "date": "15/05/16", + "description": "Compatibilidad con python anteriores a la 2.7" + }, + { + "date": "03/05/16", + "description": "Modificado por cambios en la web" + }, + { + "date": "29/04/16", + "description": "Version inicial" + } + ], + "categories": [ + "movie", + "tvshow" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/estrenosgo.py b/plugin.video.alfa/channels/estrenosgo.py new file mode 100755 index 00000000..eaa0aec9 --- /dev/null +++ b/plugin.video.alfa/channels/estrenosgo.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- + +import re + +from core import channeltools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +HOST = 'http://estrenosli.org/' +parameters = channeltools.get_channel_parameters('estrenosgo') +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] +color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4'] + + +def mainlist(item): + logger.info() + itemlist = [] + item.url = HOST + item.text_color = color2 + item.fanart = fanart_host + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png" + 
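+    # Menu layout: the non-folder entries below ("Películas:", "Series:") act as section
+    # headers, and the indented items beneath them are clones of the parent item, so they
+    # inherit the text_color, fanart and thumbnail set above.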
itemlist.append(item.clone(title="Películas:", folder=False, text_color=color3, text_bold=True)) + itemlist.append(item.clone(title=" Cartelera", action="listado", url=HOST + "descarga-0-58126-0-0-fx-1-1-.fx")) + itemlist.append(item.clone(title=" DVD-RIP", action="listado", url=HOST + "descarga-0-581210-0-0-fx-1-1.fx")) + itemlist.append(item.clone(title=" HD-RIP", action="listado", url=HOST + "descarga-0-58128-0-0-fx-1-1-.fx")) + + itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host)) + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png" + itemlist.append(item.clone(title="Series:", folder=False, text_color=color3, text_bold=True)) + itemlist.append(item.clone(title=" Nuevos episodios", action="listado", + url=HOST + "descarga-0-58122-0-0-fx-1-1.fx")) + + return itemlist + + +def listado(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<div class="MiniFicha">.*?' + patron += '<img src="([^"]+).*?' + patron += '<div class="MiniF_TitleSpecial">[^>]+>([^<]+).*?' + patron += '<b>Categoria:\s*</b>([^&]+)»\s*([^<]+).*?' + patron += '<div class="OpcionesDescargasMini">(.*?)</div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for thumbnail, title, cat_padres, cat_hijos, opciones in matches: + # logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones) + # Obtenemos el año del titulo y eliminamos lo q sobre + patron = '\d{4}$' + year = scrapertools.find_single_match(title, patron) + if year: + title = re.sub(patron, "", title) + patron = '\s?-?\s?(line)?\s?-\s?$' + regex = re.compile(patron, re.I) + title = regex.sub("", title) + + # Obtenemos la imagen b por q es mayor + thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:] + + # Buscamos opcion de ver online + patron = '<a href="http://estrenosly.org/ver-online-([^"]+)' + url_ver = scrapertools.find_single_match(opciones, patron) + if url_ver: + new_item = Item(channel=item.channel, action="findvideos", title=title, + thumbnail=thumbnail, url=url_ver, + infoLabels={"year": year}, text_color=color1) + + cat_padres = cat_padres.strip() + if cat_padres in ["peliculas-dvdrip", "HDRIP", "cartelera"]: + # if item.extra == 'movie': + new_item.contentTitle = title + new_item.extra = "movie" + # Filtramos nombres validos para la calidad + patron = ("rip|dvd|screener|hd|ts|Telesync") + if re.search(patron, cat_hijos, flags=re.IGNORECASE): + new_item.contentQuality = cat_hijos + new_item.title = "%s [%s]" % (title, cat_hijos) + elif cat_padres == "peliculas-dvdrip": + new_item.contentQuality = "DVDRIP" + new_item.title = "%s [DVDRIP]" % title + elif cat_padres == "HDRIP": + new_item.contentQuality = "HDRIP" + new_item.title = "%s [HDRIP]" % title + + elif cat_padres == "series": + new_item.contentSerieName = cat_hijos + patron = re.compile('(\d+)x(\d+)') + matches = patron.findall(title) + if len(matches) == 1: + new_item.contentSeason = matches[0][0] + new_item.contentEpisodeNumber = matches[0][1].zfill(2) + new_item.extra = "episodie" + else: + # matches == [('1', '01'), ('1', '02'), ('1', '03')] + new_item.extra = "multi-episodie" + + else: # Otras categorias q de momento no nos interesan + continue + + ''' Opcionalmente podriamos obtener los enlaces torrent y descargas directas + patron = '<a href="http://estrenosli.org/descarga-directa-([^"]+)' + new_item.url_descarga = scrapertools.find_single_match(opciones,patron) + 
patron = '<a href="http://estrenosli.org/descargar-torrent-([^"]+).*?' + new_item.url_torrent = scrapertools.find_single_match(opciones,patron)''' + + itemlist.append(new_item) + + if itemlist: + # Obtenemos los datos basicos de todas las peliculas mediante multihilos + tmdb.set_infoLabels(itemlist) + + # Si es necesario añadir paginacion + patron = '<div class="sPages">.*?' + patron += '<a href="([^"]+)">Siguiente' + url_next_page = scrapertools.find_single_match(data, patron) + if url_next_page: + itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", + thumbnail=thumbnail_host, url=HOST + url_next_page, folder=True, + text_color=color3, text_bold=True)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + list_opciones = [] + IDIOMAS = {"banderita1": "Español", "banderita2": "VOSE", "banderita3": "Latino"} + + url = "http://estrenosli.org/ver-online-" + item.url + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<div class="content"><a href="([^"]+).*?' + patron += '<div class="content_mini"><span class="([^"]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for url, banderita in matches: + idioma = "" + if banderita in IDIOMAS: + idioma = " [%s]" % IDIOMAS[banderita] + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + if item.extra == 'multi-episodie': + patron = '<div class="linksDescarga"><span class="titulo">Video Online:([^<]+).*?<a href="([^"]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + for capitulo, url in matches: + s = servertools.findvideos(url, skip=True) + if s: + itemlist.append(item.clone(url=s[0][1], action="play", folder=False, server=s[0][2], + title="Ver %s en %s%s" % ( + capitulo.strip(), s[0][2].capitalize(), idioma), + thumbnail2=item.thumbnail, + thumbnail=config.get_thumb("server_" + s[0][2] + ".png"))) + else: + for s in servertools.findvideos(data): + itemlist.append(item.clone(url=s[1], action="play", folder=False, server=s[2], + title="Ver en %s%s" % (s[2].capitalize(), idioma), + thumbnail2=item.thumbnail, + thumbnail=config.get_thumb("server_" + s[2] + ".png"))) + + # Insertar items "Buscar trailer" y "Añadir a la videoteca" + if itemlist and item.extra == "movie": + if item.contentQuality: + title = "%s [%s]" % (item.contentTitle, item.contentQuality) + else: + title = item.contentTitle + + itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer", + text_color=color3, title=title, viewmode="list")) + + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca", + action="add_pelicula_to_library", url=item.url, text_color="green", + contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + # Cambiamos el thumbnail del server por el de la pelicula + itemlist.append(item.clone(thumbnail=item.thumbnail2)) + + return itemlist diff --git a/plugin.video.alfa/channels/favorites.py b/plugin.video.alfa/channels/favorites.py new file mode 100755 index 00000000..355b7990 --- /dev/null +++ b/plugin.video.alfa/channels/favorites.py @@ -0,0 +1,243 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Lista de vídeos favoritos +# ------------------------------------------------------------ + +import os +import time + +from core import config +from core import filetools 
+from core import logger
+from core import scrapertools
+from core.item import Item
+from platformcode import platformtools
+
+try:
+    # Fijamos la ruta a favourites.xml
+    if config.is_xbmc():
+        import xbmc
+
+        FAVOURITES_PATH = xbmc.translatePath("special://profile/favourites.xml")
+    else:
+        FAVOURITES_PATH = os.path.join(config.get_data_path(), "favourites.xml")
+except:
+    import traceback
+
+    logger.error(traceback.format_exc())
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+
+    for name, thumb, data in read_favourites():
+        if "plugin://plugin.video.%s/?" % config.PLUGIN_NAME in data:
+            url = scrapertools.find_single_match(data, 'plugin://plugin.video.%s/\?([^;]*)' % config.PLUGIN_NAME) \
+                .replace("&quot;", "")
+
+            item = Item().fromurl(url)
+            item.title = name
+            item.thumbnail = thumb
+            item.isFavourite = True
+
+            if type(item.context) == str:
+                item.context = item.context.split("|")
+            elif type(item.context) != list:
+                item.context = []
+
+            item.context.extend([{"title": config.get_localized_string(30154),  # "Quitar de favoritos"
+                                  "action": "delFavourite",
+                                  "channel": "favorites",
+                                  "from_title": item.title},
+                                 {"title": "Renombrar",
+                                  "action": "renameFavourite",
+                                  "channel": "favorites",
+                                  "from_title": item.title}
+                                 ])
+            # logger.debug(item.tostring('\n'))
+            itemlist.append(item)
+
+    return itemlist
+
+
+def read_favourites():
+    favourites_list = []
+    if filetools.exists(FAVOURITES_PATH):
+        data = filetools.read(FAVOURITES_PATH)
+
+        matches = scrapertools.find_multiple_matches(data, "<favourite([^<]*)</favourite>")
+        for match in matches:
+            name = scrapertools.find_single_match(match, 'name="([^"]*)')
+            thumb = scrapertools.find_single_match(match, 'thumb="([^"]*)')
+            data = scrapertools.find_single_match(match, '[^>]*>([^<]*)')
+            favourites_list.append((name, thumb, data))
+
+    return favourites_list
+
+
+def save_favourites(favourites_list):
+    raw = '<favourites>' + chr(10)
+    for name, thumb, data in favourites_list:
+        raw += ' <favourite name="%s" thumb="%s">%s</favourite>' % (name, thumb, data) + chr(10)
+    raw += '</favourites>' + chr(10)
+
+    return filetools.write(FAVOURITES_PATH, raw)
+
+
+def addFavourite(item):
+    logger.info()
+    # logger.debug(item.tostring('\n'))
+
+    # Si se llega aqui mediante el menu contextual, hay que recuperar los parametros action y channel
+    if item.from_action:
+        item.__dict__["action"] = item.__dict__.pop("from_action")
+    if item.from_channel:
+        item.__dict__["channel"] = item.__dict__.pop("from_channel")
+
+    favourites_list = read_favourites()
+    data = "ActivateWindow(10025,&quot;plugin://plugin.video.%s/?" % config.PLUGIN_NAME + item.tourl() + "&quot;,return)"
+    titulo = item.title.replace('"', "'")
+    favourites_list.append((titulo, item.thumbnail, data))
+
+    if save_favourites(favourites_list):
+        platformtools.dialog_ok(config.get_localized_string(30102), titulo,
+                                config.get_localized_string(30108))  # 'se ha añadido a favoritos'
+
+
+def delFavourite(item):
+    logger.info()
+    # logger.debug(item.tostring('\n'))
+
+    if item.from_title:
+        item.title = item.from_title
+
+    favourites_list = read_favourites()
+    for fav in favourites_list[:]:
+        if fav[0] == item.title:
+            favourites_list.remove(fav)
+
+            if save_favourites(favourites_list):
+                platformtools.dialog_ok(config.get_localized_string(30102), item.title,
+                                        config.get_localized_string(30105).lower())  # 'Se ha quitado de favoritos'
+                platformtools.itemlist_refresh()
+            break
+
+
+def renameFavourite(item):
+    logger.info()
+    # logger.debug(item.tostring('\n'))
+
+    # Buscar el item que queremos renombrar en favourites.xml
+    favourites_list = read_favourites()
+    for i, fav in enumerate(favourites_list):
+        if fav[0] == item.from_title:
+            # abrir el teclado
+            new_title = platformtools.dialog_input(item.from_title, item.title)
+            if new_title:
+                favourites_list[i] = (new_title, fav[1], fav[2])
+                if save_favourites(favourites_list):
+                    platformtools.dialog_ok(config.get_localized_string(30102), item.from_title,
+                                            "se ha renombrado como:", new_title)
+                    platformtools.itemlist_refresh()
+
+
+##################################################
+# Funciones para migrar favoritos antiguos (.txt)
+def readbookmark(filepath):
+    logger.info()
+    import urllib
+
+    bookmarkfile = filetools.open_for_reading(filepath)
+
+    lines = bookmarkfile.readlines()
+
+    try:
+        titulo = urllib.unquote_plus(lines[0].strip())
+    except:
+        titulo = lines[0].strip()
+
+    try:
+        url = urllib.unquote_plus(lines[1].strip())
+    except:
+        url = lines[1].strip()
+
+    try:
+        thumbnail = urllib.unquote_plus(lines[2].strip())
+    except:
+        thumbnail = lines[2].strip()
+
+    try:
+        server = urllib.unquote_plus(lines[3].strip())
+    except:
+        server = lines[3].strip()
+
+    try:
+        plot = urllib.unquote_plus(lines[4].strip())
+    except:
+        plot = lines[4].strip()
+
+    # Campos fulltitle y canal añadidos
+    if len(lines) >= 6:
+        try:
+            fulltitle = urllib.unquote_plus(lines[5].strip())
+        except:
+            fulltitle = lines[5].strip()
+    else:
+        fulltitle = titulo
+
+    if len(lines) >= 7:
+        try:
+            canal = urllib.unquote_plus(lines[6].strip())
+        except:
+            canal = lines[6].strip()
+    else:
+        canal = ""
+
+    bookmarkfile.close()
+
+    return canal, titulo, thumbnail, plot, server, url, fulltitle
+
+
+def check_bookmark(readpath):
+    # Crea un listado con las entradas de favoritos
+    itemlist = []
+
+    if readpath.startswith("special://") and config.is_xbmc():
+        import xbmc
+        readpath = xbmc.translatePath(readpath)
+
+    for fichero in sorted(filetools.listdir(readpath)):
+        # Ficheros antiguos (".txt")
+        if fichero.endswith(".txt"):
+            # Esperamos 0.1 segundos entre ficheros, para que no se solapen los nombres de archivo
+            time.sleep(0.1)
+
+            # Obtenemos el item desde el .txt
+            canal, titulo, thumbnail, plot, server, url, fulltitle = readbookmark(filetools.join(readpath, fichero))
+            if canal == "":
+                canal = "favorites"
+            item = Item(channel=canal, action="play", url=url, server=server, title=fulltitle, thumbnail=thumbnail,
+                        plot=plot, fanart=thumbnail, fulltitle=fulltitle, folder=False)
+
+            filetools.rename(filetools.join(readpath, fichero), fichero[:-4] + ".old")
+            itemlist.append(item)
+
+    # Si hay favoritos que guardar
+    if itemlist:
+        favourites_list = read_favourites()
+        for item in itemlist:
+            data = "ActivateWindow(10025,&quot;plugin://plugin.video.alfa/?" + item.tourl() + "&quot;,return)"
+            favourites_list.append((item.title, item.thumbnail, data))
+        if save_favourites(favourites_list):
+            logger.debug("Conversion de txt a xml correcta")
+
+
+# Esto solo funcionará al migrar de versiones anteriores, ya no existe "bookmarkpath"
+try:
+    if config.get_setting("bookmarkpath") != "":
+        check_bookmark(config.get_setting("bookmarkpath"))
+    else:
+        logger.info("No existe la ruta a los favoritos de versiones antiguas")
+except:
+    pass
diff --git a/plugin.video.alfa/channels/filesmonster_catalogue.json b/plugin.video.alfa/channels/filesmonster_catalogue.json
new file mode 100755
index 00000000..0a29e612
--- /dev/null
+++ b/plugin.video.alfa/channels/filesmonster_catalogue.json
@@ -0,0 +1,33 @@
+{
+    "id": "filesmonster_catalogue",
+    "name": "Filesmonster Catalogue",
+    "active": true,
+    "adult": true,
+    "language": "es",
+    "thumbnail": "filesmonster_catalogue.png",
+    "banner": "filesmonster_catalogue.png",
+    "version": 1,
+    "changes": [
+        {
+            "date": "15/03/2017",
+            "description": "limpieza código"
+        },
+        {
+            "date": "05/08/2016",
+            "description": "Eliminado de sección películas"
+        }
+    ],
+    "categories": [
+        "adult"
+    ],
+    "settings": [
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": false,
+            "enabled": true,
+            "visible": true
+        }
+    ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/filesmonster_catalogue.py b/plugin.video.alfa/channels/filesmonster_catalogue.py
new file mode 100755
index 00000000..e10758a1
--- /dev/null
+++ b/plugin.video.alfa/channels/filesmonster_catalogue.py
@@ -0,0 +1,398 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+
+from core import config
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+
+def strip_tags(value):
+    return re.sub(r'<[^>]*?>', '', value)
+
+
+def mainlist(item):
+    logger.info()
+    user = config.get_setting("filesmonsteruser")
+    itemlist = []
+    itemlist.append(Item(channel=item.channel, action="unusualporn", title="Canal unusualporn.net",
+                         thumbnail="http://filesmonster.biz/img/logo.png"))
+    itemlist.append(Item(channel=item.channel, action="files_monster", title="Canal files-monster.org",
+                         thumbnail="http://files-monster.org/template/static/images/logo.jpg"))
+    itemlist.append(Item(channel=item.channel, action="filesmonster", title="Canal filesmonster.filesdl.net",
+                         thumbnail="http://filesmonster.biz/img/logo.png"))
+    if user != '':
+        itemlist.append(
+            Item(channel=item.channel, action="favoritos", title="Favoritos en filesmonster.com del usuario " + user,
+                 folder=True))
+
+    return itemlist
+
+
+def filesmonster(item):
+    logger.info()
+
+    itemlist = []
+    itemlist.append(Item(channel=item.channel, action="videos", title="Últimos vídeos",
+                         thumbnail="http://photosex.biz/imager/w_400/h_400/9f869c6cb63e12f61b58ffac2da822c9.jpg",
+                         url="http://filesmonster.filesdl.net"))
+    itemlist.append(Item(channel=item.channel, action="categorias", title="Categorías",
+                         thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg",
+                         url="http://filesmonster.filesdl.net"))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar en filesmonster.filesdl.net",
+                         url="http://filesmonster.filesdl.net/posts/search?q=%s"))
+    return itemlist
+
+
+def unusualporn(item):
+    logger.info()
+
+    itemlist = []
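+    # The three mirror sites share one flow: the functions suffixed _2/_3 (videos,
+    # categorias, detail) repeat the same list -> detail -> play chain per site, all
+    # ultimately resolving to filesmonster.com download.php/folders.php links.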
+    itemlist.append(Item(channel=item.channel, action="videos_2", title="Últimos vídeos", url="http://unusualporn.net/",
+                         thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
+    itemlist.append(Item(channel=item.channel, action="categorias_2", title="Categorías", url="http://unusualporn.net/",
+                         thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar en unusualporn",
+                         url="http://unusualporn.net/search/%s"))
+    return itemlist
+
+
+def files_monster(item):
+    logger.info()
+
+    itemlist = []
+    itemlist.append(
+        Item(channel=item.channel, action="videos_3", title="Últimos vídeos", url="http://www.files-monster.org/",
+             thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
+    itemlist.append(
+        Item(channel=item.channel, action="categorias_3", title="Categorías", url="http://www.files-monster.org/",
+             thumbnail="http://photosex.biz/imager/w_400/h_500/e48337cd95bbb6c2c372ffa6e71441ac.jpg"))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar en files-monster.org",
+                         url="http://files-monster.org/search?search=%s"))
+    return itemlist
+
+
+def favoritos(item):
+    user = config.get_setting("filesmonsteruser")
+    password = config.get_setting("filesmonsterpassword")
+    logger.info()
+    name_file = os.path.splitext(os.path.basename(__file__))[0]
+    fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_favoritos.txt")
+    fa = open(fname, 'a+')
+    fa.close()
+    f = open(fname, 'r')
+    lines = f.readlines()
+    f.close()
+    itemlist = []
+    post2 = "username=" + user + "&password=" + password
+    login_url = "http://filesmonster.com/api/public/login"
+    data1 = scrapertools.cache_page(login_url, post=post2)
+    partes1 = data1.split('"')
+    estado = partes1[3]
+    if estado != 'success':
+        itemlist.append(Item(channel=item.channel,
+                             title="No pudo accederse con tus datos de acceso de Filesmonster.com, introdúcelos en el apartado de configuración.
Error: " + estado + data1)) + url_favoritos = "http://filesmonster.com/?favorites=1" + data2 = scrapertools.cache_page(url_favoritos, post=post2) + data2 = scrapertools.find_single_match(data2, 'favorites-table(.*?)pager') + patronvideos = '<a href="([^"]+)">([^<]+)</a>.*?del=([^"]+)"' + matches = re.compile(patronvideos, re.DOTALL).findall(data2) + contador = 0 + for url, title, borrar in matches: + contador = contador + 1 + imagen = '' + for linea in lines: + partes2 = linea.split("@") + parte_url = partes2[0] + parte_imagen = partes2[1] + if (parte_url == url): imagen = parte_imagen.rstrip('\n').rstrip('\r') + + if url.find("?fid=") == -1: + itemlist.append( + Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.title, + url=url, thumbnail=imagen, folder=False)) + else: + itemlist.append( + Item(channel=item.channel, action="detail", server="filesmonster", title=title, fulltitle=title, + thumbnail=imagen, url=url, folder=True)) + itemlist.append(Item(channel=item.channel, action="quitar_favorito", + title="(-) quitar de mis favoritos en filesmonster.com", thumbnail=imagen, + url="http://filesmonster.com/?favorites=1&del=" + borrar, plot=borrar)) + itemlist.append(Item(channel=item.channel, title="", folder=True)) + if contador == 0 and estado == 'success': + itemlist.append( + Item(channel=item.channel, title="No tienes ningún favorito, navega por las diferentes fuentes y añádelos")) + return itemlist + + +def quitar_favorito(item): + logger.info() + itemlist = [] + + data = scrapertools.downloadpage(item.url) + itemlist.append(Item(channel=item.channel, action="favoritos", + title="El vídeo ha sido eliminado de tus favoritos, pulsa para volver a tu lista de favoritos")) + + return itemlist + + +def anadir_favorito(item): + logger.info() + name_file = os.path.splitext(os.path.basename(__file__))[0] + fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_favoritos.txt") + user = config.get_setting("filesmonsteruser") + password = config.get_setting("filesmonsterpassword") + itemlist = [] + post2 = "username=" + user + "&password=" + password + login_url = "http://filesmonster.com/api/public/login" + data1 = scrapertools.cache_page(login_url, post=post2) + if item.plot == 'el archivo': + id1 = item.url.split('?id=') + id = id1[1] + que = "file" + if item.plot == 'la carpeta': + id1 = item.url.split('?fid=') + id = id1[1] + que = "folder" + url = "http://filesmonster.com/ajax/add_to_favorites" + post3 = "username=" + user + "&password=" + password + "&id=" + id + "&obj_type=" + que + data2 = scrapertools.cache_page(url, post=post3) + if data2 == 'Already in Your favorites': itemlist.append(Item(channel=item.channel, action="favoritos", + title="" + item.plot + " ya estaba en tu lista de favoritos (" + user + ") en Filesmonster")) + if data2 != 'You are not logged in' and data2 != 'Already in Your favorites': + itemlist.append(Item(channel=item.channel, action="favoritos", + title="Se ha añadido correctamente " + item.plot + " a tu lista de favoritos (" + user + ") en Filesmonster", + plot=data1 + data2)) + f = open(fname, "a+") + if (item.plot == 'la carpeta'): + ruta = "http://filesmonster.com/folders.php?" 
+ if (item.plot == 'el archivo'): + ruta = "http://filesmonster.com/download.php" + laruta = ruta + item.url + laruta = laruta.replace("http://filesmonster.com/folders.php?http://filesmonster.com/folders.php?", + "http://filesmonster.com/folders.php?") + laruta = laruta.replace("http://filesmonster.com/download.php?http://filesmonster.com/download.php?", + "http://filesmonster.com/download.php?") + f.write(laruta + '@' + item.thumbnail + '\n') + f.close() + if data2 == 'You are not logged in': itemlist.append(Item(channel=item.channel, action="favoritos", + title="No ha sido posible añadir " + item.plot + " a tu lista de favoritos (" + user + " no logueado en Filesmonster)", )) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + data = scrapertools.downloadpage(item.url) + data = scrapertools.find_single_match(data, + 'Categories <b class="caret"></b></a>(.*?)RSS <b class="caret"></b></a>') + + patronvideos = '<a href="([^"]+)">([^<]+)</a>' + + matches = re.compile(patronvideos, re.DOTALL).findall(data) + + for url, title in matches: + itemlist.append(Item(channel=item.channel, action="videos", title=title, url=url)) + + return itemlist + + +def categorias_2(item): + logger.info() + itemlist = [] + + data = scrapertools.downloadpage(item.url) + + patronvideos = '<li class="cat-item cat-item-[\d]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a><a class="rss_s" title="[^"]+" target="_blank" href="[^"]+"></a></li>' + + matches = re.compile(patronvideos, re.DOTALL).findall(data) + + for url, title in matches: + itemlist.append(Item(channel=item.channel, action="videos_2", title=title, url=url)) + + return itemlist + + +def categorias_3(item): + logger.info() + itemlist = [] + + data = scrapertools.downloadpage(item.url) + + patronvideos = '<li><a href="([^"]+)">([^<]+)</a></li>' + + matches = re.compile(patronvideos, re.DOTALL).findall(data) + + for url, title in matches: + itemlist.append(Item(channel=item.channel, action="videos_3", title=title, url=url)) + + return itemlist + + +def search(item, texto): + logger.info("texto:" + texto) + original = item.url + item.url = item.url % texto + try: + if original == 'http://filesmonster.filesdl.net/posts/search?q=%s': + return videos(item) + if original == 'http://unusualporn.net/search/%s': + return videos_2(item) + if original == 'http://files-monster.org/search?search=%s': + return videos_3(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def videos(item): + logger.info() + itemlist = [] + + url = item.url + while url and len(itemlist) < 25: + data = scrapertools.downloadpage(url) + patronvideos = '<div class="panel-heading">.*?<a href="([^"]+)">([^<]+).*?</a>.*?<div class="panel-body" style="text-align: center;">.*?<img src="([^"]+)".*?' 
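+        # Pagination: this while loop keeps following the "Next" link and accumulating
+        # results until at least 25 items are collected or no further page link is found.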
+        matches = re.compile(patronvideos, re.DOTALL).findall(data)
+
+        for url, title, thumbnail in matches:
+            title = title.strip()
+            itemlist.append(
+                Item(channel=item.channel, action="detail", title=title, fulltitle=title, url=url, thumbnail=thumbnail))
+
+        url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>').replace("&amp;", "&")
+
+    # Enlace para la siguiente pagina
+    if url:
+        itemlist.append(Item(channel=item.channel, action="videos", title=">> Página Siguiente", url=url))
+
+    return itemlist
+
+
+def videos_2(item):
+    logger.info()
+    itemlist = []
+    url_limpia = item.url.split("?")[0]
+    url = item.url
+    while url and len(itemlist) < 25:
+        data = scrapertools.downloadpage(url)
+        patronvideos = 'data-link="([^"]+)" data-title="([^"]+)" src="([^"]+)" border="0" />'
+        matches = re.compile(patronvideos, re.DOTALL).findall(data)
+
+        for url, title, thumbnail in matches:
+            itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
+                                 thumbnail=thumbnail))
+
+        url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace("&amp;", "&")
+
+    # Enlace para la siguiente pagina
+    if url:
+        itemlist.append(Item(channel=item.channel, action="videos_2", title=">> Página Siguiente", url=url))
+
+    return itemlist
+
+
+def videos_3(item):
+    logger.info()
+    itemlist = []
+
+    url = item.url
+    url_limpia = item.url.split("?")[0]
+    while url and len(itemlist) < 25:
+        data = scrapertools.downloadpage(url)
+        patronvideos = '<a href="([^"]+)">.*?<img src="([^"]+)" border="0" title=".*?([^"]+).*?" height="70" />'
+        matches = re.compile(patronvideos, re.DOTALL).findall(data)
+
+        for url, thumbnail, title in matches:
+            itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
+                                 thumbnail=thumbnail))
+
+        url = scrapertools.find_single_match(data,
+                                             '<a style="text-decoration:none;" href="([^"]+)">→</a>').replace(
+            "&amp;", "&")
+
+    # Enlace para la siguiente pagina
+    if url:
+        itemlist.append(
+            Item(channel=item.channel, action="videos_3", title=">> Página Siguiente", url=url_limpia + url))
+
+    return itemlist
+
+
+def detail(item):
+    logger.info()
+    itemlist = []
+
+    data = scrapertools.downloadpage(item.url)
+    patronvideos = '["|\'](http\://filesmonster.com/download.php\?[^"\']+)["|\']'
+    matches = re.compile(patronvideos, re.DOTALL).findall(data)
+
+    for url in matches:
+        title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
+        itemlist.append(
+            Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
+                 url=url, thumbnail=item.thumbnail, folder=False))
+        itemlist.append(Item(channel=item.channel, action="anadir_favorito",
+                             title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=url,
+                             thumbnail=item.thumbnail, plot="el archivo", folder=True))
+        itemlist.append(Item(channel=item.channel, title=""))
+
+    patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
+    matches = re.compile(patronvideos, re.DOTALL).findall(data)
+    for url in matches:
+        if not url == item.url:
+            logger.info(url)
+            logger.info(item.url)
+            title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
+            itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
+                                 thumbnail=item.thumbnail, folder=True))
+            itemlist.append(Item(channel=item.channel, action="anadir_favorito",
+                                 title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
thumbnail=item.thumbnail, plot="la carpeta", folder=True)) + itemlist.append(Item(channel=item.channel, title="")); + + return itemlist + + +def detail_2(item): + logger.info() + itemlist = [] + + # descarga la pagina + data = scrapertools.downloadpageGzip(item.url) + data = data.split('<span class="filesmonsterdlbutton">Download from Filesmonster</span>') + data = data[0] + # descubre la url + patronvideos = 'href="http://filesmonster.com/download.php(.*?)".(.*?)' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + for match2 in matches: + url = "http://filesmonster.com/download.php" + match2[0] + title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle) + itemlist.append( + Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle, + url=url, thumbnail=item.thumbnail, folder=False)) + itemlist.append(Item(channel=item.channel, action="anadir_favorito", + title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=match2[0], + thumbnail=item.thumbnail, plot="el archivo", folder=True)) + itemlist.append(Item(channel=item.channel, title="")); + + patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + for url in matches: + if not url == item.url: + logger.info(url) + logger.info(item.url) + title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle) + itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url, + thumbnail=item.thumbnail, folder=True)) + itemlist.append(Item(channel=item.channel, action="anadir_favorito", + title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url, + thumbnail=item.thumbnail, plot="la carpeta", folder=True)) + itemlist.append(Item(channel=item.channel, title="")); + + return itemlist diff --git a/plugin.video.alfa/channels/filtertools.py b/plugin.video.alfa/channels/filtertools.py new file mode 100755 index 00000000..26748ae3 --- /dev/null +++ b/plugin.video.alfa/channels/filtertools.py @@ -0,0 +1,621 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# filtertools - se encarga de filtrar resultados +# ------------------------------------------------------------ + +from core import config +from core import filetools +from core import jsontools +from core import logger +from core.item import Item +from platformcode import platformtools + +TAG_TVSHOW_FILTER = "TVSHOW_FILTER" +TAG_NAME = "name" +TAG_ACTIVE = "active" +TAG_LANGUAGE = "language" +TAG_QUALITY_ALLOWED = "quality_allowed" + +COLOR = {"parent_item": "yellow", "error": "red", "striped_even_active": "blue", + "striped_even_inactive": "0xff00bfff", "striped_odd_active": "0xff008000", + "striped_odd_inactive": "0xff00fa9a", "selected": "blue" + } + +filter_global = None + +__channel__ = "filtertools" + + +# TODO echar un ojo a https://pyformat.info/, se puede formatear el estilo y hacer referencias directamente a elementos + + +class ResultFilter: + def __init__(self, dict_filter): + self.active = dict_filter[TAG_ACTIVE] + self.language = dict_filter[TAG_LANGUAGE] + self.quality_allowed = dict_filter[TAG_QUALITY_ALLOWED] + + def __str__(self): + return "{active: '%s', language: '%s', quality_allowed: '%s'}" % \ + (self.active, self.language, self.quality_allowed) + + +class Filter: + def __init__(self, item, global_filter_lang_id): + self.result = None + self.__get_data(item, global_filter_lang_id) + + def 
__get_data(self, item, global_filter_lang_id): + + dict_filtered_shows = jsontools.get_node_from_file(item.channel, TAG_TVSHOW_FILTER) + tvshow = item.show.lower().strip() + + global_filter_language = config.get_setting(global_filter_lang_id, item.channel) + + if tvshow in dict_filtered_shows.keys(): + + self.result = ResultFilter({TAG_ACTIVE: dict_filtered_shows[tvshow][TAG_ACTIVE], + TAG_LANGUAGE: dict_filtered_shows[tvshow][TAG_LANGUAGE], + TAG_QUALITY_ALLOWED: dict_filtered_shows[tvshow][TAG_QUALITY_ALLOWED]}) + + # opcion general "no filtrar" + elif global_filter_language != 0: + from core import channeltools + list_controls, dict_settings = channeltools.get_channel_controls_settings(item.channel) + + for control in list_controls: + if control["id"] == global_filter_lang_id: + + try: + language = control["lvalues"][global_filter_language] + # logger.debug("language %s" % language) + except: + logger.error("No se ha encontrado el valor asociado al codigo '%s': %s" % + (global_filter_lang_id, global_filter_language)) + break + + self.result = ResultFilter({TAG_ACTIVE: True, TAG_LANGUAGE: language, TAG_QUALITY_ALLOWED: []}) + break + + def __str__(self): + return "{'%s'}" % self.result + + +def access(): + """ + Devuelve si se puede usar o no filtertools + """ + allow = False + + if config.is_xbmc() or config.get_platform() == "mediaserver": + allow = True + + return allow + + +def context(item, list_language=None, list_quality=None, exist=False): + """ + Para xbmc/kodi y mediaserver ya que pueden mostrar el menú contextual, se añade un menu para configuración + la opción de filtro, sólo si es para series. + Dependiendo del lugar y si existe filtro se añadirán más opciones a mostrar. + El contexto -solo se muestra para series-. + + @param item: elemento para obtener la información y ver que contexto añadir + @type item: item + param list_language: listado de idiomas posibles + @type list_language: list[str] + @param list_quality: listado de calidades posibles + @type list_quality: list[str] + @param exist: si existe el filtro + @type exist: bool + @return: lista de opciones a mostrar en el menú contextual + @rtype: list + """ + + # Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools. 
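+    # item.context may arrive as an "opt1|opt2" string, a ready-made list, or anything else;
+    # it is normalised to a list here (e.g. "a|b" -> ["a", "b"]) so the filtertools entries
+    # can be appended safely below.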
+ if type(item.context) == str: + _context = item.context.split("|") + elif type(item.context) == list: + _context = item.context + else: + _context = [] + + if access(): + dict_data = {"title": "FILTRO: Configurar", "action": "config_item", "channel": "filtertools"} + if list_language: + dict_data["list_language"] = list_language + if list_quality: + dict_data["list_quality"] = list_quality + + added = False + if type(_context) == list: + for x in _context: + if x and type(x) == dict: + if x["channel"] == "filtertools": + added = True + break + + if not added: + _context.append(dict_data) + + if item.action == "play": + if not exist: + _context.append({"title": "FILTRO: Añadir '%s'" % item.language, "action": "save_from_context", + "channel": "filtertools", "from_channel": item.channel}) + else: + _context.append({"title": "FILTRO: Borrar '%s'" % item.language, "action": "delete_from_context", + "channel": "filtertools", "from_channel": item.channel}) + + return _context + + +def show_option(itemlist, channel, list_language, list_quality): + if access(): + itemlist.append(Item(channel=__channel__, title="[COLOR %s]Configurar filtro para series...[/COLOR]" % + COLOR.get("parent_item", "auto"), action="load", + list_language=list_language, + list_quality=list_quality, from_channel=channel)) + + return itemlist + + +def load(item): + return mainlist(channel=item.from_channel, list_language=item.list_language, list_quality=item.list_quality) + + +def check_conditions(_filter, list_item, item, list_language, list_quality, quality_count=0, language_count=0): + is_language_valid = True + if _filter.language: + # logger.debug("title es %s" % item.title) + + # viene de episodios + if isinstance(item.language, list): + if _filter.language in item.language: + language_count += 1 + else: + is_language_valid = False + # viene de findvideos + else: + if item.language.lower() == _filter.language.lower(): + language_count += 1 + else: + is_language_valid = False + + is_quality_valid = True + quality = "" + + if _filter.quality_allowed and item.quality != "": + # if hasattr(item, 'quality'): # esta validación no hace falta por que SIEMPRE se devuelve el atributo vacío + if item.quality.lower() in _filter.quality_allowed: + quality = item.quality.lower() + quality_count += 1 + else: + is_quality_valid = False + + if is_language_valid and is_quality_valid: + item.list_language = list_language + if list_quality: + item.list_quality = list_quality + item.context = context(item, exist=True) + list_item.append(item) + # logger.debug("{0} | context: {1}".format(item.title, item.context)) + # logger.debug(" -Enlace añadido") + + logger.debug(" idioma valido?: %s, item.language: %s, filter.language: %s" % + (is_language_valid, item.language, _filter.language)) + logger.debug(" calidad valida?: %s, item.quality: %s, filter.quality_allowed: %s" + % (is_quality_valid, quality, _filter.quality_allowed)) + + return list_item, quality_count, language_count + + +def get_link(list_item, item, list_language, list_quality=None, global_filter_lang_id="filter_languages"): + """ + Devuelve una lista de enlaces, si el item está filtrado correctamente se agrega a la lista recibida. 
+def get_link(list_item, item, list_language, list_quality=None, global_filter_lang_id="filter_languages"):
+    """
+    Returns a list of links; the item is appended to the received list only if
+    it passes the filter.
+
+    @param list_item: list of links
+    @type list_item: list[Item]
+    @param item: item to filter
+    @type item: Item
+    @param list_language: available languages
+    @type list_language: list[str]
+    @param list_quality: available qualities
+    @type list_quality: list[str]
+    @param global_filter_lang_id: id of the language-filter setting in settings
+    @type global_filter_lang_id: str
+    @return: list of Item
+    @rtype: list[Item]
+    """
+    logger.info()
+
+    # bail out if the mandatory arguments are None
+    if list_item is None or item is None:
+        return []
+
+    logger.debug("total de items : %s" % len(list_item))
+
+    global filter_global
+
+    if not filter_global:
+        filter_global = Filter(item, global_filter_lang_id).result
+        logger.debug("filter: '%s' datos: '%s'" % (item.show, filter_global))
+
+    if filter_global and filter_global.active:
+        list_item, quality_count, language_count = \
+            check_conditions(filter_global, list_item, item, list_language, list_quality)
+    else:
+        item.context = context(item)
+        list_item.append(item)
+
+    return list_item
+
+
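+# Typical call from a channel's findvideos() (a sketch; the channel code and
+# the language list are hypothetical):
+#     from core import filtertools
+#     itemlist = filtertools.get_links(itemlist, item, list_language=["Español", "VOSE"])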
+def get_links(list_item, item, list_language, list_quality=None, global_filter_lang_id="filter_languages"):
+    """
+    Returns the filtered list of links.
+
+    @param list_item: list of links
+    @type list_item: list[Item]
+    @param item: item to filter
+    @type item: Item
+    @param list_language: available languages
+    @type list_language: list[str]
+    @param list_quality: available qualities
+    @type list_quality: list[str]
+    @param global_filter_lang_id: id of the language-filter setting in settings
+    @type global_filter_lang_id: str
+    @return: list of Item
+    @rtype: list[Item]
+    """
+    logger.info()
+
+    # bail out if the mandatory arguments are None
+    if list_item is None or item is None:
+        return []
+
+    # return early if list_item is empty; no platform check here, so Plex can still use the global filter
+    if len(list_item) == 0:
+        return list_item
+
+    logger.debug("total de items : %s" % len(list_item))
+
+    new_itemlist = []
+    quality_count = 0
+    language_count = 0
+
+    _filter = Filter(item, global_filter_lang_id).result
+    logger.debug("filter: '%s' datos: '%s'" % (item.show, _filter))
+
+    if _filter and _filter.active:
+
+        for item in list_item:
+            new_itemlist, quality_count, language_count = check_conditions(_filter, new_itemlist, item, list_language,
+                                                                           list_quality, quality_count, language_count)
+
+        logger.info("ITEMS FILTRADOS: %s/%s, idioma [%s]: %s, calidad_permitida %s: %s"
+                    % (len(new_itemlist), len(list_item), _filter.language, language_count, _filter.quality_allowed,
+                       quality_count))
+
+        if len(new_itemlist) == 0:
+            list_item_all = []
+            for i in list_item:
+                list_item_all.append(i.tourl())
+
+            # point the delete action back at the channel the links came from
+            _context = [{"title": "FILTRO: Borrar '%s'" % _filter.language, "action": "delete_from_context",
+                         "channel": "filtertools", "to_channel": item.channel}]
+
+            if _filter.quality_allowed:
+                msg_quality_allowed = " y calidad %s" % _filter.quality_allowed
+            else:
+                msg_quality_allowed = ""
+
+            new_itemlist.append(Item(channel=__channel__, action="no_filter", list_item_all=list_item_all,
+                                     show=item.show,
+                                     title="[COLOR %s]No hay elementos con idioma '%s'%s, pulsa para mostrar "
+                                           "sin filtro[/COLOR]"
+                                     % (COLOR.get("error", "auto"), _filter.language, msg_quality_allowed),
+                                     context=_context))
+
+    else:
+        for item in list_item:
+            item.list_language = list_language
+            if list_quality:
+                item.list_quality = list_quality
+            item.context = context(item)
+        new_itemlist = list_item
+
+    return new_itemlist
+
+
+def no_filter(item):
+    """
+    Shows the links without filtering.
+
+    @param item: item
+    @type item: Item
+    @return: list of links
+    @rtype: list[Item]
+    """
+    logger.info()
+
+    itemlist = []
+    for i in item.list_item_all:
+        itemlist.append(Item().fromurl(i))
+
+    return itemlist
+
+
+def mainlist(channel, list_language, list_quality):
+    """
+    Shows the list of filtered shows.
+
+    @param channel: channel name whose filtered shows are listed
+    @type channel: str
+    @param list_language: channel languages
+    @type list_language: list[str]
+    @param list_quality: channel qualities
+    @type list_quality: list[str]
+    @return: list of Item
+    @rtype: list[Item]
+    """
+    logger.info()
+    itemlist = []
+    dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_FILTER)
+
+    # alternate row colors, depending on whether the filter is active
+    for idx, tvshow in enumerate(sorted(dict_series)):
+
+        if idx % 2 == 0:
+            if dict_series[tvshow][TAG_ACTIVE]:
+                tag_color = COLOR.get("striped_even_active", "auto")
+            else:
+                tag_color = COLOR.get("striped_even_inactive", "auto")
+        else:
+            if dict_series[tvshow][TAG_ACTIVE]:
+                tag_color = COLOR.get("striped_odd_active", "auto")
+            else:
+                tag_color = COLOR.get("striped_odd_inactive", "auto")
+
+        name = dict_series.get(tvshow, {}).get(TAG_NAME, tvshow)
+        activo = " (desactivado)"
+        if dict_series[tvshow][TAG_ACTIVE]:
+            activo = ""
+        title = "Configurar [COLOR %s][%s][/COLOR]%s" % (tag_color, name, activo)
+
+        itemlist.append(Item(channel=__channel__, action="config_item", title=title, show=name,
+                             list_language=list_language, list_quality=list_quality, from_channel=channel))
+
+    if len(itemlist) == 0:
+        itemlist.append(Item(channel=channel, action="mainlist", title="No existen filtros, busca una serie y "
+                                                                       "pulsa en menú contextual 'FILTRO: Configurar'"))
+
+    return itemlist
+
+
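+# config_item() below hands its controls to platformtools.show_channel_settings()
+# with callback='save', so save() receives a dict keyed by control id; a sketch
+# with hypothetical values, where "language" is the selected index within
+# item.list_language:
+#     {"active": True, "language": 2, "hd": True, "720p": False}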
+def config_item(item):
+    """
+    Shows a filtered show so it can be configured.
+
+    @param item: item
+    @type item: Item
+    """
+    logger.info()
+    logger.info("item %s" % item.tostring())
+
+    # read the saved data from the JSON node
+    dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
+
+    tvshow = item.show.lower().strip()
+
+    lang_selected = dict_series.get(tvshow, {}).get(TAG_LANGUAGE, 'Español')
+    list_quality = dict_series.get(tvshow, {}).get(TAG_QUALITY_ALLOWED, [x.lower() for x in item.list_quality])
+    # logger.info("lang selected {}".format(lang_selected))
+    # logger.info("list quality {}".format(list_quality))
+
+    active = True
+    custom_button = {'visible': False}
+    allow_option = False
+    if tvshow in dict_series:
+        allow_option = True
+        active = dict_series.get(tvshow, {}).get(TAG_ACTIVE, False)
+        custom_button = {'label': 'Borrar', 'function': 'delete', 'visible': True, 'close': True}
+
+    list_controls = []
+
+    if allow_option:
+        active_control = {
+            "id": "active",
+            "type": "bool",
+            "label": "¿Activar/Desactivar filtro?",
+            "color": "",
+            "default": active,
+            "enabled": allow_option,
+            "visible": allow_option,
+        }
+        list_controls.append(active_control)
+
+    language_option = {
+        "id": "language",
+        "type": "list",
+        "label": "Idioma",
+        "color": "0xFFee66CC",
+        "default": item.list_language.index(lang_selected),
+        "enabled": True,
+        "visible": True,
+        "lvalues": item.list_language
+    }
+    list_controls.append(language_option)
+
+    if item.list_quality:
+        list_controls_calidad = [
+            {
+                "id": "textoCalidad",
+                "type": "label",
+                "label": "Calidad permitida",
+                "color": "0xffC6C384",
+                "enabled": True,
+                "visible": True,
+            },
+        ]
+        for element in sorted(item.list_quality, key=str.lower):
+            list_controls_calidad.append({
+                "id": element,
+                "type": "bool",
+                "label": element,
+                "default": element.lower() in list_quality,
+                "enabled": True,
+                "visible": True,
+            })
+
+        # append the quality controls to list_controls
+        list_controls.extend(list_controls_calidad)
+
+    title = "Filtrado de enlaces para: [COLOR %s]%s[/COLOR]" % (COLOR.get("selected", "auto"), item.show)
+
+    platformtools.show_channel_settings(list_controls=list_controls, callback='save', item=item,
+                                        caption=title, custom_button=custom_button)
+
+
+def delete(item, dict_values):
+    logger.info()
+
+    if item:
+        dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
+        tvshow = item.show.strip().lower()
+
+        heading = "¿Está seguro que desea eliminar el filtro?"
+        line1 = "Pulse 'Sí' para eliminar el filtro de [COLOR %s]%s[/COLOR], pulse 'No' o cierre la ventana para " \
+                "no hacer nada." % (COLOR.get("selected", "auto"), item.show.strip())
+
+        if platformtools.dialog_yesno(heading, line1) == 1:
+            lang_selected = dict_series.get(tvshow, {}).get(TAG_LANGUAGE, "")
+            dict_series.pop(tvshow, None)
+
+            result, json_data = jsontools.update_node(dict_series, item.from_channel, TAG_TVSHOW_FILTER)
+
+            sound = False
+            if result:
+                message = "FILTRO ELIMINADO"
+            else:
+                message = "Error al guardar en disco"
+                sound = True
+
+            heading = "%s [%s]" % (item.show.strip(), lang_selected)
+            platformtools.dialog_notification(heading, message, sound=sound)
+
+            if item.action in ["findvideos", "play"]:
+                platformtools.itemlist_refresh()
+
+
+def save(item, dict_data_saved):
+    """
+    Saves the values configured in the window.
+
+    @param item: item
+    @type item: Item
+    @param dict_data_saved: dict with the saved data
+    @type dict_data_saved: dict
+    """
+    logger.info()
+
+    if item and dict_data_saved:
+        logger.debug('item: %s\ndatos: %s' % (item.tostring(), dict_data_saved))
+
+        if item.from_channel == "videolibrary":
+            item.from_channel = item.contentChannel
+        dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
+        tvshow = item.show.strip().lower()
+
+        logger.info("Se actualizan los datos")
+
+        list_quality = []
+        for _id, value in dict_data_saved.items():
+            if _id in item.list_quality and value:
+                list_quality.append(_id.lower())
+
+        lang_selected = item.list_language[dict_data_saved[TAG_LANGUAGE]]
+        dict_filter = {TAG_NAME: item.show, TAG_ACTIVE: dict_data_saved.get(TAG_ACTIVE, True),
+                       TAG_LANGUAGE: lang_selected, TAG_QUALITY_ALLOWED: list_quality}
+        dict_series[tvshow] = dict_filter
+
+        result, json_data = jsontools.update_node(dict_series, item.from_channel, TAG_TVSHOW_FILTER)
+
+        sound = False
+        if result:
+            message = "FILTRO GUARDADO"
+        else:
+            message = "Error al guardar en disco"
+            sound = True
+
+        heading = "%s [%s]" % (item.show.strip(), lang_selected)
+        platformtools.dialog_notification(heading, message, sound=sound)
+
+        if item.from_action in ["findvideos", "play"]:
+            platformtools.itemlist_refresh()
+
+
+def save_from_context(item):
+    """
+    Saves the filter from the contextual menu.
+
+    @param item: item
+    @type item: Item
+    """
+    logger.info()
+
+    dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER)
+    tvshow = item.show.strip().lower()
+
+    dict_filter = {TAG_NAME: item.show, TAG_ACTIVE: True, TAG_LANGUAGE: item.language, TAG_QUALITY_ALLOWED: []}
+    dict_series[tvshow] = dict_filter
+
+    result, json_data = jsontools.update_node(dict_series, 
item.from_channel, TAG_TVSHOW_FILTER) + + sound = False + if result: + message = "FILTRO GUARDADO" + else: + message = "Error al guardar en disco" + sound = True + + heading = "%s [%s]" % (item.show.strip(), item.language) + platformtools.dialog_notification(heading, message, sound=sound) + + if item.from_action in ["findvideos", "play"]: + platformtools.itemlist_refresh() + + +def delete_from_context(item): + """ + Elimina el filtro a través del menú contextual + + @param item: item + @type item: item + """ + logger.info() + + # venimos desde get_links y no se ha obtenido ningún resultado, en menu contextual y damos a borrar + if item.to_channel != "": + item.from_channel = item.to_channel + + dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_FILTER) + tvshow = item.show.strip().lower() + + lang_selected = dict_series.get(tvshow, {}).get(TAG_LANGUAGE, "") + dict_series.pop(tvshow, None) + + result, json_data = jsontools.update_node(dict_series, item.from_channel, TAG_TVSHOW_FILTER) + + sound = False + if result: + message = "FILTRO ELIMINADO" + else: + message = "Error al guardar en disco" + sound = True + + heading = "%s [%s]" % (item.show.strip(), lang_selected) + platformtools.dialog_notification(heading, message, sound=sound) + + if item.from_action in ["findvideos", "play", "no_filter"]: # 'no_filter' es el mismo caso que L#601 + platformtools.itemlist_refresh() diff --git a/plugin.video.alfa/channels/freecambay.json b/plugin.video.alfa/channels/freecambay.json new file mode 100755 index 00000000..b9b0beff --- /dev/null +++ b/plugin.video.alfa/channels/freecambay.json @@ -0,0 +1,28 @@ +{ + "id": "freecambay", + "name": "FreeCamBay", + "language": "es", + "active": true, + "adult": true, + "version": 1, + "changes": [ + { + "date": "29/04/2017", + "description": "Primera versión" + } + ], + "thumbnail": "http://i.imgur.com/wuzhOCt.png?1", + "categories": [ + "adult" + ], + "settings": [ + { + "id": "menu_info", + "type": "bool", + "label": "Mostrar menú antes de reproducir con imágenes", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/freecambay.py b/plugin.video.alfa/channels/freecambay.py new file mode 100755 index 00000000..2f2f7b22 --- /dev/null +++ b/plugin.video.alfa/channels/freecambay.py @@ -0,0 +1,262 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +host = "http://www.freecambay.com" + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/")) + itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/")) + itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/")) + itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/")) + itemlist.append(item.clone(action="categorias", title="Modelos", + url=host + "/models/?mode=async&function=get_block&block_id=list_models_models" \ + "_list&sort_by=total_videos")) + itemlist.append(item.clone(action="playlists", title="Listas", url=host + "/playlists/")) + itemlist.append(item.clone(action="tags", title="Tags", url=host + "/tags/")) + itemlist.append(item.clone(title="Buscar...", action="search")) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", 
text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + item.url = "%s/search/%s/" % (host, texto.replace("+", "-")) + item.extra = texto + try: + return lista(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def lista(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + action = "play" + if config.get_setting("menu_info", "freecambay"): + action = "menu_info" + + # Extrae las entradas + patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="duration">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches: + if duration: + scrapedtitle = "%s - %s" % (duration, scrapedtitle) + if '>HD<' in quality: + scrapedtitle += " [COLOR red][HD][/COLOR]" + + itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Extrae la marca de siguiente página + if item.extra: + next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)') + if next_page: + if "from_videos=" in item.url: + next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url) + else: + next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \ + "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page) + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + else: + next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]*)"') + if next_page and not next_page.startswith("#"): + next_page = urlparse.urljoin(host, next_page) + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + else: + next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') + if next_page: + if "from=" in item.url: + next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) + else: + next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % ( + item.url, next_page) + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + # Extrae las entradas + patron = '<a class="item" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?<div class="videos">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches: + if videos: + scrapedtitle = "%s (%s)" % (scrapedtitle, videos) + itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') + if next_page: + if "from=" in item.url: + next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) + else: + next_page = "%s&from=%s" % 
(item.url, next_page) + itemlist.append(item.clone(action="categorias", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def playlists(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + # Extrae las entradas + patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?<div class="videos">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches: + if videos: + scrapedtitle = "%s (%s)" % (scrapedtitle, videos) + itemlist.append(item.clone(action="videos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]+)"') + if next_page: + next_page = urlparse.urljoin(host, next_page) + itemlist.append(item.clone(action="playlists", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def videos(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + action = "play" + if config.get_setting("menu_info", "freecambay"): + action = "menu_info" + # Extrae las entradas + patron = '<a href="([^"]+)" class="item ".*?data-original="([^"]+)".*?<strong class="title">\s*([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + scrapedtitle = scrapedtitle.strip() + itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') + if next_page: + if "from=" in item.url: + next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) + else: + next_page = "%s?mode=async&function=get_block&block_id=playlist_view_playlist_view&sort_by" \ + "=added2fav_date&&from=%s" % (item.url, next_page) + itemlist.append(item.clone(action="videos", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\'' + matches = scrapertools.find_multiple_matches(data, patron) + if not matches: + patron = '<iframe.*?height="(\d+)".*?video_url\s*:\s*\'([^\']+)\'' + matches = scrapertools.find_multiple_matches(data, patron) + for url, quality in matches: + if "http" in quality: + calidad = url + url = quality + quality = calidad + "p" + + itemlist.append(['.mp4 %s [directo]' % quality, url]) + + if item.extra == "play_menu": + return itemlist, data + + return itemlist + + +def menu_info(item): + logger.info() + itemlist = [] + + video_urls, data = play(item.clone(extra="play_menu")) + itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls)) + + bloque = scrapertools.find_single_match(data, '<div class="block-screenshots">(.*?)</div>') + matches = scrapertools.find_multiple_matches(bloque, '<img class="thumb lazy-load".*?data-original="([^"]+)"') + for i, img in enumerate(matches): + if i == 0: + continue + img = urlparse.urljoin(host, img) + title = "Imagen %s" % (str(i)) + itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img)) + + return 
itemlist + + +def tags(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + + if item.title == "Tags": + letras = [] + matches = scrapertools.find_multiple_matches(data, '<strong class="title".*?>\s*(.*?)</strong>') + for title in matches: + title = title.strip() + if title not in letras: + letras.append(title) + itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=title, extra=title)) + else: + if not item.length: + item.length = 0 + + bloque = scrapertools.find_single_match(data, + '>%s</strong>(.*?)(?:(?!%s)(?!#)[A-Z#]{1}</strong>|<div class="footer-margin">)' % ( + item.extra, item.extra)) + matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">\s*(.*?)</a>') + for url, title in matches[item.length:item.length + 100]: + itemlist.append(Item(channel=item.channel, action="lista", url=url, title=title)) + + if len(itemlist) >= 100: + itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=">> Página siguiente", + length=item.length + 100, extra=item.extra)) + + return itemlist diff --git a/plugin.video.alfa/channels/gnula.json b/plugin.video.alfa/channels/gnula.json new file mode 100755 index 00000000..cea1c756 --- /dev/null +++ b/plugin.video.alfa/channels/gnula.json @@ -0,0 +1,24 @@ +{ + "id": "gnula", + "name": "Gnula", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "gnula.png", + "banner": "gnula.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "latino", + "movie" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/gnula.py b/plugin.video.alfa/channels/gnula.py new file mode 100755 index 00000000..1435c3d6 --- /dev/null +++ b/plugin.video.alfa/channels/gnula.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Estrenos", action="peliculas", + url="http://gnula.nu/peliculas-online/lista-de-peliculas-online-parte-1/", viewmode="movie")) + itemlist.append( + Item(channel=item.channel, title="Generos", action="generos", url="http://gnula.nu/generos/lista-de-generos/")) + itemlist.append(Item(channel=item.channel, title="Recomendadas", action="peliculas", + url="http://gnula.nu/peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie")) + # itemlist.append( Item(channel=item.channel, title="Portada" , action="portada" , url="http://gnula.nu/")) + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + + # <span style="font-weight: bold;">Lista de géneros</span><br/> + data = scrapertools.find_single_match(data, '<spa[^>]+>Lista de g(.*?)/table') + + # <strong>Historia antigua</strong> [<a href="http://gnula.nu/generos/lista-de-peliculas-del-genero-historia-antigua/" + patron = '<strong>([^<]+)</strong> .<a href="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + for genero, scrapedurl in matches: + title = scrapertools.htmlclean(genero) + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, 
action='peliculas', title=title, url=url, thumbnail=thumbnail, plot=plot, + extra=title, viewmode="movie")) + + itemlist = sorted(itemlist, key=lambda item: item.title) + + return itemlist + + +def peliculas(item): + logger.info() + + ''' + <a class="Ntooltip" href="http://gnula.nu/comedia-romantica/ver-with-this-ring-2015-online/">With This Ring<span><br/> + <img src="http://gnula.nu/wp-content/uploads/2015/06/With_This_Ring2.gif"></span></a> [<span style="color: #33ccff;">18/07/15</span> <span style="color: #33ff33;">(VS)</span><span style="color: red;">(VC)</span><span style="color: #cc66cc;">(VL)</span>] [<span style="color: #ffcc99;">HD-R</span>]—–<strong>Comedia, Romántica</strong><br/> + ''' + ''' + <a class="Ntooltip" href="http://gnula.nu/aventuras/ver-las-aventuras-de-tintin-el-secreto-del-unicornio-2011-online/">The Adventures of Tintin<span><br /> + <img src="http://gnula.nu/wp-content/uploads/2015/07/The_Adventures_of_Tintin_Secret_of_the_Unicorn2.gif"></span></a> (2011) [<span style="color: #33ccff;">10/07/15</span> <span style="color: #33ff33;">(VS)</span><span style="color: red;">(VC)</span><span style="color: #cc66cc;">(VL)</span>] [<span style="color: #ffcc99;">DVD-R</span>]—–<strong>Animación, Infantil, Aventuras</strong><br /> + ''' + # Descarga la página + data = scrapertools.cachePage(item.url) + patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+' + patron += '<img src="([^"]+)"></span></a>(.*?)<br' + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches: + plot = scrapertools.htmlclean(resto).strip() + title = scrapedtitle + " " + plot + fulltitle = title + contentTitle = scrapedtitle + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append(Item(channel=item.channel, action='findvideos', title=title, fulltitle=fulltitle, url=url, + thumbnail=thumbnail, plot=plot, extra=title, hasContentDetails=True, + contentTitle=contentTitle, contentThumbnail=thumbnail, + contentType="movie", context=["buscar_trailer"])) + + return itemlist + + +def findvideos(item): + logger.info("item=" + item.tostring()) + + # Descarga la página para obtener el argumento + data = scrapertools.cachePage(item.url) + item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">') + item.plot = scrapertools.htmlclean(item.plot).strip() + item.contentPlot = item.plot + + newthumbnail = scrapertools.find_single_match(data, + '<div class="entry"[^<]+<p align="center"><img alt="[^"]+" src="([^"]+)"') + if newthumbnail != "": + item.thumbnail = newthumbnail + item.contentThumbnail = newthumbnail + + logger.info("plot=" + item.plot) + + return servertools.find_video_items(item=item, data=data) diff --git a/plugin.video.alfa/channels/guaridavalencianista.json b/plugin.video.alfa/channels/guaridavalencianista.json new file mode 100755 index 00000000..fcc8b8cd --- /dev/null +++ b/plugin.video.alfa/channels/guaridavalencianista.json @@ -0,0 +1,23 @@ +{ + "id": "guaridavalencianista", + "name": "La Guarida valencianista", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "guaridavalencianista.png", + "banner": "guaridavalencianista.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código 
innecesario." + } + ], + "categories": [ + "documentary" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/guaridavalencianista.py b/plugin.video.alfa/channels/guaridavalencianista.py new file mode 100755 index 00000000..f35497ad --- /dev/null +++ b/plugin.video.alfa/channels/guaridavalencianista.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Novedades", action="listvideos", + url="http://guaridavalencia.blogspot.com.es")) + # itemlist.append( Item(channel=item.channel, title="Documentales - Series Disponibles" , action="DocuSeries" , url="http://guaridavalencia.blogspot.com/")) + itemlist.append( + Item(channel=item.channel, title="Categorias", action="DocuTag", url="http://guaridavalencia.blogspot.com.es")) + itemlist.append(Item(channel=item.channel, title="Partidos de liga (Temporada 2014/2015)", action="listvideos", + url="http://guaridavalencia.blogspot.com.es/search/label/PARTIDOS%20DEL%20VCF%20%28TEMPORADA%202014-15%29")) + + return itemlist + + +def DocuSeries(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cache_page(item.url) + + # Extrae las entradas (carpetas) + patronvideos = '<li><b><a href="([^"]+)" target="_blank">([^<]+)</a></b></li>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for match in matches: + scrapedurl = match[0] + scrapedtitle = match[1] + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + +def DocuTag(item): + logger.info() + itemlist = [] + # Descarga la página + data = scrapertools.cache_page(item.url) + # ~ patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span class='label-count' dir='ltr'>(.+?)</span>" + patronvideos = "<li[^<]+<a dir='ltr' href='([^']+)'>([^<]+)</a[^<]+<span dir='ltr'>[^0-9]+([0-9]+)[^<]+</span[^<]+</li[^<]+" + # ~ patronvideos = "<li[^<]+<a dir='ltr' href='([^']+)'[^<]+([^<]+)</a>" + # ~ [^<]+<span class='label-count' dir='ltr'>(.+?)</span>" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for match in matches: + scrapedurl = match[0] + # Se debe quitar saltos de linea en match[1] + scrapedtitle = match[1][1:-1] + " (" + match[2] + ")" + # ~ scrapedtitle = match[1] + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + +def DocuARCHIVO(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cache_page(item.url) + patronvideos = "<a class='post-count-link' href='([^']+)'>([^<]+)</a>[^<]+" + patronvideos += "<span class='post-count' dir='ltr'>(.+?)</span>" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for match in matches: + scrapedurl = match[0] + 
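+        # match comes from the Blogger archive-widget pattern above; one tuple
+        # might look like (hypothetical values):
+        #     ('http://.../2015_06_01_archive.html', 'junio 2015', '(12)')
+        # which below becomes the title "junio 2015 (12)".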
scrapedtitle = match[1] + " " + match[2] + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + +def listvideos(item): + logger.info() + itemlist = [] + + scrapedthumbnail = "" + scrapedplot = "" + + # Descarga la página + data = scrapertools.cache_page(item.url) + patronvideos = "<h3 class='post-title entry-title'[^<]+" + patronvideos += "<a href='([^']+)'>([^<]+)</a>.*?" + patronvideos += "<div class='post-body entry-content'(.*?)<div class='post-footer'>" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for match in matches: + scrapedtitle = match[1] + scrapedtitle = re.sub("<[^>]+>", " ", scrapedtitle) + scrapedtitle = scrapertools.unescape(scrapedtitle)[1:-1] + scrapedurl = match[0] + regexp = re.compile(r'src="(http[^"]+)"') + + matchthumb = regexp.search(match[2]) + if matchthumb is not None: + scrapedthumbnail = matchthumb.group(1) + matchplot = re.compile('<div align="center">(<img.*?)</span></div>', re.DOTALL).findall(match[2]) + + if len(matchplot) > 0: + scrapedplot = matchplot[0] + # print matchplot + else: + scrapedplot = "" + + scrapedplot = re.sub("<[^>]+>", " ", scrapedplot) + scrapedplot = scrapertools.unescape(scrapedplot) + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + # Extrae la marca de siguiente página + patronvideos = "<a class='blog-pager-older-link' href='([^']+)'" + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + if len(matches) > 0: + scrapedtitle = "Página siguiente" + scrapedurl = urlparse.urljoin(item.url, matches[0]) + scrapedthumbnail = "" + scrapedplot = "" + itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + # ~ return itemlist + + +def findvideos(item): + logger.info() + data = scrapertools.cachePage(item.url) + + # Busca los enlaces a los videos + + listavideos = servertools.findvideos(data) + + if item is None: + item = Item() + + itemlist = [] + for video in listavideos: + scrapedtitle = video[0].strip() + " - " + item.title.strip() + scrapedurl = video[1] + server = video[2] + + itemlist.append(Item(channel=item.channel, title=scrapedtitle, action="play", server=server, url=scrapedurl, + thumbnail=item.thumbnail, show=item.show, plot=item.plot, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/hdfull.json b/plugin.video.alfa/channels/hdfull.json new file mode 100755 index 00000000..c1512fa4 --- /dev/null +++ b/plugin.video.alfa/channels/hdfull.json @@ -0,0 +1,61 @@ +{ + "id": "hdfull", + "name": "HDFull", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "hdfull.png", + "banner": "hdfull.png", + "version": 1, + "changes": [ + { + "date": "30/05/2017", + "description": "Arreglada la extracción de enlaces por cambios en la web" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "21/03/2017", + "description": "Pequeño fix para 
corregir algunas urls de los vídeos que se extraían mal" + }, + { + "date": "02/02/2017", + "description": "Arreglada la extracción de enlaces por cambios en la web" + }, + { + "date": "05/01/2017", + "description": "Corregido debido a cloudflare" + } + ], + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "hdfulluser", + "type": "text", + "label": "@30014", + "enabled": true, + "visible": true + }, + { + "id": "hdfullpassword", + "type": "text", + "label": "@30015", + "hidden": true, + "enabled": "!eq(-1,'')", + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py new file mode 100755 index 00000000..df5cc3c1 --- /dev/null +++ b/plugin.video.alfa/channels/hdfull.py @@ -0,0 +1,926 @@ +# -*- coding: utf-8 -*- + +import base64 +import re +import urllib +import urlparse + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import platformtools + +host = "http://hdfull.tv" + +if config.get_setting('hdfulluser', 'hdfull'): + account = True +else: + account = False + + +def settingCanal(item): + return platformtools.show_channel_settings() + + +def login(): + logger.info() + + data = agrupa_datos(httptools.downloadpage(host).data) + + patron = "<input type='hidden' name='__csrf_magic' value=\"([^\"]+)\" />" + sid = scrapertools.find_single_match(data, patron) + + post = urllib.urlencode({'__csrf_magic': sid}) + "&username=" + config.get_setting('hdfulluser', + 'hdfull') + "&password=" + config.get_setting( + 'hdfullpassword', 'hdfull') + "&action=login" + + httptools.downloadpage(host, post=post) + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas", url=host, folder=True)) + itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url=host, folder=True)) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...")) + if not account: + itemlist.append(Item(channel=item.channel, title=bbcode_kodi2html( + "[COLOR orange][B]Habilita tu cuenta para activar los items de usuario...[/B][/COLOR]"), + action="settingCanal", url="")) + else: + login() + itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="")) + + return itemlist + + +def menupeliculas(item): + logger.info() + + itemlist = [] + + if account: + itemlist.append(Item(channel=item.channel, action="items_usuario", + title=bbcode_kodi2html("[COLOR orange][B]Favoritos[/B][/COLOR]"), + url=host + "/a/my?target=movies&action=favorite&start=-28&limit=28", folder=True)) + itemlist.append(Item(channel=item.channel, action="items_usuario", + title=bbcode_kodi2html("[COLOR orange][B]Pendientes[/B][/COLOR]"), + url=host + "/a/my?target=movies&action=pending&start=-28&limit=28", folder=True)) + + itemlist.append(Item(channel=item.channel, action="fichas", title="ABC", url=host + "/peliculas/abc", folder=True)) + itemlist.append( + Item(channel=item.channel, action="fichas", title="Últimas películas", url=host + "/peliculas", folder=True)) + itemlist.append( + Item(channel=item.channel, action="fichas", title="Películas Estreno", url=host + 
"/peliculas-estreno", + folder=True)) + itemlist.append(Item(channel=item.channel, action="fichas", title="Películas Actualizadas", + url=host + "/peliculas-actualizadas", folder=True)) + itemlist.append( + Item(channel=item.channel, action="fichas", title="Rating IMDB", url=host + "/peliculas/imdb_rating", + folder=True)) + itemlist.append(Item(channel=item.channel, action="generos", title="Películas por Género", url=host, folder=True)) + if account: + itemlist.append(Item(channel=item.channel, action="items_usuario", + title=bbcode_kodi2html("[COLOR orange][B]Vistas[/B][/COLOR]"), + url=host + "/a/my?target=movies&action=seen&start=-28&limit=28", folder=True)) + + return itemlist + + +def menuseries(item): + logger.info() + + itemlist = [] + + if account: + itemlist.append(Item(channel=item.channel, action="items_usuario", + title=bbcode_kodi2html("[COLOR orange][B]Siguiendo[/B][/COLOR]"), + url=host + "/a/my?target=shows&action=following&start=-28&limit=28", folder=True)) + itemlist.append(Item(channel=item.channel, action="items_usuario", + title=bbcode_kodi2html("[COLOR orange][B]Para Ver[/B][/COLOR]"), + url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True)) + + itemlist.append(Item(channel=item.channel, action="series_abc", title="A-Z", folder=True)) + + itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos Emitidos", + url=host + "/a/episodes?action=latest&start=-24&limit=24&elang=ALL", folder=True)) + itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Episodios Estreno", + url=host + "/a/episodes?action=premiere&start=-24&limit=24&elang=ALL", folder=True)) + itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Episodios Actualizados", + url=host + "/a/episodes?action=updated&start=-24&limit=24&elang=ALL", folder=True)) + itemlist.append( + Item(channel=item.channel, action="fichas", title="Últimas series", url=host + "/series", folder=True)) + itemlist.append( + Item(channel=item.channel, action="fichas", title="Rating IMDB", url=host + "/series/imdb_rating", folder=True)) + itemlist.append( + Item(channel=item.channel, action="generos_series", title="Series por Género", url=host, folder=True)) + itemlist.append(Item(channel=item.channel, action="listado_series", title="Listado de todas las series", + url=host + "/series/list", folder=True)) + if account: + itemlist.append(Item(channel=item.channel, action="items_usuario", + title=bbcode_kodi2html("[COLOR orange][B]Favoritas[/B][/COLOR]"), + url=host + "/a/my?target=shows&action=favorite&start=-28&limit=28", folder=True)) + itemlist.append(Item(channel=item.channel, action="items_usuario", + title=bbcode_kodi2html("[COLOR orange][B]Pendientes[/B][/COLOR]"), + url=host + "/a/my?target=shows&action=pending&start=-28&limit=28", folder=True)) + itemlist.append(Item(channel=item.channel, action="items_usuario", + title=bbcode_kodi2html("[COLOR orange][B]Vistas[/B][/COLOR]"), + url=host + "/a/my?target=shows&action=seen&start=-28&limit=28", folder=True)) + + return itemlist + + +def search(item, texto): + logger.info() + + data = agrupa_datos(httptools.downloadpage(host).data) + + sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"') + item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto + item.title = "Buscar..." 
+ item.url = host + "/buscar" + + try: + return fichas(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def series_abc(item): + logger.info() + + itemlist = [] + + az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ#" + + for l in az: + itemlist.append( + Item(channel=item.channel, action='fichas', title=l, url=host + "/series/abc/" + l.replace('#', '9'))) + + return itemlist + + +def items_usuario(item): + logger.info() + + itemlist = [] + ## Carga estados + status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data) + + ## Fichas usuario + url = item.url.split("?")[0] + post = item.url.split("?")[1] + + old_start = scrapertools.get_match(post, 'start=([^&]+)&') + limit = scrapertools.get_match(post, 'limit=(\d+)') + start = "%s" % (int(old_start) + int(limit)) + + post = post.replace("start=" + old_start, "start=" + start) + next_page = url + "?" + post + + ## Carga las fichas de usuario + data = httptools.downloadpage(url, post=post).data + fichas_usuario = jsontools.load(data) + + for ficha in fichas_usuario: + + try: + title = ficha['title']['es'].strip() + except: + title = ficha['title']['en'].strip() + + try: + title = title.encode('utf-8') + except: + pass + + show = title + + try: + thumbnail = host + "/thumbs/" + ficha['thumbnail'] + except: + thumbnail = host + "/thumbs/" + ficha['thumb'] + + try: + url = urlparse.urljoin(host, '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1" + action = "episodios" + str = get_status(status, 'shows', ficha['id']) + if "show_title" in ficha: + action = "findvideos" + try: + serie = ficha['show_title']['es'].strip() + except: + serie = ficha['show_title']['en'].strip() + temporada = ficha['season'] + episodio = ficha['episode'] + serie = bbcode_kodi2html("[COLOR whitesmoke][B]" + serie + "[/B][/COLOR]") + if len(episodio) == 1: episodio = '0' + episodio + try: + title = temporada + "x" + episodio + " - " + serie + ": " + title + except: + title = temporada + "x" + episodio + " - " + serie.decode('iso-8859-1') + ": " + title.decode( + 'iso-8859-1') + url = urlparse.urljoin(host, '/serie/' + ficha[ + 'permalink'] + '/temporada-' + temporada + '/episodio-' + episodio) + "###" + ficha['id'] + ";3" + except: + url = urlparse.urljoin(host, '/pelicula/' + ficha['perma']) + "###" + ficha['id'] + ";2" + action = "findvideos" + str = get_status(status, 'movies', ficha['id']) + if str != "": title += str + + # try: title = title.encode('utf-8') + # except: pass + + itemlist.append( + Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url, thumbnail=thumbnail, + show=show, folder=True)) + + if len(itemlist) == int(limit): + itemlist.append( + Item(channel=item.channel, action="items_usuario", title=">> Página siguiente", url=next_page, folder=True)) + + return itemlist + + +def listado_series(item): + logger.info() + + itemlist = [] + + data = agrupa_datos(httptools.downloadpage(item.url).data) + + patron = '<div class="list-item"><a href="([^"]+)"[^>]+>([^<]+)</a></div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = scrapedurl + "###0;1" + itemlist.append( + Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=url, + show=scrapedtitle, contentType="tvshow")) + + return itemlist + + +def fichas(item): + logger.info() + itemlist = [] + + ## Carga estados + status = 
jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
+
+    if item.title == "Buscar...":
+        data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
+
+        s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
+            '<h3 class="section-title">')
+
+        if len(s_p) == 1:
+            data = s_p[0]
+            if 'Lo sentimos</h3>' in s_p[0]:
+                # recover the search term from the POST data built in search()
+                texto = item.extra.split('query=')[-1]
+                return [Item(channel=item.channel, title=bbcode_kodi2html(
+                    "[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20', ' ') +
+                    "[/COLOR] sin resultados"))]
+        else:
+            data = s_p[0] + s_p[1]
+    else:
+        data = agrupa_datos(httptools.downloadpage(item.url).data)
+
+    # flatten each card into a single parseable line
+    data = re.sub(
+        r'<div class="span-6[^<]+<div class="item"[^<]+' + \
+        '<a href="([^"]+)"[^<]+' + \
+        '<img.*?src="([^"]+)".*?' + \
+        '<div class="left"(.*?)</div>' + \
+        '<div class="right"(.*?)</div>.*?' + \
+        'title="([^"]+)".*?' + \
+        'onclick="setFavorite.\d, (\d+),',
+        r"'url':'\1';'image':'\2';'langs':'\3';'rating':'\4';'title':\5;'id':'\6';",
+        data
+    )
+
+    patron = "'url':'([^']+)';'image':'([^']+)';'langs':'([^']+)';'rating':'([^']+)';'title':([^;]+);'id':'([^']+)';"
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:
+
+        thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
+
+        title = scrapedtitle.strip()
+        show = title
+        contentTitle = scrapedtitle.strip()
+
+        if scrapedlangs != ">":
+            textoidiomas = extrae_idiomas(scrapedlangs)
+            title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])")
+
+        if scrapedrating != ">":
+            valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>', r'\1,\2', scrapedrating)
+            title += bbcode_kodi2html(" ([COLOR orange]" + valoracion + "[/COLOR])")
+
+        url = urlparse.urljoin(item.url, scrapedurl)
+
+        if "/serie" in url or "/tags-tv" in url:
+            action = "episodios"
+            url += "###" + scrapedid + ";1"
+            type = "shows"
+            contentType = "tvshow"
+        else:
+            action = "findvideos"
+            url += "###" + scrapedid + ";2"
+            type = "movies"
+            contentType = "movie"
+
+        str = get_status(status, type, scrapedid)
+        if str != "": title += str
+
+        if item.title == "Buscar...":
+            tag_type = scrapertools.get_match(url, 'l.tv/([^/]+)/')
+            title += bbcode_kodi2html(" - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]")
+
+        itemlist.append(
+            Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
+                 show=show, folder=True, contentType=contentType, contentTitle=contentTitle))
+
+    ## Pagination
+    next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">.raquo;</a>')
+    if next_page_url != "":
+        itemlist.append(Item(channel=item.channel, action="fichas", title=">> Página siguiente",
+                             url=urlparse.urljoin(item.url, next_page_url), folder=True))
+
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+    id = "0"
+    itemlist = []
+
+    ## load watch statuses
+    status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
+
+    url_targets = item.url
+
+    if "###" in item.url:
+        id = item.url.split("###")[1].split(";")[0]
+        type = item.url.split("###")[1].split(";")[1]
+        item.url = item.url.split("###")[0]
+
+    ## Seasons
+    data = agrupa_datos(httptools.downloadpage(item.url).data)
+
+    if id == "0":
+        ## get the show id from the page when coming from listado_series
+        id = scrapertools.get_match(data, "<script>var sid = '([^']+)';</script>")
+        url_targets = url_targets.replace('###0', '###' + id)
+
+    str = 
get_status(status, "shows", id) + if str != "" and account and item.category != "Series" and "XBMC" not in item.title: + if config.get_videolibrary_support(): + title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )") + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets, + thumbnail=item.thumbnail, show=item.show, folder=False)) + title = str.replace('green', 'red').replace('Siguiendo', 'Abandonar') + itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, + thumbnail=item.thumbnail, show=item.show, folder=True)) + elif account and item.category != "Series" and "XBMC" not in item.title: + if config.get_videolibrary_support(): + title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )") + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets, + thumbnail=item.thumbnail, show=item.show, folder=False)) + title = bbcode_kodi2html(" ( [COLOR orange][B]Seguir[/B][/COLOR] )") + itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, + thumbnail=item.thumbnail, show=item.show, folder=True)) + + patron = "<li><a href='([^']+)'>[^<]+</a></li>" + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl in matches: + + ## Episodios + data = agrupa_datos(httptools.downloadpage(scrapedurl).data) + + sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'") + ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)") + post = "action=season&start=0&limit=0&show=%s&season=%s" % (sid, ssid) + + url = host + "/a/episodes" + + data = httptools.downloadpage(url, post=post).data + + episodes = jsontools.load(data) + + for episode in episodes: + + thumbnail = host + "/thumbs/" + episode['thumbnail'] + + temporada = episode['season'] + episodio = episode['episode'] + if len(episodio) == 1: episodio = '0' + episodio + + if episode['languages'] != "[]": + idiomas = "( [COLOR teal][B]" + for idioma in episode['languages']: idiomas += idioma + " " + idiomas += "[/B][/COLOR])" + idiomas = bbcode_kodi2html(idiomas) + else: + idiomas = "" + + if episode['title']: + try: + title = episode['title']['es'].strip() + except: + title = episode['title']['en'].strip() + + if len(title) == 0: title = "Temporada " + temporada + " Episodio " + episodio + + try: + title = temporada + "x" + episodio + " - " + title.decode('utf-8') + ' ' + idiomas + except: + title = temporada + "x" + episodio + " - " + title.decode('iso-8859-1') + ' ' + idiomas + # try: title = temporada + "x" + episodio + " - " + title + ' ' + idiomas + # except: pass + # except: title = temporada + "x" + episodio + " - " + title.decode('iso-8859-1') + ' ' + idiomas + + str = get_status(status, 'episodes', episode['id']) + if str != "": title += str + + try: + title = title.encode('utf-8') + except: + title = title.encode('iso-8859-1') + + url = urlparse.urljoin(scrapedurl, 'temporada-' + temporada + '/episodio-' + episodio) + "###" + episode[ + 'id'] + ";3" + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, + thumbnail=thumbnail, show=item.show, folder=True, contentType="episode")) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=url_targets, + action="add_serie_to_library", extra="episodios", show=item.show)) + 
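+        # Both entries reuse url_targets, which keeps the "###<id>;<type>" suffix
+        # (1=serie, 2=película, 3=episodio) that episodios()/findvideos()/play()
+        # later split off to query and report watch status.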
itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=url_targets, + action="download_all_episodes", extra="episodios", show=item.show)) + + return itemlist + + +def novedades_episodios(item): + logger.info() + + itemlist = [] + ## Carga estados + status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data) + + ## Episodios + url = item.url.split("?")[0] + post = item.url.split("?")[1] + + old_start = scrapertools.get_match(post, 'start=([^&]+)&') + start = "%s" % (int(old_start) + 24) + + post = post.replace("start=" + old_start, "start=" + start) + next_page = url + "?" + post + + data = httptools.downloadpage(url, post=post).data + + episodes = jsontools.load(data) + + for episode in episodes: + + thumbnail = host + "/thumbs/" + episode['thumbnail'] + + temporada = episode['season'] + episodio = episode['episode'] + if len(episodio) == 1: episodio = '0' + episodio + + if episode['languages'] != "[]": + idiomas = "( [COLOR teal][B]" + for idioma in episode['languages']: idiomas += idioma + " " + idiomas += "[/B][/COLOR])" + idiomas = bbcode_kodi2html(idiomas) + else: + idiomas = "" + + try: + show = episode['show']['title']['es'].strip() + except: + show = episode['show']['title']['en'].strip() + + show = bbcode_kodi2html("[COLOR whitesmoke][B]" + show + "[/B][/COLOR]") + + if episode['title']: + try: + title = episode['title']['es'].strip() + except: + title = episode['title']['en'].strip() + + if len(title) == 0: title = "Temporada " + temporada + " Episodio " + episodio + + try: + title = temporada + "x" + episodio + " - " + show.decode('utf-8') + ": " + title.decode( + 'utf-8') + ' ' + idiomas + except: + title = temporada + "x" + episodio + " - " + show.decode('iso-8859-1') + ": " + title.decode( + 'iso-8859-1') + ' ' + idiomas + + str = get_status(status, 'episodes', episode['id']) + if str != "": title += str + + try: + title = title.encode('utf-8') + except: + title = title.encode('iso-8859-1') + # try: show = show.encode('utf-8') + # except: show = show.encode('iso-8859-1') + + url = urlparse.urljoin(host, '/serie/' + episode[ + 'permalink'] + '/temporada-' + temporada + '/episodio-' + episodio) + "###" + episode['id'] + ";3" + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail, + folder=True, contentType="episode")) + + if len(itemlist) == 24: + itemlist.append( + Item(channel=item.channel, action="novedades_episodios", title=">> Página siguiente", url=next_page, + folder=True)) + + return itemlist + + +def generos(item): + logger.info() + + itemlist = [] + + data = agrupa_datos(httptools.downloadpage(item.url).data) + data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/peliculas"(.*?)</ul>') + + patron = '<li><a href="([^"]+)">([^<]+)</a></li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + + itemlist.append(Item(channel=item.channel, action="fichas", title=title, url=url, folder=True)) + + return itemlist + + +def generos_series(item): + logger.info() + + itemlist = [] + + data = agrupa_datos(httptools.downloadpage(item.url).data) + data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/series"(.*?)</ul>') + + patron = '<li><a href="([^"]+)">([^<]+)</a></li>' + matches = re.compile(patron, 
re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + url = urlparse.urljoin(item.url, scrapedurl) + + itemlist.append(Item(channel=item.channel, action="fichas", title=title, url=url, folder=True)) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + ## Load the user's status flags (watched/pending/favorites) + status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data) + + url_targets = item.url + + ## Videos + if "###" in item.url: + id = item.url.split("###")[1].split(";")[0] + type = item.url.split("###")[1].split(";")[1] + item.url = item.url.split("###")[0] + + if type == "2" and account and item.category != "Cine": + title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )") + if "Favorito" in item.title: + title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )") + if config.get_videolibrary_support(): + title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )") + itemlist.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label, + url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False)) + + title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )") + + itemlist.append( + Item(channel=item.channel, action="trailer", title=title_label, fulltitle=title_label, url=url_targets, + thumbnail=item.thumbnail, show=item.show)) + + itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, + thumbnail=item.thumbnail, show=item.show, folder=True)) + + data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data + key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)') + + data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data + try: + data_js = jhexdecode(data_js) + except: + from lib.aadecode import decode as aadecode + data_js = data_js.split(";゚ω゚") + decode_aa = "" + for match in data_js: + decode_aa += aadecode(match) + + data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa) + data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js) + + data = agrupa_datos(httptools.downloadpage(item.url).data) + data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'") + data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key))) + + infolabels = {} + year = scrapertools.find_single_match(data, '<span>Año:\s*</span>.*?(\d{4})') + infolabels["year"] = year + + matches = [] + for match in data_decrypt: + prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"])) + function = prov["l"].replace("code", match["code"]).replace("var_1", match["code"]) + + url = scrapertools.find_single_match(function, "return\s*(.*?)[;]*\}") + url = re.sub(r'\'|"|\s|\+', '', url) + url = re.sub(r'var_\d+\[\d+\]', '', url) + embed = prov["e"] + + matches.append([match["lang"], match["quality"], url, embed]) + + enlaces = [] + for idioma, calidad, url, embed in matches: + servername = scrapertools.find_single_match(url, "(?:http:|https:)//(?:www.|)([^.]+).") + if servername == "streamin": servername = "streaminto" + if servername == "waaw": servername = "netutv" + if servername == "uploaded" or servername == "ul": servername = "uploadedto" + mostrar_server = True + if config.get_setting("hidepremium") == True: + mostrar_server = servertools.is_server_enabled(servername) + if mostrar_server: + option = "Ver" + if re.search(r'return ([\'"]{2,}|\})', embed): + option = "Descargar" + calidad = unicode(calidad, "utf8").upper().encode("utf8") + servername_c = unicode(servername, "utf8").capitalize().encode("utf8") + title = option + ": " + servername_c + " (" + calidad + ")" + " (" + idioma + ")" + thumbnail = item.thumbnail + plot = item.title + "\n\n" + scrapertools.find_single_match(data, + '<meta property="og:description" content="([^"]+)"') + plot = scrapertools.htmlclean(plot) + fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)') + if account: + url += "###" + id + ";" + type + + enlaces.append( + Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail, + plot=plot, fanart=fanart, show=item.show, folder=True, server=servername, infoLabels=infolabels, + contentTitle=item.contentTitle, contentType=item.contentType, tipo=option)) + + enlaces.sort(key=lambda it: it.tipo, reverse=True) + itemlist.extend(enlaces) + ## 2 = movie + if type == "2" and item.category != "Cine": + ## STRM entries for all the available server links + ## If the movie's STRM file does not exist yet, show the ">> Añadir a la videoteca..." item + try: + itemlist.extend(file_cine_library(item, url_targets)) + except: + pass + + return itemlist + + +def trailer(item): + import youtube + itemlist = [] + item.url = "https://www.googleapis.com/youtube/v3/search" + \ + "?q=" + item.show.replace(" ", "+") + "+trailer+HD+Español" \ + "&regionCode=ES" + \ + "&part=snippet" + \ + "&hl=es_ES" + \ + "&key=AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA" + \ + "&type=video" + \ + "&maxResults=50" + \ + "&pageToken=" + itemlist.extend(youtube.fichas(item)) + # itemlist.pop(-1) + return itemlist + + +def file_cine_library(item, url_targets): + import os + from core import filetools + videolibrarypath = os.path.join(config.get_videolibrary_path(), "CINE") + archivo = item.show.strip() + strmfile = archivo + ".strm" + strmfilepath = filetools.join(videolibrarypath, strmfile) + + # always return a list so the caller's itemlist.extend() cannot fail + itemlist = [] + if not os.path.exists(strmfilepath): + itemlist.append(Item(channel=item.channel, title=">> Añadir a la videoteca...", url=url_targets, + action="add_file_cine_library", extra="episodios", show=archivo)) + + return itemlist + + +def add_file_cine_library(item): + from core import videolibrarytools + new_item = item.clone(title=item.show, action="play_from_library") + videolibrarytools.save_movie(new_item) + itemlist = [] + itemlist.append(Item(title='El vídeo ' + item.show + ' se ha añadido a la videoteca')) + # xbmctools.renderItems(itemlist, "", "", "") + platformtools.render_items(itemlist, "") + + return + + +def play(item): + if "###" in item.url: + id = item.url.split("###")[1].split(";")[0] + type = item.url.split("###")[1].split(";")[1] + item.url = item.url.split("###")[0] + post = "target_id=%s&target_type=%s&target_status=1" % (id, type) + data = httptools.downloadpage(host + "/a/status", post=post).data + + devuelve = servertools.findvideosbyserver(item.url, item.server) + if devuelve: + item.url = devuelve[0][1] + else: + devuelve = servertools.findvideos(item.url, True) + if devuelve: + item.url = devuelve[0][1] + item.server = devuelve[0][2] + + return [item]
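+ + # --- Reviewer sketch (illustrative only, not part of the channel code) --------- + # findvideos(), play() and set_status() above share a small convention: the media + # id and type ride along on the url as "url###id;type" and are split off before + # use. A self-contained sketch of that convention, with a hypothetical helper + # name and example values: + # + # def split_target(url): + #     if "###" not in url: + #         return url, None, None + #     base, target = url.split("###", 1) + #     target_id, target_type = target.split(";", 1) + #     return base, target_id, target_type + # + # split_target("http://hdfull.tv/pelicula/x###99;2") == ("http://hdfull.tv/pelicula/x", "99", "2")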
+ + +## -------------------------------------------------------------------------------- +## -------------------------------------------------------------------------------- + +def agrupa_datos(data): + ## Normalizes the downloaded HTML: strips line breaks, tabs, <br> tags and comments, then collapses whitespace + data = re.sub(r'\n|\r|\t| |<br>|<!--.*?-->', '', data) + data = re.sub(r'\s+', ' ', data) + data = re.sub(r'>\s<', '><', data) + return data + + +def extrae_idiomas(bloqueidiomas): + logger.info("idiomas=" + bloqueidiomas) + patronidiomas = '([a-z0-9]+).png"' + idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas) + textoidiomas = "" + for idioma in idiomas: + textoidiomas = textoidiomas + idioma.upper() + " " + + return textoidiomas + + +def bbcode_kodi2html(text): + if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"): + text = re.sub(r'\[COLOR\s([^\]]+)\]', + r'<span style="color: \1">', + text) + text = text.replace('[/COLOR]', '</span>') + text = text.replace('[CR]', '<br>') + text = re.sub(r'\[([^\]]+)\]', + r'<\1>', + text) + text = text.replace('"color: white"', '"color: auto"') + + return text + + +## -------------------------------------------------------------------------------- + +def set_status(item): + if "###" in item.url: + id = item.url.split("###")[1].split(";")[0] + type = item.url.split("###")[1].split(";")[1] + # item.url = item.url.split("###")[0] + + if "Abandonar" in item.title: + path = "/a/status" + post = "target_id=" + id + "&target_type=" + type + "&target_status=0" + + elif "Seguir" in item.title: + path = "/a/status" + post = "target_id=" + id + "&target_type=" + type + "&target_status=3" + + elif "Agregar a Favoritos" in item.title: + path = "/a/favorite" + post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=1" + + elif "Quitar de Favoritos" in item.title: + path = "/a/favorite" + post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=-1" + + data = httptools.downloadpage(host + path, post=post).data + + title = bbcode_kodi2html("[COLOR green][B]OK[/B][/COLOR]") + + return [Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=item.url, + thumbnail=item.thumbnail, show=item.show, folder=False)] + + +def get_status(status, type, id): + if type == 'shows': + state = {'0': '', '1': 'Finalizada', '2': 'Pendiente', '3': 'Siguiendo'} + else: + state = {'0': '', '1': 'Visto', '2': 'Pendiente'} + + # build the label in a local variable instead of shadowing the str() builtin + result = "" + str1 = "" + str2 = "" + + try: + if id in status['favorites'][type]: + str1 = bbcode_kodi2html(" [COLOR orange][B]Favorito[/B][/COLOR]") + except: + str1 = "" + + try: + if id in status['status'][type]: + str2 = state[status['status'][type][id]] + if str2 != "": str2 = bbcode_kodi2html( + " [COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]") + except: + str2 = "" + + if str1 != "" or str2 != "": + result = " (" + str1 + str2 + " )" + + return result + + +## -------------------------------------------------------------------------------- +## -------------------------------------------------------------------------------- + + +def jhexdecode(t): + # Decodes the hex-obfuscated provider JS: renames the _NNxWWxN tokens to var_N, + # unescapes the \xHH sequences and inlines the var_0 string table + r = re.sub(r'_\d+x\w+x(\d+)', 'var_' + r'\1', t) + r = re.sub(r'_\d+x\w+', 'var_0', r) + + def to_hx(c): + h = int(c.group(1), 16) + if 19 < h < 160: + return chr(h) + else: + return "" + + r = re.sub(r'(?:\\|)x(\w{2})', to_hx, r).replace('var ', '') + + f = eval(scrapertools.get_match(r, '\s*var_0\s*=\s*([^;]+);')) + for i, v in enumerate(f): + r = r.replace('[[var_0[%s]]' % i, "." + f[i]) + r = r.replace(':var_0[%s]' % i, ":\"" + f[i] + "\"") + r = r.replace(' var_0[%s]' % i, " \"" + f[i] + "\"") + r = r.replace('(var_0[%s]' % i, "(\"" + f[i] + "\"") + r = r.replace('[var_0[%s]]' % i, "." 
+ f[i]) + if v == "": r = r.replace('var_0[%s]' % i, '""') + + r = re.sub(r':(function.*?\})', r":'\g<1>'", r) + r = re.sub(r':(var[^,]+),', r":'\g<1>',", r) + + return r + + +def obfs(data, key, n=126): + chars = list(data) + for i in range(0, len(chars)): + c = ord(chars[i]) + if c <= n: + number = (ord(chars[i]) + key) % n + chars[i] = chr(number) + + return "".join(chars) diff --git a/plugin.video.alfa/channels/hentaienespanol.json b/plugin.video.alfa/channels/hentaienespanol.json new file mode 100755 index 00000000..47a8ba67 --- /dev/null +++ b/plugin.video.alfa/channels/hentaienespanol.json @@ -0,0 +1,33 @@ +{ + "id": "hentaienespanol", + "name": "HentaiEnEspañol", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "https://s11.postimg.org/cmuwcvvpf/hentaienespanol.png", + "banner": "https://s3.postimg.org/j3qkfut8z/hentaienespanol_banner.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/12/2016", + "description": "Release." + } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/hentaienespanol.py b/plugin.video.alfa/channels/hentaienespanol.py new file mode 100755 index 00000000..f79c00a3 --- /dev/null +++ b/plugin.video.alfa/channels/hentaienespanol.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +import re + +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +host = 'http://www.xn--hentaienespaol-1nb.net/' +headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Todos", action="todas", url=host, thumbnail='', fanart='')) + + itemlist.append( + Item(channel=item.channel, title="Sin Censura", action="todas", url=host + 'hentai/sin-censura/', thumbnail='', + fanart='')) + + return itemlist + + +def todas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<div class="box-peli" id="post-.*?">.<h2 class="title">.<a href="([^"]+)">([^<]+)<\/a>.*?' 
+ patron += 'height="170px" src="([^"]+)' + + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + url = scrapedurl + title = scrapedtitle # .decode('utf-8') + thumbnail = scrapedthumbnail + fanart = '' + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart)) + + # Pagination: only offer the next page when the link actually exists + siguiente = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="([^"]+)">') + if siguiente: + title = 'Pagina Siguiente >>> ' + itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart='')) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + + if texto != '': + return todas(item) + else: + return [] + + +def findvideos(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + patron = '<li.*?<a href="([^"]+)" target="_blank"><i class="icon-metro online"><\/i><span>Ver.*?<\/span><\/a> <\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl in matches: + title = item.title + url = scrapedurl + itemlist.append(item.clone(title=title, url=url, action="play")) + + return itemlist + + +def play(item): + logger.info() + item.url = item.url.replace(' ', '%20') + data = httptools.downloadpage(item.url, add_referer=True).data + itemlist = servertools.find_video_items(data=data) + + return itemlist diff --git a/plugin.video.alfa/channels/hentaiid.json b/plugin.video.alfa/channels/hentaiid.json new file mode 100755 index 00000000..d86cc149 --- /dev/null +++ b/plugin.video.alfa/channels/hentaiid.json @@ -0,0 +1,15 @@ +{ + "id": "hentaiid", + "name": "Hentai ID", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "https://dl.dropboxusercontent.com/u/30248079/hentai_id.png", + "banner": "https://dl.dropboxusercontent.com/u/30248079/hentai_id2.png", + "version": 1, + "date": "09/03/2017", + "changes": "Fix web", + "categories": [ + "adult" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/hentaiid.py b/plugin.video.alfa/channels/hentaiid.py new file mode 100755 index 00000000..feaa1c48 --- /dev/null +++ b/plugin.video.alfa/channels/hentaiid.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +CHANNEL_HOST = "http://hentai-id.tv/" + + +def mainlist(item): + logger.info() + + itemlist = list() + itemlist.append(Item(channel=item.channel, action="series", title="Novedades", + url=urlparse.urljoin(CHANNEL_HOST, "archivos/h2/"), extra="novedades")) + itemlist.append(Item(channel=item.channel, action="letras", title="Por orden alfabético")) + itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", url=CHANNEL_HOST)) + itemlist.append(Item(channel=item.channel, action="series", title="Sin Censura", + url=urlparse.urljoin(CHANNEL_HOST, "archivos/sin-censura/"))) + itemlist.append(Item(channel=item.channel, action="series", title="High Definition", + url=urlparse.urljoin(CHANNEL_HOST, "archivos/hight-definition/"))) + itemlist.append(Item(channel=item.channel, action="series", title="Mejores Hentais", + url=urlparse.urljoin(CHANNEL_HOST, "archivos/ranking-hentai/"))) + 
itemlist.append(Item(channel=item.channel, action="search", title="Buscar", + url=urlparse.urljoin(CHANNEL_HOST, "?s="))) + + return itemlist + + +def letras(item): + logger.info() + + itemlist = [] + + for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ': + itemlist.append(Item(channel=item.channel, action="series", title=letra, + url=urlparse.urljoin(CHANNEL_HOST, "/?s=letra-%s" % letra.replace("0", "num")))) + + return itemlist + + +def generos(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}", "", data) + + data = scrapertools.get_match(data, "<div class='cccon'>(.*?)</div><div id=\"myslides\">") + patron = "<a.+? href='/([^']+)'>(.*?)</a>" + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.entityunescape(scrapedtitle) + url = urlparse.urljoin(item.url, scrapedurl) + # logger.debug("title=[{0}], url=[{1}]".format(title, url)) + + itemlist.append(Item(channel=item.channel, action="series", title=title, url=url)) + + return itemlist + + +def search(item, texto): + logger.info() + if item.url == "": + item.url = urlparse.urljoin(CHANNEL_HOST, "animes/?buscar=") + texto = texto.replace(" ", "+") + item.url = "%s%s" % (item.url, texto) + + try: + return series(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def series(item): + logger.info() + + data = httptools.downloadpage(item.url).data + + patron = '<div class="post" id="post"[^<]+<center><h1 class="post-title entry-title"[^<]+<a href="([^"]+)">' \ + '(.*?)</a>[^<]+</h1></center>[^<]+<div[^<]+</div>[^<]+<div[^<]+<div.+?<img src="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + if item.extra == "novedades": + action = "findvideos" + else: + action = "episodios" + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + title = scrapertools.unescape(scrapedtitle) + fulltitle = title + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + show = title + # logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail)) + itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, + show=show, fulltitle=fulltitle, fanart=thumbnail, folder=True)) + + patron = '</span><a class="page larger" href="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + for match in matches: + if len(matches) > 0: + scrapedurl = match + scrapedtitle = ">> Pagina Siguiente" + + itemlist.append(Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, + folder=True, viewmode="movies_with_plot")) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = scrapertools.find_single_match(data, '<div class="listanime">(.*?)</div>') + patron = '<a href="([^"]+)">([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.unescape(scrapedtitle) + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = item.thumbnail + plot = item.plot + + # logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail)) + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, + thumbnail=thumbnail, plot=plot, 
show=item.show, fulltitle="%s %s" % (item.show, title), + fanart=thumbnail, viewmode="movies_with_plot", folder=True)) + + return itemlist + + +def findvideos(item): + logger.info() + + data = httptools.downloadpage(item.url).data + # non-capturing groups: the old character classes [iframe|IFRAME] and [src|SRC] only matched a single character + patron = '<div id="tab\d".+?>[^<]+<(?:iframe|IFRAME).*?(?:src|SRC)="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + # iterate over a copy: removing/appending while looping over the same list skips entries + for url in matches[:]: + if 'goo.gl' in url: + video = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers["location"] + matches.remove(url) + matches.append(video) + + from core import servertools + itemlist = servertools.find_video_items(data=",".join(matches)) + for videoitem in itemlist: + videoitem.fulltitle = item.fulltitle + videoitem.channel = item.channel + videoitem.thumbnail = item.thumbnail + + return itemlist
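+ + # --- Reviewer sketch (hypothetical helper, not part of the channel code) ------- + # The goo.gl branch above resolves shortener links by downloading only the + # headers and reading the redirect target; factored out it would look like: + # + # def resolve_short_url(url): + #     response = httptools.downloadpage(url, follow_redirects=False, only_headers=True) + #     return response.headers.get("location", url)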
diff --git a/plugin.video.alfa/channels/idocumentales.json b/plugin.video.alfa/channels/idocumentales.json new file mode 100755 index 00000000..203c5f2a --- /dev/null +++ b/plugin.video.alfa/channels/idocumentales.json @@ -0,0 +1,41 @@ +{ + "id": "idocumentales", + "name": "Idocumentales", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s27.postimg.org/pjq3y552b/idocumentales.png", + "banner": "https://s16.postimg.org/6d8bh1z1x/idocumentales_banner.png", + "version": 1, + "changes": [ + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "18/06/2016", + "description": "First release" + } + ], + "categories": [ + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_documentales", + "type": "bool", + "label": "Incluir en Novedades - Documentales", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/idocumentales.py b/plugin.video.alfa/channels/idocumentales.py new file mode 100755 index 00000000..07d4fba2 --- /dev/null +++ b/plugin.video.alfa/channels/idocumentales.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- + +import re + +from core import httptools +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item + +host = 'http://www.idocumentales.net' + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Todas", action="lista", thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', url=host)) + + itemlist.append(Item(channel=item.channel, title="Generos", action="generos", url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png')) + + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '/?s=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png')) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + patron = '<div class=item><a href=(.*?) title=(.*?)\(.*?\)><div class=img><img src=(.*?) alt=.*?' + patron += '<span class=player><\/span><span class=year>(.*?)<\/span><span class=calidad>(.*?)<\/span><\/div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, calidad in matches: + url = scrapedurl + thumbnail = scrapedthumbnail + plot = '' + contentTitle = scrapedtitle + title = contentTitle + ' (' + calidad + ')' + year = scrapedyear + fanart = '' + + itemlist.append( + Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=fanart, contentTitle=contentTitle, infoLabels={'year': year})) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Pagination + + if itemlist: + next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) \/>') + if next_page != '': + itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png')) + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + duplicado = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + patron = '<li id=menu-item-.*? class=menu-item menu-item-type-taxonomy menu-item-object-category menu-item-.*?><a href=(.*?)>(.*?)<\/a><\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + thumbnail = '' + fanart = '' + title = scrapedtitle + url = scrapedurl + + if url not in duplicado: + itemlist.append(Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, + thumbnail=thumbnail, fanart=fanart)) + duplicado.append(url) + return itemlist + + +def busqueda(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + patron = '<li class=s-item><div class=s-img><img class=imx style=margin-top:0px; src=(.*?) alt=(.*?)><span><\/span><\/div><div class=s-box>.*?' + patron += '<h3><a href=(.*?)>.*?<\/a><\/h3><span class=year>(.*?)<\/span><p>(.*?)<\/p>' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear, scrapedplot in matches: + url = scrapedurl + title = scrapertools.decodeHtmlentities(scrapedtitle) + thumbnail = scrapedthumbnail + plot = scrapedplot + year = scrapedyear + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail, + plot=plot, contentSerieName=title, infoLabels={'year': year})) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + # Pagination + + if itemlist: + next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) \/>')
+ if next_page != '': + itemlist.append(Item(channel=item.channel, action="busqueda", title='Siguiente >>>', url=next_page, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png')) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + if texto != '': + return busqueda(item) + else: + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'documentales': + item.url = host + + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/infoplus.py b/plugin.video.alfa/channels/infoplus.py new file mode 100755 index 00000000..50feb3cf --- /dev/null +++ b/plugin.video.alfa/channels/infoplus.py @@ -0,0 +1,2337 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# infoplus: window with extra information about the Item +# ------------------------------------------------------------ + +import re +from threading import Thread + +import xbmc +import xbmcgui +from core import config +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe +from platformcode import platformtools + +mainWindow = list() +ActoresWindow = None +TrailerWindow = None +relatedWindow = None +imagesWindow = None +ActorInfoWindow = None +BusquedaWindow = None +SearchWindows = list() + +exit_loop = False + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + + +def start(item, recomendaciones=[], from_window=False): + global mainWindow + if from_window: + global relatedWindow, ActorInfoWindow, ActoresWindow, BusquedaWindow, TrailerWindow, imagesWindow + # reset the secondary windows before rebuilding them + relatedWindow = ActorInfoWindow = ActoresWindow = BusquedaWindow = TrailerWindow = imagesWindow = None + global exit_loop + exit_loop = False + global SearchWindows + SearchWindows = list() + + dialog = platformtools.dialog_progress("[COLOR darkturquoise][B]Cargando nuevos datos[/B][/COLOR]", + "[COLOR lightyellow]Buscando en [/COLOR][COLOR springgreen][B]Tmdb.......[/B][/COLOR]") + + principal_window = main(item=item, recomendaciones=recomendaciones, dialog=dialog, from_window=from_window) + try: + mainWindow.append(principal_window) + principal_window.doModal() + except: + return + + +class main(xbmcgui.WindowDialog): + def __init__(self, *args, **kwargs): + self.item = kwargs.get('item') + self.recomendaciones = kwargs.get('recomendaciones') + self.dialog = kwargs.get('dialog') + self.from_window = kwargs.get('from_window') + + if self.item.contentType == "movie": + tipo = "película" + tipo_busqueda = "movie" + icono = "http://imgur.com/SenkyxF.png" + else: + tipo = "serie" + tipo_busqueda = "tv" + icono = "http://s6.postimg.org/hzcjag975/tvdb.png" + + if self.item.rating_filma: + if "|" in self.item.show: + self.item.show = "" + self.infoLabels = self.item.info + icono = self.item.icon + rating_fa = self.item.rating_filma + if tipo == "película": + self.infoLabels["tmdb_id"] = self.item.extra.split("|")[1] + else: + self.infoLabels["tmdb_id"] = self.item.extra.split("|")[2] + critica = self.item.critica + rating = 
self.infoLabels.get("rating") + titulo = self.infoLabels["title"] + self.images = [] + thread1 = None + else: + info_copy = dict(self.item.infoLabels) + self.item.infoLabels.pop("season", None) + self.item.infoLabels.pop("episode", None) + tmdb.set_infoLabels_item(self.item, True) + self.infoLabels = self.item.infoLabels + self.infoLabels["season"] = info_copy.get("season", None) + self.infoLabels["episode"] = info_copy.get("episode", None) + + if not self.infoLabels["tmdb_id"]: + self.dialog.close() + platformtools.dialog_notification("Sin resultados", "No hay info de la %s solicitada" % tipo) + global mainWindow + self.close() + del mainWindow + return + + titulo = "[COLOR olive][B]%s[/B][/COLOR]" % self.infoLabels.get("title") + try: + if not self.infoLabels.get("rating"): + rating = "[COLOR crimson][B]Sin puntuación[/B][/COLOR]" + elif self.infoLabels.get("rating") >= 5 and self.infoLabels.get("rating") < 8: + rating = "[COLOR springgreen][B]%s[/B][/COLOR]" % self.infoLabels["rating"] + elif self.infoLabels.get("rating") >= 8: + rating = "[COLOR yellow][B]%s[/B][/COLOR]" % self.infoLabels["rating"] + else: + rating = "[COLOR crimson][B]%s[/B][/COLOR]" % self.infoLabels["rating"] + except: + rating = "[COLOR crimson][B]%s[/B][/COLOR]" % self.infoLabels["rating"] + + self.dialog.update(40, + '[COLOR teal]Registrando[/COLOR]' + '[COLOR yellow][B] film[/B][/COLOR]' + '[COLOR floralwhite][B]affinity.......[/B][/COLOR]') + critica, rating_fa, plot_fa = get_filmaf(self.item, self.infoLabels) + if not self.infoLabels.get("plot") and plot_fa: + self.infoLabels["plot"] = "[COLOR moccasin][B]%s[/B][/COLOR]" % plot_fa + elif not self.infoLabels["plot"]: + self.infoLabels["plot"] = "[COLOR yellow][B]Esta pelicula no tiene informacion...[/B][/COLOR]" + else: + self.infoLabels["plot"] = "[COLOR moccasin][B]%s[/B][/COLOR]" % self.infoLabels.get("plot") + + self.dialog.update(60, '[COLOR khaki]Indagando recomendaciones.......[/COLOR]') + thread1 = Thread(target=get_recomendations, args=[self.item, self.infoLabels, self.recomendaciones]) + thread1.setDaemon(True) + thread1.start() + + if self.infoLabels.get("status") == "Ended" and tipo == "serie": + status = "[COLOR aquamarine][B]Finalizada %s[/B][/COLOR]" + elif self.infoLabels.get("status") and tipo == "serie": + status = "[COLOR aquamarine][B]En emisión %s[/B][/COLOR]" + else: + status = "[COLOR aquamarine][B]%s[/B][/COLOR]" + if self.infoLabels.get("tagline") and tipo == "serie": + self.infoLabels["tagline"] = status % "(" + self.infoLabels["tagline"] + ")" + elif not self.infoLabels.get("tagline") and tipo == "serie": + self.infoLabels["tagline"] = status % "(Temporadas: %s)" % self.infoLabels.get("number_of_seasons", + "---") + else: + self.infoLabels["tagline"] = status % self.infoLabels.get("tagline", "") + + self.images = {} + thread2 = Thread(target=fanartv, args=[self.item, self.infoLabels, self.images]) + thread2.setDaemon(True) + thread2.start() + + if self.infoLabels["tmdb_id"]: + otmdb = tmdb.Tmdb(id_Tmdb=self.infoLabels["tmdb_id"], tipo=tipo_busqueda) + self.infoLabels["images"] = otmdb.result.get("images", {}) + for key, value in self.infoLabels["images"].items(): + if not value: + self.infoLabels["images"].pop(key) + + if not self.infoLabels.get("originaltitle"): + self.infoLabels["originaltitle"] = otmdb.result.get("original_title", + otmdb.result.get("original_name", "")) + self.trailers = otmdb.get_videos() + self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0)) + else: + self.trailers = [] + + if self.item.contentType 
!= "movie": + self.dialog.update(60, + '[COLOR teal]Recopilando imágenes en [/COLOR]' + '[COLOR floralwhite][B]FAN[/B][/COLOR]' + '[COLOR slategray][B]ART.[/B][/COLOR]' + '[COLOR darkgray]TV.......[/COLOR]') + try: + ###Busca música serie + titulo = re.sub('\[.*?\]', '', titulo) + titulo = self.infoLabels.get("originaltitle", titulo) + titulo = re.sub("'", "", titulo) + url_tvthemes = "http://televisiontunes.com/search.php?q=%s" % titulo.replace(' ', '+') + + data = scrapertools.downloadpage(url_tvthemes) + page_theme = scrapertools.find_single_match(data, '<!-- sond design -->.*?<li><a href="([^"]+)"') + + if page_theme: + page_theme = "http://televisiontunes.com" + page_theme + data = scrapertools.downloadpage(page_theme) + song = scrapertools.get_match(data, '<form name="song_name_form">.*?type="hidden" value="(.*?)"') + song = song.replace(" ", "%20") + pl = xbmc.PlayList(xbmc.PLAYLIST_MUSIC) + pl.clear() + pl.add(song) + self.player = xbmc.Player() + self.player.play(pl) + except: + import traceback + logger.error(traceback.format_exc()) + + if xbmc.Player().isPlaying(): + self.dialog.update(80, + '[COLOR teal]Afinado instrumentos en [/COLOR]' + '[COLOR cyan][B]T[/B][/COLOR]' + '[COLOR paleturquoise][B]V[/B][/COLOR]' + '[COLOR floralwhite]tu[/COLOR]' + '[COLOR darkgray][B]n[/B][/COLOR]' + '[COLOR slategray][B]es[/B][/COLOR]') + else: + self.dialog.update(80, + '[COLOR teal]Recopilando imágenes en [/COLOR]' + '[COLOR floralwhite][B]FAN[/B][/COLOR]' + '[COLOR slategray][B]ART.[/B][/COLOR]' + '[COLOR darkgray]TV.......[/COLOR]') + + while thread2.isAlive(): + xbmc.sleep(100) + if not self.infoLabels.get("fanart") and self.images: + try: + if self.item.contentType == "movie": + self.infoLabels["fanart"] = \ + self.images.get("moviebackground", self.images.get("hdmovieclearart", self.images.get("movieart")))[ + 0].get("url") + else: + self.infoLabels["fanart"] = \ + self.images.get("showbackground", self.images.get("hdclearart", self.images.get("clearart")))[ + 0].get("url") + except: + self.infoLabels["fanart"] = 'http://i.imgur.com/XuXGXjN.jpg' + import traceback + logger.error(traceback.format_exc()) + elif self.infoLabels.get("season") and self.images.get("showbackground"): + for imagen in self.images["showbackground"]: + if imagen.get("season") == str(self.infoLabels.get("season", "")): + self.infoLabels["fanart"] = imagen["url"] + break + + if not self.infoLabels.get("fanart"): + self.infoLabels["fanart"] = 'http://i.imgur.com/XuXGXjN.jpg' + + if self.images: + try: + if self.item.contentType == "movie": + self.infoLabels["thumbnail"] = self.images.get("hdmovielogo", self.images.get("movielogo"))[0].get( + "url") + elif self.infoLabels.get("season") and self.images.get("seasonthumb"): + find = False + for imagen in self.images["seasonthumb"]: + if imagen.get("season") == str(self.infoLabels.get("season", "")): + self.infoLabels["thumbnail"] = imagen["url"] + find = True + break + if not find: + self.infoLabels["thumbnail"] = \ + self.images.get("hdtvlogo", self.images.get("clearlogo", self.images.get("tvthumb")))[0].get( + "url") + else: + self.infoLabels["thumbnail"] = self.images.get("hdtvlogo", self.images.get("clearlogo", ))[0].get( + "url") + self.infoLabels["thumbnail"] = self.infoLabels["thumbnail"].replace(" ", "%20") + except: + self.infoLabels["thumbnail"] = 'http://i.imgur.com/8K5f4Uo.png' + import traceback + logger.error(traceback.format_exc()) + elif not self.item.rating_filma or "image.tmdb.org" in self.infoLabels.get("thumbnail", + "") or not self.infoLabels.get( + 
"thumbnail"): + self.infoLabels["thumbnail"] = 'http://i.imgur.com/8K5f4Uo.png' + + self.name = re.sub(r'(\[.*?\])', '', self.infoLabels["title"]) + self.botones = [] + + skin = xbmc.getSkinDir() + self.fonts = get_fonts(skin) + self.setCoordinateResolution(2) + self.actorButton = xbmcgui.ControlButton(650, 50, 60, 60, '', font='Font40', alignment=0x00000006, + noFocusTexture='http://i.imgur.com/yK4LCqB.png', + focusTexture='http://s6.postimg.org/djdkrpz0x/starzen.png', + focusedColor='0xFFAAAAAA') + self.trailerButton = xbmcgui.ControlButton(550, 50, 60, 60, '', font='Font40', alignment=0x00000006, + noFocusTexture='http://s6.postimg.org/dbs8k30r5/zentrailer.png', + focusTexture='http://s6.postimg.org/jqr9gr7gx/zentrailerfocused.png') + + self.background = xbmcgui.ControlImage(-40, -40, 1500, 830, 'http://imgur.com/ur6H9Ps.png') + self.title = xbmcgui.ControlTextBox(120, 0, 1130, 50) + self.rating = xbmcgui.ControlTextBox(415, 37, 1040, 50) + self.rating_filma = xbmcgui.ControlTextBox(417, 112, 1043, 50) + self.tagline = xbmcgui.ControlFadeLabel(120, 70, 420, 45, self.fonts["12"]) + self.plot = xbmcgui.ControlTextBox(117, 150, 1056, 150) + self.critica = xbmcgui.ControlTextBox(20, 386, 1056, 100, self.fonts["12"]) + self.fanart = xbmcgui.ControlImage(-40, -40, 1500, 830, self.infoLabels.get("fanart", "")) + self.critica_image = xbmcgui.ControlImage(120, 300, 200, 90, 'http://imgur.com/kGmaIER.png') + self.icon = xbmcgui.ControlImage(360, 30, 40, 40, icono) + self.fa_icon = xbmcgui.ControlImage(350, 100, 60, 60, "http://s6.postimg.org/6yhe5fgy9/filma.png") + + self.addControl(self.fanart) + self.fanart.setAnimations([('conditional', 'effect=rotatey start=100% end=0% time=1500 condition=true',), + ('unfocus', 'effect=zoom start=110% end=100% time=1000 tween=elastic easing=out',), ( + 'WindowClose', + 'effect=rotatey delay= 1000 start=0% end=-300% time=800 condition=true',)]) + + self.addControl(self.background) + self.addControl(self.critica_image) + self.critica_image.setAnimations( + [('conditional', 'effect=rotatey center=500 start=300% end=0% time=3000 condition=true ',), + ('unfocus', 'effect=zoom start=110% end=100% time=1000 tween=elastic easing=out',), + ('focus', 'effect=zoom start=80% end=110% time=700',), + ('WindowClose', 'effect=rotatey center=500 start=0% end=-300% time=800 condition=true',)]) + self.addControl(self.trailerButton) + self.botones.append(self.trailerButton) + self.trailerButton.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1200 time=4000 condition=true tween=elastic',), + ('unfocus', 'effect=zoom start=110% end=100% time=1000 tween=elastic easing=out',), + ('focus', 'effect=zoom start=80% end=110% time=700',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.actorButton) + self.botones.append(self.actorButton) + self.actorButton.setAnimations( + [('conditional', 'effect=slide start=1500% end=0% delay=1200 time=4000 condition=true tween=elastic',), + ('unfocus', 'effect=zoom start=110% end=100% time=1000 tween=elastic easing=out',), + ('focus', 'effect=zoom start=80% end=110% time=700',), + ('WindowClose', 'effect=slide start=0% end=1500% time=800 condition=true',)]) + + self.setFocus(self.trailerButton) + self.addControl(self.title) + self.title.setAnimations([('conditional', 'effect=fade start=0% end=100% delay=1500 time=1500 condition=true',), + ('WindowClose', 'effect=fade start=100% end=0% time=800 condition=true',)]) + self.addControl(self.tagline) + 
self.tagline.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=2000 time=1500 condition=true',), + ('WindowClose', 'effect=fade start=100% end=0% time=800 condition=true',)]) + if self.item.contentType == "movie" and self.infoLabels.get("duration", 0): + self.duration = xbmcgui.ControlTextBox(120, 100, 420, 45, self.fonts["12"]) + self.addControl(self.duration) + self.duration.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=2000 time=1500 condition=true',), + ('WindowClose', 'effect=fade start=100% end=0% time=800 condition=true',)]) + self.duration.setText( + "[COLOR mediumturquoise][B]Duración: %s minutos[/B][/COLOR]" % self.infoLabels["duration"]) + self.addControl(self.rating) + self.rating.setAnimations( + [('conditional', 'effect=rotatey start=100% end=0% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=rotatey start=0% end=100% time=800 condition=true',)]) + self.addControl(self.rating_filma) + self.rating_filma.setAnimations( + [('conditional', 'effect=rotatey start=100% end=0% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=rotatey start=0% end=100% time=800 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=slide delay=2000 start=1500 time=3600 tween=elastic easing=inout condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=0% time=800 condition=true',)]) + self.addControl(self.critica) + self.critica.setAnimations([('conditional', + 'effect=slide delay=1800 start=-1500% end=100% time=3600 tween=elastic easing=inout condition=true',), + ('WindowClose', 'effect=slide start=100% end=-1500% time=800 condition=true',)]) + + if not self.infoLabels.get("images") and not self.images: + self.thumbnail = xbmcgui.ControlImage(813, 0, 390, 150, 'http://i.imgur.com/oMjtYni.png') + self.addControl(self.thumbnail) + self.thumbnail.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom start=100% end=0% time=600 condition=true',)]) + else: + self.thumbnail = xbmcgui.ControlButton(813, 0, 390, 150, '', self.infoLabels.get("thumbnail", ""), + self.infoLabels.get("thumbnail", "")) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('unfocus', 'effect=zoom start=105% end=100% time=1000 tween=elastic easing=out',), + ('focus', 'effect=zoom start=80% end=100% time=700',), + ('WindowClose', 'effect=zoom start=100% end=0% time=600 condition=true',)]) + self.botones.append(self.thumbnail) + + self.addControl(self.icon) + self.icon.setAnimations( + [('conditional', 'effect=slide start=0,-700 delay=2000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.fa_icon) + self.fa_icon.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + self.title.setText(titulo) + 
self.tagline.addLabel(self.infoLabels.get("tagline")) + self.rating.setText(rating) + self.rating_filma.setText(rating_fa) + + try: + self.plot.autoScroll(11000, 6000, 30000) + self.critica.autoScroll(11000, 2500, 13000) + except: + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")') + self.plot.setText(dhe(self.infoLabels.get("plot", ""))) + self.critica.setText(critica) + self.critica_butt = xbmcgui.ControlButton(20, 386, 1056, 100, '', '', '') + self.addControl(self.critica_butt) + + xbmc.sleep(200) + self.mas_pelis = 8 + self.idps = [] + self.botones_maspelis = [] + self.focus = -1 + i = 0 + count = 0 + self.btn_left = xbmcgui.ControlButton(90, 490, 70, 29, '', "http://s6.postimg.org/i3pnobu6p/redarrow.png", + "http://s6.postimg.org/i3pnobu6p/redarrow.png") + self.addControl(self.btn_left) + self.btn_left.setAnimations( + [('conditional', 'effect=zoom start=-100 end=100 delay=5000 time=2000 condition=true tween=bounce',), ( + 'conditional', + 'effect=zoom start=720,642,70,29 end=640,642,69,29 time=1000 loop=true tween=bounce condition=Control.HasFocus(' + str( + self.btn_left.getId()) + ')',), ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.btn_left.setVisible(False) + self.botones.append(self.btn_left) + if thread1: + while thread1.isAlive(): + xbmc.sleep(100) + for idp, peli, thumb in self.recomendaciones: + if self.item.contentType == "movie": + peli = "[COLOR yellow][B]" + peli + "[/B][/COLOR]" + else: + peli = "[COLOR slategray][B]" + peli + "[/B][/COLOR]" + if count % 8 == 0: + i = 0 + self.image = xbmcgui.ControlButton(65 + i, 538, 135, 160, '', thumb, thumb) + self.neon = xbmcgui.ControlImage(60 + i, 525, 145, 186, "http://s6.postimg.org/x0jspnxch/buttons.png") + fadelabel = xbmcgui.ControlFadeLabel(67 + i, 698, 135, 50) + self.botones.append(self.image) + if len(self.recomendaciones) != 0: + self.tpi = xbmcgui.ControlImage(200, 490, 100, 41, 'http://imgur.com/GNP2QcB.png') + self.addControl(self.tpi) + self.tpi.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 delay=6200 time=900 tween=elastic condition=true',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('focus', 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + if count < 8: + self.addControl(self.image) + self.image.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 delay=6200 time=900 tween=elastic condition=true',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), ( + 'focus', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.addControl(fadelabel) + fadelabel.addLabel(peli) + fadelabel.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 delay=6200 time=900 tween=elastic condition=true',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + self.addControl(self.neon) + self.neon.setVisibleCondition('[Control.HasFocus(' + str(self.image.getId()) + ')]') + self.neon.setAnimations([('conditional', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce condition=Control.HasFocus(' + str( + self.image.getId()) + ')',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + 
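+ # Bookkeeping for the related-titles strip: idps holds (button, title, tmdb_id, + # thumbnail) per recommendation, and botones_maspelis holds the controls (poster + # button, neon frame, fading label) used to page them eight at a time.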
self.idps.append([self.image, peli, idp, thumb]) + self.botones_maspelis.append([self.image, self.neon, fadelabel, peli]) + + i += 150 + count += 1 + + xbmc.sleep(200) + self.btn_right = None + if len(self.recomendaciones) > 8: + self.btn_right = xbmcgui.ControlButton(1150, 495, 60, 27, '', + "http://s6.postimg.org/j4uhr70k1/greenarrow.png", + "http://s6.postimg.org/j4uhr70k1/greenarrow.png") + self.addControl(self.btn_right) + self.btn_right.setAnimations( + [('conditional', 'effect=slide start=-3000 end=0 delay=6200 time=2000 condition=true tween=bounce',), ( + 'conditional', + 'effect=zoom start=230,490, 60, 27, 29 end=1230,642,61,27 time=1000 loop=true tween=bounce condition=Control.HasFocus(' + str( + self.btn_right.getId()) + ')',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.botones.append(self.btn_right) + xbmc.sleep(200) + + self.lupam = xbmcgui.ControlImage(820, 320, 60, 60, "http://imgur.com/VDdB0Uw.png") + self.addControl(self.lupam) + self.lupam.setAnimations( + [('conditional', 'effect=slide start=1500 delay=7020 time=200 tween=elastic condition=true',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + self.global_search = xbmcgui.ControlButton(916, 320, 140, 53, '', 'http://imgur.com/hoOvpHV.png', + 'http://imgur.com/hoOvpHV.png') + self.addControl(self.global_search) + self.global_search.setAnimations( + [('conditional', 'effect=slide start=0,700 delay=6200 time=900 condition=true',), + ('unfocus', 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('focus', 'effect=zoom center=auto end=130% reversible=false',), + ('WindowClose', 'effect=slide end=0,700 time=1000 condition=true',)]) + self.botones.insert(3, self.global_search) + self.buscar = None + if self.from_window: + canal = self.item.from_channel + if not canal: + canal = self.item.channel + channel = __import__('channels.%s' % canal, None, None, ["channels.%s" % canal]) + if hasattr(channel, 'search'): + if not self.item.thumb_busqueda: + from core import channeltools + self.item.thumb_busqueda = channeltools.get_channel_parameters(canal)["thumbnail"] + self.buscar = xbmcgui.ControlButton(1095, 320, 140, 53, '', self.item.thumb_busqueda, + self.item.thumb_busqueda) + self.addControl(self.buscar) + self.botones.insert(4, self.buscar) + self.buscar.setAnimations( + [('conditional', 'effect=slide start=0,700 delay=6200 time=900 condition=true',), + ('unfocus', 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('focus', 'effect=zoom center=auto end=130% reversible=false',), + ('WindowClose', 'effect=slide end=0,700 time=1000 condition=true',)]) + xbmc.sleep(200) + self.dialog.close() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + global mainWindow + xbmc.executebuiltin('xbmc.PlayMedia(Stop)') + self.close() + mainWindow.pop() + if not mainWindow: + del mainWindow + else: + xbmc.sleep(800) + mainWindow[-1].doModal() + + if action == ACTION_MOVE_RIGHT or action == ACTION_MOVE_DOWN: + if self.focus < len(self.botones) - 1: + self.focus += 1 + while True: + id_focus = str(self.botones[self.focus].getId()) + if xbmc.getCondVisibility('[Control.IsVisible(' + id_focus + ')]'): + self.setFocus(self.botones[self.focus]) + break + self.focus += 1 + if self.focus == len(self.botones): + break + + if action == ACTION_MOVE_LEFT or action == ACTION_MOVE_UP: + if self.focus > 0: + self.focus -= 1 + while True: + id_focus = 
str(self.botones[self.focus].getId()) + if xbmc.getCondVisibility('[Control.IsVisible(' + id_focus + ')]'): + self.setFocus(self.botones[self.focus]) + break + self.focus -= 1 + if self.focus == len(self.botones): + break + + if action == 105 or action == 6: + for boton, peli, id, poster2 in self.idps: + try: + if self.getFocusId() == boton.getId() and self.btn_right: + self.focus = len(self.botones) - 1 + xbmc.executebuiltin('SendClick(%s)' % self.btn_right.getId()) + except: + pass + + if action == 104 or action == 5: + for boton, peli, id, poster2 in self.idps: + try: + if self.getFocusId() == boton.getId() and self.btn_left: + self.setFocus(self.btn_left) + xbmc.executebuiltin('SendClick(%s)' % self.btn_left.getId()) + except: + pass + + def onControl(self, control): + if control == self.actorButton: + global ActoresWindow + ActoresWindow = Actores('DialogSelect.xml', config.get_runtime_path(), tmdb_id=self.infoLabels["tmdb_id"], + item=self.item, fonts=self.fonts) + ActoresWindow.doModal() + + elif control == self.trailerButton: + global TrailerWindow + item = self.item.clone(thumbnail=self.infoLabels.get("thumbnail", ""), contextual=True, + contentTitle=self.name, windowed=True, infoLabels=self.infoLabels) + TrailerWindow = Trailer('TrailerWindow.xml', config.get_runtime_path()).Start(item, self.trailers) + + elif control == self.thumbnail: + global imagesWindow + imagesWindow = images(fanartv=self.images, tmdb=self.infoLabels["images"]) + imagesWindow.doModal() + + elif control == self.buscar or control == self.global_search: + if control == self.buscar: + check_busqueda = "no_global" + try: + canal = self.item.from_channel + if not canal: + canal = self.item.channel + channel = __import__('channels.%s' % canal, None, None, ["channels.%s" % canal]) + itemlist = channel.search(self.item.clone(), self.infoLabels.get("title")) + if not itemlist and self.infoLabels.get("originaltitle"): + itemlist = channel.search(self.item.clone(), self.infoLabels.get("originaltitle", "")) + except: + import traceback + logger.error(traceback.format_exc()) + else: + check_busqueda = "global" + itemlist = busqueda_global(self.item, self.infoLabels) + if len(itemlist) == 1 and self.infoLabels.get("originaltitle"): + itemlist = busqueda_global(self.item, self.infoLabels, org_title=True) + if itemlist: + global BusquedaWindow + BusquedaWindow = Busqueda('DialogSelect.xml', config.get_runtime_path(), itemlist=itemlist, + item=self.item) + BusquedaWindow.doModal() + else: + if check_busqueda == "no_global": + self.buscar.setVisible(False) + self.notfound = xbmcgui.ControlImage(800, 520, 300, 120, "http://imgur.com/V1xs9pT.png") + self.addControl(self.notfound) + self.notfound.setAnimations( + [('conditional', 'effect=zoom center=auto start=500% end=0% time=2000 condition=true',)]) + else: + self.global_search.setVisible(False) + self.notfound = xbmcgui.ControlImage(800, 520, 300, 120, "http://imgur.com/V1xs9pT.png") + self.addControl(self.notfound) + self.notfound.setAnimations( + [('conditional', 'effect=zoom center=auto start=500% end=0% time=2000 condition=true',)]) + elif control == self.btn_right: + try: + i = 1 + count = 0 + for afoto, neon, fadelabel, peli in self.botones_maspelis: + if i > self.mas_pelis - 8 and i <= self.mas_pelis and count < 8: + self.removeControls([afoto, neon, fadelabel]) + count += 1 + elif i > self.mas_pelis and count < 16: + self.addControl(afoto) + afoto.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 time=900 delay=200 tween=elastic condition=true',), + 
('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('focus', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.addControl(fadelabel) + fadelabel.addLabel(peli) + fadelabel.setAnimations( + [('conditional', 'effect=rotatey start=200 end=0 time=900 tween=elastic condition=true',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + self.addControl(neon) + neon.setVisibleCondition('[Control.HasFocus(' + str(afoto.getId()) + ')]') + neon.setAnimations([('conditional', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce condition=Control.HasFocus(' + str( + afoto.getId()) + ')',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + count += 1 + self.mas_pelis += 1 + xbmc.sleep(120) + i += 1 + + if self.mas_pelis > 8 and self.mas_pelis < 17: + self.btn_left.setVisible(True) + if len(self.botones_maspelis) < self.mas_pelis + 1: + self.btn_right.setVisible(False) + self.setFocus(self.btn_left) + self.focus = 4 + else: + self.focus = len(self.botones) - 1 + self.setFocus(self.btn_right) + xbmc.sleep(300) + except: + pass + elif control == self.btn_left: + try: + i = 1 + count = 0 + + if self.mas_pelis == len(self.botones_maspelis): + self.btn_right.setVisible(True) + + len_pelis = self.mas_pelis + for afoto, neon, fadelabel, peli in self.botones_maspelis: + resta = 8 + (len_pelis % 8) + if resta == 8: + resta = 16 + resta2 = len_pelis % 8 + if not resta2: + resta2 = 8 + if i > len_pelis - resta and count < 8: + self.addControl(afoto) + afoto.setAnimations( + [('conditional', 'effect=rotatey start=200 end=0 time=900 tween=elastic condition=true',), + ('unfocus', 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('focus', 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.addControl(fadelabel) + fadelabel.addLabel(peli) + fadelabel.setAnimations( + [('conditional', 'effect=rotatey start=200 end=0 time=900 tween=elastic condition=true',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + self.addControl(neon) + neon.setVisibleCondition('[Control.HasFocus(' + str(afoto.getId()) + ')]') + neon.setAnimations([('conditional', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce condition=Control.HasFocus(' + str( + afoto.getId()) + ')',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + count += 1 + elif i > len_pelis - resta2 and i <= len_pelis and count < 16: + self.removeControls([afoto, neon, fadelabel]) + count += 1 + self.mas_pelis -= 1 + i += 1 + + if self.mas_pelis == 8: + self.btn_left.setVisible(False) + self.focus = -1 + xbmc.executebuiltin('Action(Left)') + except: + pass + else: + for boton, peli, id, poster2 in self.idps: + if control == boton: + dialog = platformtools.dialog_progress("[COLOR darkturquoise][B]Cargando nueva info[/B][/COLOR]", + "[COLOR lightyellow]Buscando en [/COLOR][COLOR springgreen][B]Tmdb.......[/B][/COLOR]") + tipo = self.item.contentType + if tipo != "movie": + tipo = "tv" + new_tmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo) + new_infolabels = new_tmdb.get_infoLabels() + trailers = new_tmdb.get_videos() + + new_infolabels["cast"] = new_tmdb.result.get("credits_cast", []) + new_infolabels["crew"] = new_tmdb.result.get("credits_crew", []) + new_infolabels["created_by"] = 
new_tmdb.result.get("created_by", []) + global relatedWindow + relatedWindow = related(item=self.item, infolabels=new_infolabels, fonts=self.fonts, + trailers=trailers, dialog=dialog) + relatedWindow.doModal() + + +class related(xbmcgui.WindowDialog): + def __init__(self, *args, **kwargs): + self.item = kwargs.get("item") + self.infoLabels = kwargs.get("infolabels") + self.fonts = kwargs.get("fonts") + self.trailers = kwargs.get("trailers") + self.dialog = kwargs.get("dialog") + + try: + if not self.infoLabels.get("rating"): + rating = "[COLOR crimson][B]Sin puntuación[/B][/COLOR]" + elif self.infoLabels["rating"] >= 5 and self.infoLabels["rating"] < 8: + rating = "[COLOR springgreen][B]%s[/B][/COLOR]" % self.infoLabels["rating"] + elif self.infoLabels["rating"] >= 8: + rating = "[COLOR fuchsia][B]%s[/B][/COLOR]" % self.infoLabels["rating"] + else: + rating = "[COLOR crimson][B]%s[/B][/COLOR]" % self.infoLabels["rating"] + except: + rating = "[COLOR crimson][B]%s[/B][/COLOR]" % self.infoLabels["rating"] + + images = fanartv(self.item, self.infoLabels) + if not self.infoLabels.get("fanart"): + try: + if self.item.contentType == "movie": + self.infoLabels["fanart"] = \ + images.get("moviebackground", images.get("hdmovieclearart", images.get("movieart")))[0].get("url") + else: + self.infoLabels["fanart"] = \ + images.get("showbackground", images.get("hdclearart", images.get("clearart")))[0].get("url") + except: + import traceback + logger.error(traceback.format_exc()) + + try: + if self.item.contentType == "movie": + self.infoLabels["thumbnail"] = images.get("hdmovielogo", images.get("movielogo"))[0].get("url") + elif self.infoLabels["season"]: + self.infoLabels["thumbnail"] = images.get("seasonthumb", images.get("tvthumb", images.get("hdtvlogo")))[ + 0].get("url") + else: + self.infoLabels["thumbnail"] = images.get("hdtvlogo", images.get("tvthumb"))[0].get("url") + except: + import traceback + logger.error(traceback.format_exc()) + + self.setCoordinateResolution(2) + self.background = xbmcgui.ControlImage(78, 50, 1053, 634, self.infoLabels.get("fanart", + "http://s6.postimg.org/fflvear2p/nofanart.png")) + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=100% delay=670 time=2500 condition=true',), + ('WindowClose', 'effect=slide end=-2000% time=1000 condition=true',)]) + + self.shadow = xbmcgui.ControlImage(75, 43, 1061, 649, 'http://s6.postimg.org/k05dw264x/marc_fanart.png') + self.addControl(self.shadow) + self.shadow.setAnimations( + [('conditional', 'effect=slide start=1000% end=100% delay=660 time=2500 condition=true',), + ('WindowClose', 'effect=slide end=-2000% time=1000 condition=true',)]) + self.star = xbmcgui.ControlImage(955, 55, 67, 67, "http://s6.postimg.org/jzn0d3clt/star.png") + self.addControl(self.star) + self.star.setAnimations([('conditional', 'effect=slide delay=6000 start=2000 time=800 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + + self.puntuacion_peli = xbmcgui.ControlTextBox(977, 78, 35, 35, self.fonts["12"]) + self.addControl(self.puntuacion_peli) + self.puntuacion_peli.setText(rating) + self.puntuacion_peli.setAnimations( + [('conditional', 'effect=slide delay=6000 start=2000 time=800 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + + self.info = "[COLOR lemonchiffon]%s[/COLOR]" % self.infoLabels.get("plot", "Sin información...") + self.info_peli = xbmcgui.ControlTextBox(455, 120, 750, 234) + 
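+ # ControlTextBox.autoScroll(delay, time, repeat) only exists on recent Kodi + # versions, so the call below is wrapped in try/except and falls back to a + # notification asking the user to update Kodi.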
self.addControl(self.info_peli) + + try: + self.info_peli.autoScroll(7000, 6000, 30000) + except: + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")') + self.info_peli.setText(self.info) + self.info_peli.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3600 time=800 condition=true',), ( + 'conditional', 'effect=slide delay=1000 start=0,-500 delay=2600 time=2200 tween=bounce condition=true',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + + self.poster_peli = xbmcgui.ControlImage(210, 90, 230, 260, self.infoLabels.get("thumbnail", "")) + self.addControl(self.poster_peli) + self.poster_peli.setAnimations([('conditional', + 'effect=zoom center=auto start=0% end=100% delay=2000 time=3000 tween=bounce condition=true',), + ('WindowClose', 'effect=zoom end=0% time=1000 condition=true',)]) + + if self.infoLabels.get("status") == "Ended" and self.item.contentType != "movie": + status = "[COLOR aquamarine][B]Finalizada %s[/B][/COLOR]" + elif self.infoLabels.get("status") and self.item.contentType != "movie": + status = "[COLOR aquamarine][B]En emisión %s[/B][/COLOR]" + else: + status = "[COLOR aquamarine][B]%s[/B][/COLOR]" + + if self.infoLabels.get("tagline") and self.item.contentType != "movie": + self.infoLabels["tagline"] = status % "(" + self.infoLabels["tagline"] + ")" + else: + self.infoLabels["tagline"] = status % self.infoLabels.get("tagline", "") + + if self.infoLabels.get("tagline"): + self.tagline_peli = xbmcgui.ControlFadeLabel(290, 55, 490, 260) + self.addControl(self.tagline_peli) + self.tagline_peli.addLabel(self.infoLabels["tagline"]) + self.tagline_peli.setAnimations( + [('conditional', 'effect=fade center=auto start=0% end=100% delay=3800 time=2000 condition=true',), + ('WindowClose', 'effect=fade end=0% time=500 condition=true',)]) + + if self.infoLabels.get("title", self.infoLabels.get("originaltitle")): + self.title_peli = xbmcgui.ControlFadeLabel(455, 85, 320, 430) + self.addControl(self.title_peli) + self.title_peli.addLabel( + "[COLOR yellow][B]%s[/B][/COLOR]" % self.infoLabels.get("title", self.infoLabels.get("originaltitle"))) + self.title_peli.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=2500 time=5000 condition=true',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + + self.gt_peli = xbmcgui.ControlTextBox(210, 385, 1100, 60, self.fonts["12"]) + self.addControl(self.gt_peli) + self.gt_peli.setText("[COLOR limegreen][B]Género: [/B][/COLOR]") + self.gt_peli.setAnimations( + [('conditional', 'effect=slide start=0,-7000 delay=5750 time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% time=700 condition=true',)]) + + self.genero_peli = xbmcgui.ControlFadeLabel(271, 385, 400, 60, self.fonts["12"]) + self.addControl(self.genero_peli) + self.genero_peli.addLabel(" [COLOR yellowgreen][B]%s[/B][/COLOR]" % self.infoLabels.get("genre", "---")) + self.genero_peli.setAnimations( + [('conditional', 'effect=slide start=0,-7000 delay=5750 time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% time=700 condition=true',)]) + + self.pt_peli = xbmcgui.ControlTextBox(210, 410, 307, 60, self.fonts["12"]) + self.addControl(self.pt_peli) + self.pt_peli.setText("[COLOR limegreen][B]Productora: [/B][/COLOR]") + self.pt_peli.setAnimations( + [('conditional', 'effect=slide start=0,-7000 delay=5700 
time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% delay=100 time=700 condition=true',)]) + + self.productora_peli = xbmcgui.ControlFadeLabel(310, 410, 400, 60, self.fonts["12"]) + self.addControl(self.productora_peli) + self.productora_peli.addLabel("[COLOR yellowgreen][B]%s[/B][/COLOR]" % self.infoLabels.get("studio", "---")) + self.productora_peli.setAnimations( + [('conditional', 'effect=slide start=0,-700 delay=5700 time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% delay=100 time=700 condition=true',)]) + + self.paist_peli = xbmcgui.ControlTextBox(210, 435, 400, 60, self.fonts["12"]) + self.addControl(self.paist_peli) + self.paist_peli.setText("[COLOR limegreen][B]País: [/B][/COLOR]") + self.paist_peli.setAnimations( + [('conditional', 'effect=slide start=0,-700 delay=5650 time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% delay=200 time=700 condition=true',)]) + + self.pais_peli = xbmcgui.ControlFadeLabel(247, 435, 400, 60, self.fonts["12"]) + self.addControl(self.pais_peli) + self.pais_peli.addLabel(" [COLOR yellowgreen][B]%s[/B][/COLOR]" % self.infoLabels.get("country", "---")) + self.pais_peli.setAnimations( + [('conditional', 'effect=slide start=0,-700 delay=5650 time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% delay=200 time=700 condition=true',)]) + + self.ft_peli = xbmcgui.ControlTextBox(210, 460, 1100, 60, self.fonts["12"]) + self.addControl(self.ft_peli) + self.ft_peli.setText("[COLOR limegreen][B]Estreno: [/B][/COLOR]") + self.ft_peli.setAnimations( + [('conditional', 'effect=slide start=0,-700 delay=5600 time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% delay=300 time=700 condition=true',)]) + + self.fecha_peli = xbmcgui.ControlFadeLabel(273, 460, 400, 60, self.fonts["12"]) + self.addControl(self.fecha_peli) + release_date = " [COLOR yellowgreen][B]%s[/B][/COLOR]" % self.infoLabels.get("release_date", + self.infoLabels.get("premiered", + "---")) + self.fecha_peli.addLabel(release_date) + self.fecha_peli.setAnimations( + [('conditional', 'effect=slide start=0,-700 delay=5600 time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% delay=300 time=700 condition=true',)]) + + if self.infoLabels.get("number_of_seasons"): + self.seasons_txt = xbmcgui.ControlTextBox(210, 485, 200, 60, self.fonts["12"]) + self.addControl(self.seasons_txt) + self.seasons_txt.setText("[COLOR limegreen][B]Temporadas/Episodios: [/B][/COLOR]") + self.seasons_txt.setAnimations([('conditional', + 'effect=slide start=0,-700 delay=5600 time=700 condition=true tween=circle easing=in',), + ('WindowClose', 'effect=slide end=0,-7000% time=700 condition=true',)]) + + self.seasons = xbmcgui.ControlFadeLabel(413, 485, 400, 60, self.fonts["12"]) + self.addControl(self.seasons) + temporadas = " [COLOR yellowgreen][B]%s/%s[/B][/COLOR]" % ( + self.infoLabels.get("number_of_seasons"), self.infoLabels.get("number_of_episodes", "---")) + self.seasons.addLabel(temporadas) + self.seasons.setAnimations([('conditional', + 'effect=slide start=0,-700 delay=5600 time=700 condition=true tween=circle easing=in',), + ( + 'WindowClose', 'effect=slide end=0,-7000% delay=300 time=700 condition=true',)]) + + i = 0 + sleep = 0 + for actor in self.infoLabels.get("cast", [])[:5]: + image = "https://image.tmdb.org/t/p/original" + if actor.get("profile_path"): + image += 
actor["profile_path"] + else: + image = "http://i.imgur.com/xQRgLkO.jpg" + self.actor = xbmcgui.ControlImage(215 + i, 529, 63, 63, image) + self.addControl(self.actor) + self.actor.setAnimations([('conditional', + 'effect=zoom center=auto start=0% end=100% delay=5800 time=1500 tween=bounce condition=true ',), + ('WindowClose', + 'effect=zoom end=0 center=auto delay=100+i time=700 condition=true',)]) + self.circle = xbmcgui.ControlImage(195 + i, 511, 102, 103, "http://s6.postimg.org/u1jewuxzl/act_marco.png") + self.addControl(self.circle) + self.circle.setAnimations([('conditional', + 'effect=zoom center=auto start=0 end=100 delay=5800 time=1500 tween=bounce condition=true ',), + ('WindowClose', + 'effect=zoom end=0 center=auto delay=100+i time=700 condition=true',)]) + self.nombre_actor = xbmcgui.ControlFadeLabel(206 + i, 605, 102, 60, self.fonts["12"]) + self.addControl(self.nombre_actor) + self.nombre_actor.addLabel("[COLOR floralwhite][B]%s[/B][/COLOR]" % actor.get("name")) + self.nombre_actor.setAnimations( + [('conditional', 'effect=fade start=0 end=100 delay=5800 time=1500 tween=bounce condition=true ',), + ('WindowClose', 'effect=fade end=0 center=auto time=700 condition=true',)]) + xbmc.sleep(200) + i += 130 + sleep += 1000 + + i = 0 + count = 0 + if self.item.contentType == "movie": + reparto = self.infoLabels.get("crew", []) + else: + reparto = self.infoLabels.get("created_by", []) + + for crew in reparto: + if crew.get('job', '') == 'Director' or self.item.contentType != "movie": + if count == 2: + break + count += 1 + image = "https://image.tmdb.org/t/p/original" + if crew.get("profile_path"): + image += crew.get("profile_path", "") + else: + image = "http://imgur.com/HGwvhMu.png" + + self.td = xbmcgui.ControlImage(880 + i, 390, 63, 63, image) + self.addControl(self.td) + self.td.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=4200 time=200 condition=true',), + ('conditional', 'effect=slide start=-150,-60 delay=4200 time=450 condition=true tween=elastic',), + ('WindowClose', 'effect=slide end=-2000 center=auto time=700 condition=true',)]) + + self.circle = xbmcgui.ControlImage(860 + i, 372, 102, 103, + "http://s6.postimg.org/u1jewuxzl/act_marco.png") + self.addControl(self.circle) + self.circle.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=4200 time=200 condition=true',), + ('conditional', 'effect=slide start=-200,-200 delay=4200 time=450 condition=true tween=elastic',), + ('WindowClose', 'effect=slide end=-2000 center=auto time=700 condition=true',)]) + self.nd = xbmcgui.ControlFadeLabel(860 + i, 464, 105, 60, self.fonts["12"]) + self.addControl(self.nd) + self.nd.addLabel("[COLOR floralwhite][B]%s[/B][/COLOR]" % crew["name"]) + self.nd.setAnimations( + [('conditional', 'effect=fade start=0 end=100 delay=4200 time=1500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=2000 center=auto time=700 condition=true',)]) + i += 130 + + try: + if self.nd: + self.img_dir = xbmcgui.ControlImage(740, 380, 100, 90, "http://s6.postimg.org/k8kl30pe9/director.png") + self.addControl(self.img_dir) + self.img_dir.setAnimations( + [('conditional', 'effect=fade start=0 end=100 delay=3200 time=700 condition=true ',), + ('WindowClose', 'effect=rotate end=-2000 time=700 condition=true',)]) + except: + pass + + self.botones = [] + self.trailer_r = xbmcgui.ControlButton(790, 62, 55, 55, '', 'http://i.imgur.com/cGI2fxC.png', + 'http://i.imgur.com/cGI2fxC.png') + self.addControl(self.trailer_r) + 
self.trailer_r.setAnimations([('conditional', 'effect=slide start=-2000 delay=4000 time=2500 condition=true',), + ('conditional', + 'effect=rotate delay=4000 center=auto start=0% end=360% time=2500 condition=true ',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% reversible=false time=2000 loop=true condition=Control.HasFocus(' + str( + self.trailer_r.getId()) + ')'), + ('WindowClose', 'effect=slide end=2000 time=700 condition=true',)]) + self.botones.append(self.trailer_r) + + self.plusinfo = xbmcgui.ControlButton(1090, 20, 100, 100, '', 'http://i.imgur.com/1w5CFCL.png', + 'http://i.imgur.com/1w5CFCL.png') + self.addControl(self.plusinfo) + self.plusinfo.setAnimations( + [('conditional', 'effect=slide start=0,-700 delay=5600 time=700 condition=true tween=elastic easing=out',), + ('unfocus', 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), ('conditional', + 'effect=rotate center=auto start=0% end=360% reversible=false time=2000 loop=true condition=Control.HasFocus(' + str( + self.plusinfo.getId()) + ')'), + ('WindowClose', 'effect=rotatey end=-300 time=1000 condition=true',)]) + self.botones.append(self.plusinfo) + + self.lupam = xbmcgui.ControlImage(950, 580, 60, 60, "http://imgur.com/VDdB0Uw.png") + self.addControl(self.lupam) + self.lupam.setAnimations( + [('conditional', 'effect=slide start=1500 delay=7020 time=200 tween=elastic condition=true',), + ('WindowClose', 'effect=zoom end=0 center=auto time=700 condition=true',)]) + + self.focus = -1 + self.buscar = None + canal = self.item.from_channel + if not canal: + canal = self.item.channel + channel = __import__('channels.%s' % canal, None, None, ["channels.%s" % canal]) + if hasattr(channel, 'search'): + if not self.item.thumb_busqueda: + from core import channeltools + self.item.thumb_busqueda = channeltools.get_channel_parameters(canal)["thumbnail"] + self.buscar = xbmcgui.ControlButton(1040, 550, 150, 53, '', self.item.thumb_busqueda, + self.item.thumb_busqueda) + self.addControl(self.buscar) + self.botones.append(self.buscar) + self.buscar.setAnimations([('conditional', 'effect=slide start=0,700 delay=6000 time=200 condition=true',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), ( + 'conditional', + 'effect=zoom center=auto start=100% end=120% reversible=false tween=bounce time=1000 loop=true condition=Control.HasFocus(' + str( + self.buscar.getId()) + ')'), + ('WindowClose', 'effect=rotatey end=-300 time=1500 condition=true',)]) + self.global_search = xbmcgui.ControlButton(1046, 620, 140, 53, '', 'http://imgur.com/hoOvpHV.png', + 'http://imgur.com/hoOvpHV.png') + self.addControl(self.global_search) + self.botones.append(self.global_search) + self.global_search.setAnimations( + [('conditional', 'effect=slide start=0,700 delay=6090 time=200 condition=true',), + ('unfocus', 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), ('conditional', + 'effect=zoom center=auto start=120% end=100% reversible=false tween=bounce time=1000 loop=true condition=Control.HasFocus(' + str( + self.global_search.getId()) + ')'), + ('WindowClose', 'effect=rotatey end=300 time=1500 condition=true',)]) + + self.dialog.close() + xbmc.sleep(200) + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + global relatedWindow, exit_loop + exit_loop = True + self.close() + + if 
action == ACTION_MOVE_RIGHT or action == ACTION_MOVE_DOWN: + if self.focus < len(self.botones) - 1: + self.focus += 1 + self.setFocus(self.botones[self.focus]) + + if action == ACTION_MOVE_LEFT or action == ACTION_MOVE_UP: + if self.focus > 0: + self.focus -= 1 + self.setFocus(self.botones[self.focus]) + + def onControl(self, control): + global TrailerWindow, BusquedaWindow + if control == self.plusinfo: + global ActorInfoWindow, relatedWindow, ActoresWindow, imagesWindow, exit_loop, mainWindow + exit_loop = True + borrar = [relatedWindow, ActorInfoWindow, ActoresWindow, BusquedaWindow, TrailerWindow, imagesWindow] + item_new = Item(channel=self.item.channel, contentType=self.item.contentType, infoLabels=self.infoLabels, + thumb_busqueda=self.item.thumb_busqueda, from_channel=self.item.from_channel) + for window in borrar: + try: + window.close() + del window + except: + pass + mainWindow[-1].close() + xbmc.sleep(200) + start(item=item_new, from_window=True) + elif control == self.trailer_r: + item = self.item.clone(thumbnail=self.infoLabels.get("thumbnail"), contextual=True, + contentTitle=self.infoLabels.get("title"), windowed=True, infoLabels=self.infoLabels) + item.infoLabels["images"] = "" + TrailerWindow = Trailer('TrailerWindow.xml', config.get_runtime_path()).Start(item, self.trailers) + else: + if control == self.buscar: + try: + check_busqueda = "no_global" + canal = self.item.from_channel + if not canal: + canal = self.item.channel + channel = __import__('channels.%s' % canal, None, None, ["channels.%s" % canal]) + itemlist = channel.search(self.item.clone(), self.infoLabels.get("title")) + if not itemlist and self.infoLabels.get("originaltitle"): + itemlist = channel.search(self.item.clone(), self.infoLabels.get("originaltitle", "")) + except: + import traceback + logger.error(traceback.format_exc()) + + elif control == self.global_search: + check_busqueda = "global" + itemlist = busqueda_global(self.item, self.infoLabels) + if len(itemlist) == 1 and self.infoLabels.get("originaltitle"): + itemlist = busqueda_global(self.item, self.infoLabels, org_title=True) + + if itemlist: + BusquedaWindow = Busqueda('DialogSelect.xml', config.get_runtime_path(), itemlist=itemlist, + item=self.item) + BusquedaWindow.doModal() + else: + if check_busqueda == "no_global": + self.removeControl(self.buscar) + self.notfound = xbmcgui.ControlImage(800, 520, 300, 120, "http://imgur.com/V1xs9pT.png") + self.addControl(self.notfound) + self.notfound.setAnimations( + [('conditional', 'effect=zoom center=auto start=500% end=0% time=2000 condition=true',)]) + else: + self.removeControl(self.global_search) + self.notfound = xbmcgui.ControlImage(800, 520, 300, 120, "http://imgur.com/V1xs9pT.png") + self.addControl(self.notfound) + self.notfound.setAnimations( + [('conditional', 'effect=zoom center=auto start=500% end=0% time=2000 condition=true',)]) + + +def busqueda_global(item, infoLabels, org_title=False): + logger.info() + if item.contentType != "movie": + cat = ["serie"] + else: + cat = ["movie"] + + new_item = Item() + new_item.extra = infoLabels.get("title", "") + new_item.extra = re.sub('\[.*?\]', '', new_item.extra) + + if org_title: + new_item.extra = infoLabels.get("originaltitle", "") + new_item.category = item.contentType + + from channels import search + return search.do_search(new_item, cat) + + +class Busqueda(xbmcgui.WindowXMLDialog): + def __init__(self, *args, **kwargs): + self.lista = kwargs.get("itemlist") + self.item = kwargs.get("item") + + def onInit(self): + try: + 
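# Kodi's DialogSelect.xml convention: control 1 is the heading label, 3 the + # plain list, 5 the close button and 6 the list with thumbnails; the plain + # list (3) is disabled and hidden so only the thumbnail list (6) is used. + 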
self.control_list = self.getControl(6) + self.getControl(5).setNavigation(self.control_list, self.control_list, self.control_list, self.control_list) + self.getControl(3).setEnabled(0) + self.getControl(3).setVisible(0) + except: + pass + if self.item.contentType != "movie": + self.getControl(1).setLabel("[COLOR orange][B]¿Está la serie que buscas?[/B][/COLOR]") + else: + self.getControl(1).setLabel("[COLOR orange][B]¿Está la película que buscas?[/B][/COLOR]") + + self.getControl(5).setLabel("[COLOR tomato][B]Cerrar[/B][/COLOR]") + self.control_list.reset() + items = [] + for item_l in self.lista: + item = xbmcgui.ListItem(item_l.title) + try: + item.setArt({"thumb": item_l.thumbnail}) + except: + item.setThumbnailImage(item_l.thumbnail) + item.setProperty("item_copy", item_l.tourl()) + items.append(item) + + self.getControl(6).addItems(items) + self.setFocusId(6) + + def onAction(self, action): + global BusquedaWindow + if (action == ACTION_SELECT_ITEM or action == 100) and self.getFocusId() == 6: + dialog = platformtools.dialog_progress_bg("Cargando resultados", "Espere........") + selectitem = self.getControl(6).getSelectedItem() + item = Item().fromurl(selectitem.getProperty("item_copy")) + exec "import channels." + item.channel + " as channel" + itemlist = getattr(channel, item.action)(item) + global SearchWindows + window = GlobalSearch('DialogSelect.xml', config.get_runtime_path(), itemlist=itemlist, dialog=dialog) + SearchWindows.append(window) + self.close() + window.doModal() + + if (action == ACTION_SELECT_ITEM or action == 100) and self.getFocusId() == 5: + self.close() + + elif action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +class GlobalSearch(xbmcgui.WindowXMLDialog): + def __init__(self, *args, **kwargs): + self.lista = kwargs.get("itemlist") + self.dialog = kwargs.get("dialog") + + def onInit(self): + self.dialog.close() + try: + self.control_list = self.getControl(6) + self.getControl(5).setNavigation(self.control_list, self.control_list, self.control_list, self.control_list) + self.getControl(3).setEnabled(0) + self.getControl(3).setVisible(0) + except: + pass + + self.getControl(1).setLabel("[COLOR orange][B]Selecciona...[/B][/COLOR]") + self.getControl(5).setLabel("[COLOR tomato][B]Cerrar[/B][/COLOR]") + self.control_list.reset() + if not self.lista: + global SearchWindows + self.close() + SearchWindows.pop() + if len(SearchWindows) - 1 >= 0: + SearchWindows[-1].doModal() + else: + BusquedaWindow.doModal() + else: + items = [] + for item_l in self.lista: + item = xbmcgui.ListItem(item_l.title) + try: + item.setArt({"thumb": item_l.thumbnail}) + except: + item.setThumbnailImage(item_l.thumbnail) + item.setProperty("item_copy", item_l.tourl()) + items.append(item) + self.getControl(6).addItems(items) + self.setFocusId(6) + + def onAction(self, action): + global SearchWindows + if (action == ACTION_SELECT_ITEM or action == 100) and self.getFocusId() == 6: + selectitem = self.getControl(6).getSelectedItem() + item = Item().fromurl(selectitem.getProperty("item_copy")) + exec "import channels." 
+ item.channel + " as channel" + ventana_error = None + if item.action == "play": + if hasattr(channel, 'play'): + itemlist = channel.play(item) + if len(itemlist) > 0: + item = itemlist[0] + else: + ventana_error = xbmcgui.Dialog() + ok = ventana_error.ok("plugin", "No hay nada para reproducir") + return + + global BusquedaWindow, exit_loop, mainWindow, ActorInfoWindow, relatedWindow, ActoresWindow + borrar = [relatedWindow, ActorInfoWindow, ActoresWindow, BusquedaWindow] + + borrar.extend(SearchWindows) + borrar.extend(mainWindow) + if item.server != "torrent": + import time + recuperar = False + inicio = time.time() + try: + retorna = platformtools.play_video(item) + except: + retorna = True + xbmc.sleep(1500) + if not retorna and xbmc.Player().isPlaying(): + exit_loop = True + for window in borrar: + try: + window.close() + except: + pass + while True: + xbmc.sleep(1000) + if not xbmc.Player().isPlaying(): + break + if time.time() - inicio > 120: + return + + for window in SearchWindows: + window.doModal() + BusquedaWindow.doModal() + mainWindow[-1].doModal() + + elif item.server == "torrent": + exit_loop = True + for window in borrar: + try: + window.close() + del window + except: + pass + platformtools.play_video(item) + + else: + try: + dialog = platformtools.dialog_progress_bg("Cargando resultados", "Espere........") + itemlist = getattr(channel, item.action)(item) + window = GlobalSearch('DialogSelect.xml', config.get_runtime_path(), itemlist=itemlist, + dialog=dialog) + SearchWindows.append(window) + self.close() + window.doModal() + except: + pass + + elif (action == ACTION_SELECT_ITEM or action == 100) and self.getFocusId() == 5: + self.close() + SearchWindows.pop() + if len(SearchWindows) - 1 >= 0: + SearchWindows[-1].doModal() + else: + BusquedaWindow.doModal() + + elif action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + SearchWindows.pop() + if len(SearchWindows) - 1 >= 0: + SearchWindows[-1].doModal() + else: + BusquedaWindow.doModal() + + +class Actores(xbmcgui.WindowXMLDialog): + def __init__(self, *args, **kwargs): + self.tmdb_id = kwargs.get("tmdb_id") + self.item = kwargs.get("item") + self.fonts = kwargs.get("fonts") + + def onInit(self): + try: + self.control_list = self.getControl(6) + self.getControl(5).setNavigation(self.control_list, self.control_list, self.control_list, self.control_list) + self.getControl(3).setEnabled(0) + self.getControl(3).setVisible(0) + except: + pass + self.getControl(1).setLabel("[COLOR orange][B]Reparto[/B][/COLOR]") + self.getControl(5).setLabel("[COLOR red][B]Cerrar[/B][/COLOR]") + self.control_list.reset() + items = [] + + tipo = self.item.contentType + if tipo != "movie": + tipo = "tv" + otmdb = tmdb.Tmdb(id_Tmdb=self.tmdb_id, tipo=tipo) + actores = otmdb.result.get("credits", {}).get("cast", []) + + if self.item.contentType == "movie": + reparto = otmdb.result.get("credits", {}).get("crew", []) + else: + reparto = otmdb.result.get("created_by", []) + + for crew in reparto: + if crew.get('job', '') == 'Director' or self.item.contentType != "movie": + actores.insert(0, crew) + + for actor in actores: + name_info = "[COLOR yellow][B]%s[/B][/COLOR]" % actor["name"] + try: + name = "[COLOR salmon]%s[/COLOR] [COLOR papayawhip](%s)[/COLOR]" % (actor["name"], actor["character"]) + job = "actor" + except: + job = "Director" + name = "[COLOR salmon]%s[/COLOR] [COLOR gold](%s)[/COLOR]" % (actor["name"], job) + image = "https://image.tmdb.org/t/p/original" + if 
actor["profile_path"]: + image += actor["profile_path"] + else: + image = "http://i.imgur.com/dvMKE1V.jpg" + item = xbmcgui.ListItem(name) + try: + item.setArt({"thumb": image}) + except: + item.setThumbnailImage(image) + item.setProperty("id_actor", str(actor["id"])) + item.setProperty("name_info", name_info) + item.setProperty("thumbnail", image) + item.setProperty("job", job) + items.append(item) + + self.getControl(6).addItems(items) + self.setFocusId(6) + + def onAction(self, action): + if (action == ACTION_SELECT_ITEM or action == 100) and self.getFocusId() == 6: + selectitem = self.getControl(6).getSelectedItem() + id_actor = selectitem.getProperty("id_actor") + name_info = selectitem.getProperty("name_info") + thumbnail = selectitem.getProperty("thumbnail") + job = selectitem.getProperty("job") + dialog = platformtools.dialog_progress("[COLOR darkturquoise][B]Cargando nuevos datos[/B][/COLOR]", + "[COLOR yellow]Obteniendo datos del %s...[/COLOR]" % job.lower()) + + global ActorInfoWindow + ActorInfoWindow = ActorInfo(id=id_actor, name=name_info, thumbnail=thumbnail, item=self.item, + fonts=self.fonts, dialog=dialog, job=job) + ActorInfoWindow.doModal() + xbmc.sleep(400) + elif (action == ACTION_SELECT_ITEM or action == 100) and self.getFocusId() == 5: + self.close() + + elif action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +class ActorInfo(xbmcgui.WindowDialog): + def __init__(self, *args, **kwargs): + global exit_loop + if exit_loop: + exit_loop = False + self.id = kwargs.get('id') + self.nombre = kwargs.get('name') + self.thumbnail = kwargs.get('thumbnail') + self.item = kwargs.get('item') + self.fonts = kwargs.get('fonts') + self.job = kwargs.get('job') + + self.dialog = kwargs.get('dialog') + if self.item.contentType == "movie": + tipo = "movie" + search = {'url': 'person/%s' % self.id, 'language': 'es', 'append_to_response': 'movie_credits,images'} + else: + tipo = "tv" + search = {'url': 'person/%s' % self.id, 'language': 'es', 'append_to_response': 'tv_credits,images'} + + actor_tmdb = tmdb.Tmdb(discover=search) + if not actor_tmdb.result.get("biography") and actor_tmdb.result.get("imdb_id"): + data = scrapertools.downloadpage("http://www.imdb.com/name/%s/bio" % actor_tmdb.result["imdb_id"]) + info = scrapertools.find_single_match(data, '<div class="soda odd">.*?<p>(.*?)</p>') + if info: + bio = dhe(scrapertools.htmlclean(info.strip())) + try: + info_list = [] + while bio: + info_list.append(bio[:1900]) + bio = bio[1900:] + bio = [] + threads = {} + for i, info_ in enumerate(info_list): + t = Thread(target=translate, args=[info_, "es", "en", i, bio]) + t.setDaemon(True) + t.start() + threads[i] = t + + while threads: + for key, t in threads.items(): + if not t.isAlive(): + threads.pop(key) + xbmc.sleep(100) + if bio: + bio.sort(key=lambda x: x[0]) + biography = "" + for i, b in bio: + biography += b + actor_tmdb.result["biography"] = dhe(biography) + else: + bio = dhe(scrapertools.htmlclean(info.strip())) + actor_tmdb.result["biography"] = dhe(bio) + except: + bio = dhe(scrapertools.htmlclean(info.strip())) + actor_tmdb.result["biography"] = bio + else: + actor_tmdb.result["biography"] = "Sin información" + elif not actor_tmdb.result.get("biography"): + actor_tmdb.result["biography"] = "Sin información" + + self.setCoordinateResolution(2) + self.background = xbmcgui.ControlImage(30, -5, 1250, 730, 'http://imgur.com/7ccBX3g.png') + self.addControl(self.background) + self.background.setAnimations( + 
[('conditional', 'effect=fade start=0% end=100% delay=2000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.filmo = xbmcgui.ControlImage(330, 470, 230, 45, 'http://s6.postimg.org/rlktamqhd/filmography1.png') + self.addControl(self.filmo) + self.filmo.setAnimations([('conditional', + 'effect=zoom start=0,700 end=100% center=auto delay=5500 time=1000 condition=true tween=elastic',), + ('WindowClose', 'effect=zoom start=100% end=0% time=1000 condition=true',)]) + + self.title = xbmcgui.ControlTextBox(470, 30, 730, 250) + self.addControl(self.title) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=1500% time=1000 condition=true',)]) + self.title.setText(self.nombre) + self.info_actor = xbmcgui.ControlTextBox(470, 70, 750, 400) + self.addControl(self.info_actor) + self.info_actor.setAnimations( + [('conditional', 'effect=slide start=2000% end=-10% delay=5300 time=1500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=-2000% time=1000 condition=true',)]) + try: + self.info_actor.autoScroll(7000, 6000, 30000) + except: + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000, "http://i.imgur.com/mHgwcn3.png")') + self.info_actor.setText( + "[COLOR coral][B]%s[/B][/COLOR]" % actor_tmdb.result.get("biography", "Sin información")) + + self.titulos = [] + tipo_busqueda = "cast" + if self.job != "actor": + tipo_busqueda = "crew" + ids = [] + for entradas in actor_tmdb.result.get("%s_credits" % tipo, {}).get(tipo_busqueda, []): + if entradas["id"] in ids: + continue + else: + ids.append(entradas["id"]) + thumb = "https://image.tmdb.org/t/p/original" + if entradas["poster_path"]: + thumb += entradas["poster_path"] + else: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + if self.item.contentType == "movie": + self.titulos.append([entradas["id"], entradas.get("title", entradas.get("original_title", "")), thumb]) + else: + self.titulos.append([entradas["id"], entradas.get("title", entradas.get("original_title", "")), thumb]) + + self.dialog.update(40, '[COLOR rosybrown]Obteniendo filmografía...[/COLOR]') + self.mas_pelis = 8 + self.idps = [] + self.botones = [] + self.botones_maspelis = [] + self.focus = -1 + i = 0 + count = 0 + self.btn_left = xbmcgui.ControlButton(90, 490, 70, 29, '', "http://s6.postimg.org/i3pnobu6p/redarrow.png", + "http://s6.postimg.org/i3pnobu6p/redarrow.png") + self.addControl(self.btn_left) + self.btn_left.setAnimations([('conditional', + 'effect=zoom start=720,642,70,29 end=640,642,69,29 time=1000 loop=true tween=bounce condition=Control.HasFocus(' + str( + self.btn_left.getId()) + ')',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + self.btn_left.setVisible(False) + self.botones.append(self.btn_left) + for idp, peli, foto in self.titulos: + if count % 8 == 0: + i = 0 + self.image = xbmcgui.ControlButton(65 + i, 538, 135, 160, '', foto, foto) + self.neon = xbmcgui.ControlImage(60 + i, 525, 145, 186, "http://s6.postimg.org/x0jspnxch/buttons.png") + fadelabel = xbmcgui.ControlFadeLabel(67 + i, 698, 135, 50) + self.botones.append(self.image) + + if count < 8: + self.addControl(self.image) + self.image.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 delay=2000 time=900 tween=elastic condition=true',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% 
time=700 reversible=false',), ( + 'focus', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',), ]) + self.addControl(self.neon) + self.neon.setVisibleCondition('[Control.HasFocus(' + str(self.image.getId()) + ')]') + self.neon.setAnimations([('conditional', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce condition=Control.HasFocus(' + str( + self.image.getId()) + ')',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',), ]) + + self.addControl(fadelabel) + fadelabel.addLabel(peli) + fadelabel.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 delay=6200 time=900 tween=elastic condition=true',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + self.idps.append([self.image, peli, idp, foto]) + self.botones_maspelis.append([self.image, self.neon, fadelabel, peli]) + + i += 150 + count += 1 + xbmc.sleep(200) + if len(self.titulos) > 8: + self.btn_right = xbmcgui.ControlButton(1150, 495, 60, 27, '', + "http://s6.postimg.org/j4uhr70k1/greenarrow.png", + "http://s6.postimg.org/j4uhr70k1/greenarrow.png") + self.addControl(self.btn_right) + self.btn_right.setAnimations( + [('conditional', 'effect=slide start=-3000 end=0 delay=5000 time=2000 condition=true tween=bounce',), ( + 'conditional', + 'effect=zoom start=230,490, 60, 27, 29 end=1230,642,61,27 time=1000 loop=true tween=bounce condition=Control.HasFocus(' + str( + self.btn_right.getId()) + ')',), ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + self.botones.append(self.btn_right) + + xbmc.sleep(200) + self.dialog.update(80, '[COLOR plum]Recopilando imágenes...[/COLOR]') + self.images = [] + for images in actor_tmdb.result.get("images", {}).get("profiles", []): + imagen = "https://image.tmdb.org/t/p/original" + images["file_path"] + self.images.append(imagen) + + if len(self.images) <= 1 or (len(self.images) == 2 and self.images[0] == self.images[1]): + self.marco = xbmcgui.ControlImage(100, 23, 330, 425, + 'http://s6.postimg.org/nkmk7b8nl/marco_foto2_copia.png') + self.addControl(self.marco) + self.marco.setAnimations( + [('conditional', 'effect=rotatey start=100% end=0% delay=2400 time=1500 condition=true',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + self.thumb = xbmcgui.ControlImage(115, 40, 294, 397, self.thumbnail) + self.addControl(self.thumb) + self.thumb.setAnimations( + [('conditional', 'effect=rotatey start=100% end=0% delay=2380 time=1500 condition=true',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + xbmc.sleep(300) + else: + self.start_change = False + self.th = Thread(target=self.change_image) + self.th.setDaemon(True) + self.th.start() + + self.dialog.close() + + def change_image(self): + global exit_loop + imagenes = [] + while True: + xbmc.sleep(100) + for i, image in enumerate(self.images): + xbmc.sleep(400) + if i == 0: + xbmc.sleep(300) + self.marco = xbmcgui.ControlImage(100, 23, 330, 425, + 'http://s6.postimg.org/nkmk7b8nl/marco_foto2_copia.png') + self.thumb = xbmcgui.ControlImage(115, 40, 294, 397, "") + xbmc.sleep(500) + self.addControl(self.marco) + self.marco.setAnimations( + [('conditional', 'effect=rotatey start=100% end=0% delay=2300 time=1500 condition=true',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + self.addControl(self.thumb) + self.thumb.setImage(self.thumbnail) + self.thumb.setAnimations( + [('conditional', 'effect=rotatey start=100% 
end=0% delay=2280 time=1500 condition=true',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + xbmc.sleep(4000) + for img in imagenes: + self.removeControls([img[0], img[1]]) + imagenes = [] + imagenes.append([self.thumb, self.marco]) + if exit_loop: + break + + if exit_loop: + break + if i > 0: + if exit_loop: + break + xbmc.sleep(5200) + self.marco = xbmcgui.ControlImage(100, 23, 330, 425, + 'http://s6.postimg.org/4syg4krkh/marco_foto.png') + self.addControl(self.marco) + self.marco.setAnimations( + [('conditional', 'effect=rotatey start=100% end=0% delay=300 time=1500 condition=true',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + self.thumb = xbmcgui.ControlImage(115, 40, 294, 397, "") + self.addControl(self.thumb) + self.thumb.setImage(image, True) + self.thumb.setAnimations( + [('conditional', 'effect=rotatey start=100% end=0% delay=285 time=1500 condition=true',), + ('WindowClose', 'effect=fade end=0% time=1000 condition=true',)]) + imagenes.append([self.thumb, self.marco]) + + xbmc.sleep(400) + if exit_loop: + break + logger.info("salimos carajo xD") + + def onAction(self, action): + global exit_loop + if exit_loop: + exit_loop = False + + if action == ACTION_MOVE_RIGHT or action == ACTION_MOVE_DOWN: + if self.focus < len(self.botones) - 1: + self.focus += 1 + while True: + id_focus = str(self.botones[self.focus].getId()) + if xbmc.getCondVisibility('[Control.IsVisible(' + id_focus + ')]'): + self.setFocus(self.botones[self.focus]) + break + self.focus += 1 + if self.focus == len(self.botones): + break + + if action == ACTION_MOVE_LEFT or action == ACTION_MOVE_UP: + if self.focus > 0: + self.focus -= 1 + while True: + id_focus = str(self.botones[self.focus].getId()) + if xbmc.getCondVisibility('[Control.IsVisible(' + id_focus + ')]'): + self.setFocus(self.botones[self.focus]) + break + self.focus -= 1 + if self.focus == len(self.botones): + break + + if action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + exit_loop = True + self.close() + + if action == 105 or action == 6: + for boton, peli, id, poster2 in self.idps: + try: + if self.getFocusId() == boton.getId() and self.btn_right: + self.focus = len(self.botones) - 1 + xbmc.executebuiltin('SendClick(%s)' % self.btn_right.getId()) + except: + pass + + if action == 104 or action == 5: + for boton, peli, id, poster2 in self.idps: + try: + if self.getFocusId() == boton.getId() and self.btn_left: + self.setFocus(self.btn_left) + xbmc.executebuiltin('SendClick(%s)' % self.btn_left.getId()) + except: + pass + + def onControl(self, control): + try: + if control == self.btn_right: + i = 1 + count = 0 + for afoto, neon, fadelabel, peli in self.botones_maspelis: + if i > self.mas_pelis - 8 and i <= self.mas_pelis and count < 8: + self.removeControls([afoto, neon, fadelabel]) + count += 1 + elif i > self.mas_pelis and count < 16: + self.addControl(afoto) + afoto.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 time=900 delay=200 tween=elastic condition=true',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('focus', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',), ]) + self.addControl(neon) + neon.setVisibleCondition('[Control.HasFocus(' + str(afoto.getId()) + ')]') + neon.setAnimations([('conditional', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce 
condition=Control.HasFocus(' + str( + afoto.getId()) + ')',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',), ]) + self.addControl(fadelabel) + fadelabel.addLabel(peli) + fadelabel.setAnimations( + [('conditional', 'effect=rotatey start=200 end=0 time=900 tween=elastic condition=true',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + count += 1 + self.mas_pelis += 1 + xbmc.sleep(120) + i += 1 + + if self.mas_pelis > 8 and self.mas_pelis < 17: + self.btn_left.setVisible(True) + + if len(self.botones_maspelis) < self.mas_pelis + 1: + self.btn_right.setVisible(False) + self.setFocus(self.btn_left) + self.focus = 4 + else: + self.focus = len(self.botones) - 1 + self.setFocus(self.btn_right) + + xbmc.sleep(300) + except: + pass + try: + if control == self.btn_left: + i = 1 + count = 0 + if self.mas_pelis == len(self.botones_maspelis): + self.btn_right.setVisible(True) + len_pelis = self.mas_pelis + for afoto, neon, fadelabel, peli in self.botones_maspelis: + resta = 8 + (len_pelis % 8) + if resta == 8: + resta = 16 + resta2 = len_pelis % 8 + if not resta2: + resta2 = 8 + if i > len_pelis - resta and count < 8: + self.addControl(afoto) + afoto.setAnimations( + [('conditional', 'effect=rotatey start=200 end=0 time=900 tween=elastic condition=true',), + ('unfocus', 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('focus', 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',), ]) + self.addControl(neon) + neon.setVisibleCondition('[Control.HasFocus(' + str(afoto.getId()) + ')]') + neon.setAnimations([('conditional', + 'effect=rotate center=auto start=0% end=360% time=650 tween=bounce condition=Control.HasFocus(' + str( + afoto.getId()) + ')',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',), ]) + self.addControl(fadelabel) + fadelabel.addLabel(peli) + fadelabel.setAnimations( + [('conditional', 'effect=rotatey start=200 end=0 time=900 tween=elastic condition=true',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + count += 1 + elif i > len_pelis - resta2 and i <= len_pelis and count < 16: + self.removeControls([afoto, neon, fadelabel]) + count += 1 + self.mas_pelis -= 1 + i += 1 + + if self.mas_pelis == 8: + self.btn_left.setVisible(False) + except: + pass + + for boton, peli, id, poster2 in self.idps: + if control == boton: + dialog = platformtools.dialog_progress("[COLOR darkturquoise][B]Cargando nueva info[/B][/COLOR]", + "[COLOR lightyellow]Buscando en [/COLOR][COLOR springgreen][B]Tmdb.......[/B][/COLOR]") + tipo = self.item.contentType + if tipo != "movie": + tipo = "tv" + new_tmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo) + new_infolabels = new_tmdb.get_infoLabels() + + new_infolabels["cast"] = new_tmdb.result.get("credits_cast", []) + new_infolabels["crew"] = new_tmdb.result.get("credits_crew", []) + new_infolabels["created_by"] = new_tmdb.result.get("created_by", []) + global relatedWindow + relatedWindow = related(item=self.item, infolabels=new_infolabels, fonts=self.fonts, dialog=dialog) + relatedWindow.doModal() + + +class images(xbmcgui.WindowDialog): + def __init__(self, *args, **kwargs): + self.fanartv = kwargs.get("fanartv", {}) + self.tmdb = kwargs.get("tmdb", {}) + self.imdb = kwargs.get("imdb", []) + self.fa = kwargs.get("fa", []) + self.mal = kwargs.get("mal", []) + + self.imagenes = [] + for key, value in self.tmdb.iteritems(): + for 
detail in value: + self.imagenes.append('http://image.tmdb.org/t/p/w342' + detail["file_path"]) + for tipo, child in self.fanartv.iteritems(): + for imagen in child: + self.imagenes.append(imagen["url"].replace("/fanart/", "/preview/")) + for imagen, title in self.fa: + self.imagenes.append(imagen) + for imagen in self.imdb: + self.imagenes.append(imagen["src"]) + for imagen, title in self.mal: + self.imagenes.append(imagen) + + self.setCoordinateResolution(2) + self.shadow = xbmcgui.ControlImage(145, 10, 1011, 700, 'http://imgur.com/66VSLTo.png') + self.addControl(self.shadow) + self.shadow.setAnimations( + [('conditional', 'effect=slide start=1000% end=100% delay=672 time=2500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + imagen_inicial = self.imagenes[0].replace("/preview/", "/fanart/").replace("-s200", "-large").replace("/w342/", + "/original/") + self.background = xbmcgui.ControlImage(148, 17, 1003, 560, imagen_inicial, 2) + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=100% delay=670 time=2500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + + self.botones = [] + self.imgcount = 8 + self.urls = [] + self.botones_imgs = [] + self.focus = -1 + i = 0 + count = 0 + self.btn_left = xbmcgui.ControlButton(293, 550, 70, 29, '', "http://s6.postimg.org/i3pnobu6p/redarrow.png", + "http://s6.postimg.org/i3pnobu6p/redarrow.png") + self.addControl(self.btn_left) + self.btn_left.setAnimations( + [('conditional', 'effect=zoom start=-100 end=100 delay=5000 time=2000 condition=true tween=bounce',), ( + 'conditional', + 'effect=zoom start=293,642,70,29 end=243,642,69,29 time=1000 loop=true tween=bounce condition=Control.HasFocus(' + str( + self.btn_left.getId()) + ')',), ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.btn_left.setVisible(False) + self.botones.append(self.btn_left) + for img in self.imagenes: + img = img.replace(" ", "%20") + if count % 8 == 0: + i = 0 + self.image = xbmcgui.ControlButton(280 + i, 590, 100, 98, '', img, img) + self.neon = xbmcgui.ControlImage(280 + i, 590, 100, 98, "http://s6.postimg.org/x0jspnxch/buttons.png") + self.botones.append(self.image) + if count < 8: + self.addControl(self.image) + self.image.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 delay=3500 time=900 tween=elastic condition=true',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.addControl(self.neon) + self.neon.setVisibleCondition('[Control.HasFocus(' + str(self.image.getId()) + ')]') + self.neon.setAnimations([('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + self.urls.append([self.image, img]) + self.botones_imgs.append([self.image, self.neon]) + + i += 120 + count += 1 + xbmc.sleep(200) + if len(self.imagenes) > 8: + self.btn_right = xbmcgui.ControlButton(1150, 550, 60, 27, '', + "http://s6.postimg.org/j4uhr70k1/greenarrow.png", + "http://s6.postimg.org/j4uhr70k1/greenarrow.png") + self.addControl(self.btn_right) + self.btn_right.setAnimations( + [('conditional', 'effect=slide start=-3000 end=0 delay=3600 time=2000 condition=true tween=bounce',), ( + 'conditional', + 'effect=zoom start=230,490, 60, 27, 29 end=1190,642,61,27 time=1000 loop=true tween=bounce condition=Control.HasFocus(' + str( + self.btn_right.getId()) + ')',), + ('WindowClose', 
'effect=slide end=0,700% time=1000 condition=true',)]) + self.botones.append(self.btn_right) + xbmc.sleep(200) + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + if action == ACTION_MOVE_RIGHT or action == ACTION_MOVE_DOWN: + if self.focus < len(self.botones) - 1: + self.focus += 1 + while True: + id_focus = str(self.botones[self.focus].getId()) + if xbmc.getCondVisibility('[Control.IsVisible(' + id_focus + ')]'): + self.setFocus(self.botones[self.focus]) + break + self.focus += 1 + if self.focus == len(self.botones): + break + + if action == ACTION_MOVE_LEFT or action == ACTION_MOVE_UP: + if self.focus > 0: + self.focus -= 1 + while True: + id_focus = str(self.botones[self.focus].getId()) + if xbmc.getCondVisibility('[Control.IsVisible(' + id_focus + ')]'): + self.setFocus(self.botones[self.focus]) + break + self.focus -= 1 + if self.focus == len(self.botones): + break + + if action == 105 or action == 6: + for image, neon in self.botones_imgs: + try: + if self.getFocusId() == image.getId() and self.btn_right: + self.focus = len(self.botones) - 1 + xbmc.executebuiltin('SendClick(%s)' % self.btn_right.getId()) + except: + pass + + if action == 104 or action == 5: + for image, neon in self.botones_imgs: + try: + if self.getFocusId() == image.getId() and self.btn_left: + self.focus = 0 + xbmc.executebuiltin('SendClick(%s)' % self.btn_left.getId()) + except: + pass + + def onControl(self, control): + try: + if control == self.btn_right: + i = 1 + count = 0 + for image, neon in self.botones_imgs: + if i > self.imgcount - 8 and i <= self.imgcount and count < 8: + self.removeControls([image, neon]) + count += 1 + elif i > self.imgcount and count < 16: + self.addControl(image) + image.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 delay=600 time=900 tween=elastic condition=true',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.addControl(neon) + neon.setVisibleCondition('[Control.HasFocus(' + str(image.getId()) + ')]') + neon.setAnimations([('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + + count += 1 + self.imgcount += 1 + xbmc.sleep(120) + i += 1 + + if self.imgcount > 8 and self.imgcount < 17: + self.btn_left.setVisible(True) + + if len(self.botones_imgs) < self.imgcount + 1: + self.btn_right.setVisible(False) + + self.focus = -1 + xbmc.executebuiltin('Action(Right)') + xbmc.sleep(300) + except: + pass + + try: + if control == self.btn_left: + i = 1 + count = 0 + if self.imgcount == len(self.botones_imgs): + self.btn_right.setVisible(True) + + len_images = self.imgcount + for image, neon in self.botones_imgs: + resta = 8 + (len_images % 8) + if resta == 8: + resta = 16 + resta2 = len_images % 8 + if not resta2: + resta2 = 8 + if i > len_images - resta and count < 8: + self.addControl(image) + image.setAnimations([('conditional', + 'effect=rotatey start=200 end=0 delay=600 time=900 tween=elastic condition=true',), + ('unfocus', + 'effect=zoom center=auto start=70% end=100% time=700 reversible=false',), + ('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + self.addControl(neon) + neon.setVisibleCondition('[Control.HasFocus(' + str(image.getId()) + ')]') + neon.setAnimations([('WindowClose', 'effect=slide end=0,700% time=1000 condition=true',)]) + count += 1 + elif i > len_images - resta2 and i <= 
len_images and count < 16: + self.removeControls([image, neon]) + count += 1 + self.imgcount -= 1 + i += 1 + + if self.imgcount == 8: + self.btn_left.setVisible(False) + self.setFocus(self.botones[1]) + else: + self.setFocus(self.btn_left) + except: + pass + + for boton, url in self.urls: + if control == boton: + if "fanart.tv" in url: + url = url.replace("/preview/", "/fanart/") + elif "filmaffinity" in url: + url = url.replace("-s200", "-large") + elif "image.tmdb" in url: + url = url.replace("/w342/", "/original/") + self.background.setImage(url.replace(" ", "%20")) + + +class Trailer(xbmcgui.WindowXMLDialog): + def Start(self, item, trailers): + self.item = item + from channels import trailertools + self.video_url, self.windows = trailertools.buscartrailer(self.item.clone(), trailers=trailers) + + self.doModal() + + def onInit(self): + self.setCoordinateResolution(0) + if not self.video_url: + platformtools.dialog_notification("[COLOR crimson][B]Error[/B][/COLOR]", + "[COLOR tomato]Vídeo no disponible[/COLOR]", 2) + self.close() + elif self.video_url == "no_video": + self.close() + else: + new_video = False + while True: + if new_video: + self.doModal() + xlistitem = xbmcgui.ListItem(path=self.video_url, thumbnailImage=self.item.thumbnail) + pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) + pl.clear() + pl.add(self.video_url, xlistitem) + self.player = xbmc.Player() + self.player.play(pl, windowed=True) + while xbmc.Player().isPlaying(): + xbmc.sleep(1000) + self.close() + self.video_url = None + new_video = True + self.windows[-1].doModal() + try: + self.video_url = self.windows[-1].result + if not self.video_url: + break + except: + break + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.player.stop() + self.close() + + elif action == ACTION_MOVE_LEFT: + xbmc.executebuiltin('PlayerControl(Rewind)') + elif action == ACTION_MOVE_RIGHT: + xbmc.executebuiltin('PlayerControl(Forward)') + elif action == ACTION_SELECT_ITEM: + xbmc.executebuiltin('PlayerControl(Play)') + elif action == 199 or action == ACTION_SHOW_FULLSCREEN or action == 9: + xbmc.log("tuprimalafachaaa") + if action == 13: + self.close() + + def onClick(self, control): + if control == self.getControl(2): + self.player.pause() + + +def get_recomendations(item, infoLabels, recomendaciones): + tipo = item.contentType + if tipo != "movie": + tipo = "tv" + search = {'url': '%s/%s/recommendations' % (tipo, infoLabels['tmdb_id']), 'language': 'es', 'page': 1} + reco_tmdb = tmdb.Tmdb(discover=search, tipo=tipo, idioma_busqueda="es") + + for i in range(0, len(reco_tmdb.results)): + titulo = reco_tmdb.results[i].get("title", reco_tmdb.results[i].get("original_title", "")) + if not titulo: + titulo = reco_tmdb.results[i].get("name", reco_tmdb.results[i].get("original_name", "")) + idtmdb = str(reco_tmdb.results[i].get("id")) + thumbnail = reco_tmdb.results[i].get("poster_path", "") + if thumbnail: + thumbnail = 'http://image.tmdb.org/t/p/original' + thumbnail + recomendaciones.append([idtmdb, titulo, thumbnail]) + + +def get_filmaf(item, infoLabels): + title = infoLabels["title"].replace(" ", "+") + year = str(infoLabels.get("year", "")) + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = scrapertools.downloadpage(url) + + tipo = "película" + if item.contentType != "movie": + tipo = "serie" + url_filmaf = scrapertools.find_single_match(data, 
'<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = scrapertools.downloadpage(url_filmaf) + + rating = scrapertools.find_single_match(data, 'itemprop="ratingValue" content="([^"]+)"') + if not rating: + rating_filma = "[COLOR crimson][B]Sin puntuación[/B][/COLOR]" + else: + try: + if float(rating) >= 5 and float(rating) < 8: + rating_filma = "[COLOR springgreen][B]%s[/B][/COLOR]" % rating + elif float(rating) >= 8: + rating_filma = "[COLOR yellow][B]%s[/B][/COLOR]" % rating + else: + rating_filma = "[COLOR crimson][B]%s[/B][/COLOR]" % rating + except: + import traceback + logger.error(traceback.format_exc()) + rating_filma = "[COLOR crimson][B]%s[/B][/COLOR]" % rating + plot = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + plot = plot.replace("<br><br />", "\n") + + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + critica = "" + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n\n" % review + else: + critica = "[COLOR floralwhite][B]Esta %s no tiene críticas[/B][/COLOR]" % tipo + + else: + critica = "[COLOR floralwhite][B]Esta %s no tiene críticas[/B][/COLOR]" % tipo + rating_filma = "[COLOR crimson][B]Sin puntuación[/B][/COLOR]" + plot = "" + + return critica, rating_filma, plot + + +def fanartv(item, infoLabels, images={}): + from core import jsontools + headers = [['Content-Type', 'application/json']] + id_search = infoLabels.get('tvdb_id') + if item.contentType != "movie" and not id_search: + search = {'url': 'tv/%s/external_ids' % infoLabels['tmdb_id'], 'language': 'es'} + ob_tmdb = tmdb.Tmdb(discover=search, idioma_busqueda='es') + id_search = ob_tmdb.result.get("tvdb_id") + elif item.contentType == "movie": + id_search = infoLabels.get('tmdb_id') + + if id_search: + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % infoLabels['tmdb_id'] + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_search + data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) + if data and not "error message" in data: + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + images[key] = value + return images + + +def get_fonts(skin): + data_font = "" + fonts = {} + if "confluence" in skin or "estuary" in skin or "refocus" in skin: + fonts = {"10": "font10", "12": "font12", "16": "font16", "24": "font24_title", "30": "font30"} + elif "aeonmq" in skin: + fonts = {"10": "font_14", "12": "font_16", "16": "font_20", "24": "font_24", "30": "font_30"} + elif "madnox" in skin: + fonts = {"10": "Font_Reg22", "12": "Font_Reg26", "16": "Font_Reg32", "24": "Font_Reg38", + "30": "Font_ShowcaseMainLabel2_Caps"} + + if not fonts: + from core import filetools + try: + data_font = filetools.read(xbmc.translatePath(filetools.join('special://skin/1080i', 'Font.xml')), "r") + except: + try: + data_font = filetools.read(xbmc.translatePath(filetools.join('special://skin/720p', 
'Font.xml')), "r") + except: + pass + + if data_font: + fuentes = scrapertools.find_multiple_matches(data_font, + "<name>([^<]+)<\/name>(?:<![^<]+>|)\s*<filename>[^<]+<\/filename>\s*<size>(\d+)<\/size>") + sizes = [] + try: + for name, size in fuentes: + size = int(size) + sizes.append([size, name]) + sizes.sort() + fonts["10"] = sizes[0][1].lower() + check = False + if not 12 in sizes: + for size, name in sizes: + if size != fonts["10"]: + fonts["12"] = name.lower() + check = True + break + for size, name in sizes: + if size == 12 and not check: + fonts["12"] = name.lower() + elif size == 16: + fonts["16"] = name.lower() + elif size == 24: + fonts["24"] = name.lower() + elif size == 30: + fonts["30"] = name.lower() + break + elif size > 30 and size <= 33: + fonts["30"] = name.lower() + break + except: + pass + if not fonts: + fonts = {"10": "font10", "12": "font12", "16": "font16", "24": "font24", "30": "font30"} + + return fonts + + +def translate(to_translate, to_language="auto", language="auto", i=0, bio=[]): + '''Return the translation using google translate + you must shortcut the langage you define (French = fr, English = en, Spanish = es, etc...) + if you don't define anything it will detect it or use english by default + Example: + print(translate("salut tu vas bien?", "en")) + hello you alright?''' + import urllib2 + import urllib + agents = { + 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} + before_trans = 'class="t0">' + to_translate = urllib.quote(to_translate.replace(" ", "+")).replace("%2B", "+") + link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_language, language, to_translate) + request = urllib2.Request(link, headers=agents) + page = urllib2.urlopen(request).read() + result = page[page.find(before_trans) + len(before_trans):] + result = result.split("<")[0] + result = re.sub(r"d>|nn", "", result) + bio.append([i, result]) diff --git a/plugin.video.alfa/channels/inkapelis.json b/plugin.video.alfa/channels/inkapelis.json new file mode 100755 index 00000000..a8e94caa --- /dev/null +++ b/plugin.video.alfa/channels/inkapelis.json @@ -0,0 +1,92 @@ +{ + "id": "inkapelis", + "name": "Inkapelis", + "active": true, + "adult": false, + "language": "es", + "changes": [ + { + "date": "12/03/2017", + "description": "Reparados enlaces directos" + }, + { + "date": "27/02/2017", + "description": "Añadidos enlaces directos y adaptado al uso de httptools" + }, + { + "date": "16/01/2016", + "description": "Corregido por cambios en el enmascaramiento de enlaces" + } + ], + "version": 1, + "thumbnail": "http://i.imgur.com/I7MxHZI.png", + "banner": "inkapelis.png", + "categories": [ + "movie", + "vos", + "latino" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "filtro_defecto_peliculas", + "type": "label", + "enabled": true, + "visible": false + }, + { + 
"id": "pers_peliculas1", + "type": "label", + "enabled": true, + "visible": false + }, + { + "id": "pers_peliculas2", + "type": "label", + "enabled": true, + "visible": false + }, + { + "id": "pers_peliculas3", + "type": "label", + "enabled": true, + "visible": false + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/inkapelis.py b/plugin.video.alfa/channels/inkapelis.py new file mode 100755 index 00000000..a062b781 --- /dev/null +++ b/plugin.video.alfa/channels/inkapelis.py @@ -0,0 +1,393 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +__modo_grafico__ = config.get_setting("modo_grafico", "inkapelis") +__perfil__ = config.get_setting("perfil", "inkapelis") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E']] +color1, color2, color3, color4 = perfil[__perfil__] + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Novedades", action="entradas", url="http://www.inkapelis.com/", + extra="Novedades", text_color=color1)) + itemlist.append(item.clone(title="Estrenos", action="entradas", url="http://www.inkapelis.com/genero/estrenos/", + text_color=color1)) + itemlist.append(item.clone(title="Géneros", action="generos", url="http://www.inkapelis.com/", text_color=color1)) + itemlist.append(item.clone(title="Buscar...", action="search", text_color=color1)) + itemlist.append(item.clone(action="", title="")) + itemlist.append( + item.clone(action="filtro", title="Filtrar películas", url="http://www.inkapelis.com/?s=", text_color=color1)) + # Filtros personalizados para peliculas + for i in range(1, 4): + filtros = config.get_setting("pers_peliculas" + str(i), item.channel) + if filtros: + title = "Filtro Personalizado " + str(i) + new_item = item.clone() + new_item.values = filtros + itemlist.append( + new_item.clone(action="filtro", title=title, url="http://www.inkapelis.com/?s=", text_color=color2)) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == "peliculas": + item.url = "http://www.inkapelis.com/" + item.action = "entradas" + item.extra = "Novedades" + itemlist = entradas(item) + + if itemlist[-1].action == "entradas": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def search(item, texto): + logger.info() + itemlist = [] + item.extra = "Buscar" + item.url = "http://www.inkapelis.com/?s=%s" % texto + + try: + return entradas(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def generos(item): + logger.info() + itemlist = [] + + item.text_color = color1 + data = httptools.downloadpage(item.url).data + matches = 
scrapertools.find_multiple_matches(data, '<li class="cat-item cat-item-.*?><a href="([^"]+)".*?>(.*?)<b>') + + for scrapedurl, scrapedtitle in matches: + if scrapedtitle == "Eroticas +18 " and config.get_setting("adult_mode") != "0": + itemlist.append(item.clone(action="eroticas", title=scrapedtitle, url=scrapedurl)) + elif (scrapedtitle != "Estrenos ") and (scrapedtitle != "Próximos Estrenos "): + itemlist.append(item.clone(action="entradas", title=scrapedtitle, url=scrapedurl)) + + return itemlist + + +def filtro(item): + logger.info() + + list_controls = [] + valores = {} + strings = {} + # Se utilizan los valores por defecto/guardados o los del filtro personalizado + if not item.values: + valores_guardados = config.get_setting("filtro_defecto_peliculas", item.channel) + else: + valores_guardados = item.values + item.values = "" + + if valores_guardados: + dict_values = valores_guardados + else: + dict_values = None + if dict_values: + dict_values["filtro_per"] = 0 + + list_controls.append({'id': 'texto', 'label': 'Cadena de búsqueda', 'enabled': True, + 'type': 'text', 'default': '', 'visible': True}) + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + matches = scrapertools.find_multiple_matches(data, 'option value="">([^<]+)</option>(.*?)</select>') + i = 1 + for filtro_title, values in matches: + id = filtro_title.replace("A\xc3\xb1o", "year").lower() + filtro_title = filtro_title.replace("A\xc3\xb1o", "Año") + list_controls.append({'id': id, 'label': filtro_title, 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True}) + valores[id] = [] + valores[id].append('') + strings[filtro_title] = [] + list_controls[i]['lvalues'] = [] + list_controls[i]['lvalues'].append('Cualquiera') + strings[filtro_title].append('Cualquiera') + patron = '<option value="([^"]+)">([^<]+)</option>' + matches_v = scrapertools.find_multiple_matches(values, patron) + for value, key in matches_v: + list_controls[i]['lvalues'].append(key) + valores[id].append(value) + strings[filtro_title].append(key) + + i += 1 + + item.valores = valores + item.strings = strings + if "Filtro Personalizado" in item.title: + return filtrado(item, valores_guardados) + + list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, + 'type': 'label', 'default': '', 'visible': True}) + list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + list_controls.append({'id': 'filtro_per', 'label': 'Guardar filtro en acceso directo...', 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No guardar', 'Filtro 1', + 'Filtro 2', 'Filtro 3']}) + list_controls.append({'id': 'remove', 'label': 'Eliminar filtro personalizado...', 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True, 'lvalues': ['No eliminar', 'Filtro 1', + 'Filtro 2', 'Filtro 3']}) + + from platformcode import platformtools + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Filtra los resultados", item=item, callback='filtrado') + + +def filtrado(item, values): + values_copy = values.copy() + # Guarda el filtro para que sea el que se cargue por defecto + if "save" in values and values["save"]: + values_copy.pop("remove") + values_copy.pop("filtro_per") + values_copy.pop("save") + config.set_setting("filtro_defecto_peliculas", values_copy, item.channel) + + # Elimina el filtro personalizado elegido + if "remove" in values and 
values["remove"] != 0: + config.set_setting("pers_peliculas" + str(values["remove"]), "", item.channel) + + values_copy = values.copy() + # Guarda el filtro en un acceso directo personalizado + if "filtro_per" in values and values["filtro_per"] != 0: + index = "peliculas" + str(values["filtro_per"]) + values_copy.pop("filtro_per") + values_copy.pop("save") + values_copy.pop("remove") + config.set_setting("pers_" + index, values_copy, item.channel) + + genero = item.valores["genero"][values["genero"]] + year = item.valores["year"][values["year"]] + calidad = item.valores["calidad"][values["calidad"]] + idioma = item.valores["idioma"][values["idioma"]] + texto = values["texto"].replace(" ", "+") + + strings = [] + for key, value in dict(item.strings).items(): + key2 = key.replace("Año", "year").lower() + strings.append(key + ": " + value[values[key2]]) + strings.append("Texto: " + texto) + + item.valores = "Filtro: " + ", ".join(sorted(strings)) + item.strings = "" + item.url = "http://www.inkapelis.com/?anio=%s&genero=%s&calidad=%s&idioma=%s&s=%s" % \ + (year, genero, calidad, idioma, texto) + item.extra = "Buscar" + + return entradas(item) + + +def entradas(item): + logger.info() + + itemlist = [] + item.text_color = color2 + # Descarga la página + data = httptools.downloadpage(item.url).data + if "valores" in item and item.valores: + itemlist.append(item.clone(action="", title=item.valores, text_color=color4)) + + # IF en caso de busqueda + if item.extra == "Buscar": + # Extrae las entradas + entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>') + patron = '<div class="poster-media-card([^"]+)">.*?<a href="([^"]+)" title="([^"]+)">' \ + '.*?<img.*?src="([^"]+)"' + for match in entradas: + matches = scrapertools.find_multiple_matches(match, patron) + for calidad, scrapedurl, scrapedtitle, scrapedthumbnail in matches: + thumbnail = scrapedthumbnail.replace("w185", "original") + title = scrapedtitle + calidad = calidad.strip() + if calidad: + title += " [" + calidad + "]" + + itemlist.append(item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail, + contentTitle=scrapedtitle, fulltitle=scrapedtitle, + context=["buscar_trailer"], + contentType="movie")) + + else: + # Extrae las entradas + if item.extra == "Novedades": + data2 = data.split("<h3>Últimas Películas Agregadas</h3>", 1)[1] + entradas = scrapertools.find_multiple_matches(data2, '<div class="col-mt-5 postsh">(.*?)</div></div></div>') + else: + entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>') + + patron = '<div class="poster-media-card([^"]+)">.*?<a href="([^"]+)" title="([^"]+)">' \ + '.*?<div class="idiomes"><div class="(.*?)">.*?' 
\
+             '<img.*?src="([^"]+)".*?<span class="under-title">(.*?)</span>'
+        for match in entradas:
+            matches = scrapertools.find_multiple_matches(match, patron)
+            for calidad, url, scrapedtitle, idioma, scrapedthumbnail, category in matches:
+                # Salto entradas adultos
+                if category == "Eroticas +18":
+                    continue
+                idioma = idioma.strip()
+                calidad = calidad.strip()
+                scrapedtitle = scrapedtitle.replace("Ver Pelicula ", "")
+                title = scrapedtitle
+                if idioma:
+                    title += " [" + idioma + "]"
+                if calidad:
+                    title += " [" + calidad + "]"
+                if 'class="proximamente"' in match:
+                    title += " [Próximamente]"
+                thumbnail = scrapedthumbnail.replace("w185", "original")
+
+                itemlist.append(item.clone(action="findvideos", title=title, url=url, contentTitle=scrapedtitle,
+                                           fulltitle=scrapedtitle, thumbnail=thumbnail, context=["buscar_trailer"],
+                                           contentType="movie"))
+
+    # Extrae la marca de la siguiente página
+    next_page = scrapertools.find_single_match(data, '<span class="current">.*?<\/span><a href="([^"]+)"')
+    if next_page:
+        if item.extra == "Buscar":
+            next_page = next_page.replace('&amp;', '&')
+        itemlist.append(item.clone(action="entradas", title="Siguiente", url=next_page, text_color=color3))
+
+    return itemlist
+
+
+def eroticas(item):
+    logger.info()
+
+    itemlist = []
+    # Descarga la página
+    data = httptools.downloadpage(item.url).data
+
+    # Extrae las entradas
+    entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
+    patron = '<div class="poster-media-card([^"]+)">.*?<a href="([^"]+)" title="([^"]+)">' \
+             '.*?<div class="idiomes"><div class="(.*?)">.*?' \
+             '<img.*?src="([^"]+)"'
+    for match in entradas:
+        matches = scrapertools.find_multiple_matches(match, patron)
+        for calidad, url, scrapedtitle, idioma, scrapedthumbnail in matches:
+            title = scrapedtitle + " [" + idioma + "] [" + calidad + "]"
+            thumbnail = scrapedthumbnail.replace("w185", "original")
+
+            itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail,
+                                       extra="eroticas"))
+
+    # Extrae la marca de la siguiente página
+    next_page = scrapertools.find_single_match(data, '<span class="current">.*?<\/span><a href="([^"]+)"')
+    if next_page:
+        itemlist.append(item.clone(action="entradas", title="Siguiente", url=next_page))
+
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    item.text_color = color2
+
+    # Descarga la pagina
+    data = httptools.downloadpage(item.url).data
+    sinopsis = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>.*?>(.*?)</p>')
+    item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)
+    # Busca en tmdb si no se ha hecho antes
+    if item.extra != "eroticas":
+        if item.extra != "library":
+            year = scrapertools.find_single_match(data, 'Año de lanzamiento.*?"ab">(\d+)')
+            if year:
+                try:
+                    item.infoLabels['year'] = year
+                    # Obtenemos los datos basicos de todas las peliculas mediante multihilos
+                    tmdb.set_infoLabels(item, __modo_grafico__)
+                except:
+                    pass
+        # Usa el tráiler extraído de la propia página
+        trailer_url = scrapertools.find_single_match(data, 'id="trailerpro">.*?src="([^"]+)"')
+        item.infoLabels["trailer"] = trailer_url
+
+    patron = '<td><a href="([^"]+)".*?title="([^"]+)".*?<td>([^"]+)<\/td><td>([^"]+)<\/td>'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for url, server, idioma, calidad in matches:
+        if server == "Embed":
+            server = "Nowvideo"
+        if server == "Ul":
+            server = "Uploaded"
+        title = "%s [%s][%s]" % (server, idioma, calidad)
+        itemlist.append(item.clone(action="play", title=title,
url=url)) + + patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for id_embed, calidad, url in matches: + title = scrapertools.find_single_match(url, "(?:http://|https://|//)(.*?)(?:embed.|videoembed|)/") + if re.search(r"(?i)inkapelis|goo.gl", title): + title = "Directo" + idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed) + title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad) + itemlist.append(item.clone(action="play", title=title, url=url)) + + if itemlist: + if not config.get_setting('menu_trailer', item.channel): + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta", context="")) + if item.extra != "library": + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca", + action="add_pelicula_to_library", url=item.url, fulltitle=item.fulltitle, + infoLabels={'title': item.fulltitle}, text_color="green", extra="library")) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + if "drive.php?v=" in item.url or "//goo.gl/" in item.url: + data = httptools.downloadpage(item.url).data.replace("\\", "") + matches = scrapertools.find_multiple_matches(data, '"label":(.*?),.*?type":".*?/([^"]+)".*?file":"([^"]+)"') + for calidad, ext, url in matches: + title = ".%s %s [directo]" % (ext, calidad) + itemlist.insert(0, [title, url]) + else: + itemlist = servertools.find_video_items(data=item.url) + + return itemlist diff --git a/plugin.video.alfa/channels/javtasty.json b/plugin.video.alfa/channels/javtasty.json new file mode 100755 index 00000000..1eb1a0f1 --- /dev/null +++ b/plugin.video.alfa/channels/javtasty.json @@ -0,0 +1,28 @@ +{ + "id": "javtasty", + "name": "JavTasty", + "language": "es", + "active": true, + "adult": true, + "version": 1, + "changes": [ + { + "date": "29/04/2017", + "description": "Primera versión" + } + ], + "thumbnail": "http://i.imgur.com/OTYwbAa.png?1", + "categories": [ + "adult" + ], + "settings": [ + { + "id": "menu_info", + "type": "bool", + "label": "Mostrar menú antes de reproducir con imágenes", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/javtasty.py b/plugin.video.alfa/channels/javtasty.py new file mode 100755 index 00000000..6252b776 --- /dev/null +++ b/plugin.video.alfa/channels/javtasty.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- + +import urlparse + +from core import config +from core import httptools +from core import logger +from core import scrapertools + +host = "http://www.javtasty.com" + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/videos")) + itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/videos?o=tr")) + itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/videos?o=mv")) + itemlist.append(item.clone(action="lista", title="Ordenados por duración", url=host + "/videos?o=lg")) + itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories")) + itemlist.append(item.clone(title="Buscar...", action="search")) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import 
platformtools
+    ret = platformtools.show_channel_settings()
+    platformtools.itemlist_refresh()
+    return ret
+
+
+def search(item, texto):
+    logger.info()
+    item.url = "%s/search?search_query=%s&search_type=videos" % (host, texto)
+    item.extra = texto
+    try:
+        return lista(item)
+    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+def lista(item):
+    logger.info()
+    itemlist = []
+
+    # Descarga la pagina
+    data = httptools.downloadpage(item.url).data
+
+    action = "play"
+    if config.get_setting("menu_info", "javtasty"):
+        action = "menu_info"
+
+    # Extrae las entradas
+    patron = '<div class="well wellov well-sm".*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"(.*?)<div class="duration">(?:.*?</i>|)\s*([^<]+)<'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedtitle, quality, duration in matches:
+        scrapedurl = urlparse.urljoin(host, scrapedurl)
+        scrapedtitle = scrapedtitle.strip()
+        if duration:
+            scrapedtitle = "%s - %s" % (duration.strip(), scrapedtitle)
+
+        if '>HD<' in quality:
+            scrapedtitle += " [COLOR red][HD][/COLOR]"
+
+        itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+                                   fanart=scrapedthumbnail))
+
+    # Extrae la marca de siguiente página
+    next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="prevnext">')
+    if next_page:
+        next_page = next_page.replace("&amp;", "&")
+        itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
+
+    return itemlist
+
+
+def categorias(item):
+    logger.info()
+    itemlist = []
+
+    # Descarga la pagina
+    data = httptools.downloadpage(item.url).data
+
+    # Extrae las entradas
+    patron = '<div class="col-sm-4.*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+        scrapedurl = urlparse.urljoin(host, scrapedurl)
+        scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
+        itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+                                   fanart=scrapedthumbnail))
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+
+    videourl = scrapertools.find_single_match(data, "var video_sd\s*=\s*'([^']+)'")
+    if videourl:
+        itemlist.append(['.mp4 [directo]', videourl])
+    videourl = scrapertools.find_single_match(data, "var video_hd\s*=\s*'([^']+)'")
+    if videourl:
+        itemlist.append(['.mp4 HD [directo]', videourl])
+
+    if item.extra == "play_menu":
+        return itemlist, data
+
+    return itemlist
+
+
+def menu_info(item):
+    logger.info()
+    itemlist = []
+
+    video_urls, data = play(item.clone(extra="play_menu"))
+    itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls))
+
+    bloque = scrapertools.find_single_match(data, '<div class="carousel-inner"(.*?)<div class="container">')
+    matches = scrapertools.find_multiple_matches(bloque, 'src="([^"]+)"')
+    for i, img in enumerate(matches):
+        if i == 0:
+            continue
+        title = "Imagen %s" % (str(i))
+        itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/javus.json b/plugin.video.alfa/channels/javus.json
new file mode 100755
index 00000000..1ce77a1b
--- /dev/null
+++ b/plugin.video.alfa/channels/javus.json
@@ -0,0 +1,33 @@
+{
+  "id": "javus",
+  "name": "javus.net",
+  "active": true,
+  "adult": true,
+  "language": "es",
+  "thumbnail": "https://s15.postimg.org/pzd3h4vy3/javus.png",
+  "banner": "https://s21.postimg.org/5pqzedp2f/javus_banner.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "01/12/2016",
+      "description": "Release."
+    }
+  ],
+  "categories": [
+    "adult"
+  ],
+  "settings": [
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en busqueda global",
+      "default": false,
+      "enabled": false,
+      "visible": false
+    }
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/javus.py b/plugin.video.alfa/channels/javus.py
new file mode 100755
index 00000000..fd812fb7
--- /dev/null
+++ b/plugin.video.alfa/channels/javus.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from core import httptools
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+host = 'http://javus.net/'
+
+
+def mainlist(item):
+    if item.url == "":
+        item.url = host
+
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = '<a href="([^"]+)" title="([^"]+)" rel="nofollow" class="post-image post-image-left".*?\s*<div class="featured-thumbnail"><img width="203" height="150" src="([^"]+)" class="attachment-featured size-featured wp-post-image" alt="" title="" \/><\/div>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
+        url = scrapedurl
+        title = scrapedtitle.decode('utf-8')
+        thumbnail = scrapedthumbnail
+        fanart = ''
+
+        itemlist.append(
+            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=fanart))
+
+    # Paginacion
+    title = ''
+    siguiente = scrapertools.find_single_match(data, "<a rel='nofollow' href='([^']+)' class='inactive'>Next <")
+    ultima = scrapertools.find_single_match(data, "<a rel='nofollow' class='inactive' href='([^']+)'>Last <")
+    if siguiente != ultima:
+        titlen = 'Pagina Siguiente >>> '
+        fanart = ''
+        itemlist.append(Item(channel=item.channel, action="mainlist", title=titlen, url=siguiente, fanart=fanart))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+
+    if texto != '':
+        return mainlist(item)
+    else:
+        return []
diff --git a/plugin.video.alfa/channels/jkanime.json b/plugin.video.alfa/channels/jkanime.json
new file mode 100755
index 00000000..59b4b84c
--- /dev/null
+++ b/plugin.video.alfa/channels/jkanime.json
@@ -0,0 +1,33 @@
+{
+  "id": "jkanime",
+  "name": "JKanime",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "jkanime.png",
+  "banner": "jkanime.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "01/07/2016",
+      "description": "Eliminado código innecesario."
+ } + ], + "categories": [ + "anime" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/jkanime.py b/plugin.video.alfa/channels/jkanime.py new file mode 100755 index 00000000..53181ff1 --- /dev/null +++ b/plugin.video.alfa/channels/jkanime.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, action="ultimos_capitulos", title="Últimos Capitulos", url="http://jkanime.net/")) + itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos", url="http://jkanime.net/")) + itemlist.append(Item(channel=item.channel, action="letras", title="Listado Alfabetico", url="http://jkanime.net/")) + itemlist.append(Item(channel=item.channel, action="generos", title="Listado por Genero", url="http://jkanime.net/")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar")) + + return itemlist + + +def ultimos_capitulos(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + data = scrapertools.get_match(data, '<ul class="ratedul">.+?</ul>') + + data = data.replace('\t', '') + data = data.replace('\n', '') + data = data.replace('/thumbnail/', '/image/') + + patron = '<img src="(http://cdn.jkanime.net/assets/images/animes/.+?)" .+?href="(.+?)">(.+?)<.+?span>(.+?)<' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedthumb, scrapedurl, scrapedtitle, scrapedepisode in matches: + title = scrapedtitle.strip() + scrapedepisode + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = scrapedthumb + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot)) + + return itemlist + + +def search(item, texto): + logger.info() + if item.url == "": + item.url = "http://jkanime.net/buscar/%s/" + texto = texto.replace(" ", "+") + item.url = item.url % texto + try: + return series(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def ultimos(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + data = scrapertools.get_match(data, '<ul class="latestul">(.*?)</ul>') + + patron = '<a href="([^"]+)">([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot)) + + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + data = scrapertools.get_match(data, '<div class="genres">(.*?)</div>') + + patron = '<a href="([^"]+)">([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle + url = 
urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="series", title=title, url=url, thumbnail=thumbnail, plot=plot, + viewmode="movie_with_plot")) + + return itemlist + + +def letras(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + data = scrapertools.get_match(data, '<ul class="animelet">(.*?)</ul>') + + patron = '<a href="([^"]+)">([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="series", title=title, url=url, thumbnail=thumbnail, plot=plot, + viewmode="movie_with_plot")) + + return itemlist + + +def series(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + + # Extrae las entradas + ''' + <table class="search"> + <tr> + <td rowspan="2"> + <a href="http://jkanime.net/basilisk-kouga-ninpou-chou/"><img src="http://jkanime.net/assets/images/animes/thumbnail/basilisk-kouga-ninpou-chou.jpg" width="50" /></a> + </td> + <td><a class="titl" href="http://jkanime.net/basilisk-kouga-ninpou-chou/">Basilisk: Kouga Ninpou Chou</a></td> + <td rowspan="2" style="width:50px; text-align:center;">Serie</td> + <td rowspan="2" style="width:50px; text-align:center;" >24 Eps</td> + </tr> + <tr> + <td><p>Basilisk, considerada una de las mejores series del genero ninja, nos narra la historia de dos clanes ninja separados por el odio entre dos familias. 
Los actuales representantes, Kouga Danjo del clan Kouga y Ogen del clan… <a class="next" href="http://jkanime.net/basilisk-kouga-ninpou-chou/">seguir leyendo</a></p></td> + </tr> + </table> + ''' + patron = '<table class="search[^<]+' + patron += '<tr[^<]+' + patron += '<td[^<]+' + patron += '<a href="([^"]+)"><img src="([^"]+)"[^<]+</a>[^<]+' + patron += '</td>[^<]+' + patron += '<td><a[^>]+>([^<]+)</a></td>[^<]+' + patron += '<td[^>]+>([^<]+)</td>[^<]+' + patron += '<td[^>]+>([^<]+)</td>[^<]+' + patron += '</tr>[^<]+' + patron += '<tr>[^<]+' + patron += '<td>(.*?)</td>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedthumbnail, scrapedtitle, line1, line2, scrapedplot in matches: + title = scrapedtitle.strip() + " (" + line1.strip() + ") (" + line2.strip() + ")" + extra = line2.strip() + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + thumbnail = thumbnail.replace("thumbnail", "image") + plot = scrapertools.htmlclean(scrapedplot) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail, + plot=plot, extra=extra)) + + try: + siguiente = scrapertools.get_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes') + scrapedurl = urlparse.urljoin(item.url, siguiente) + scrapedtitle = ">> Pagina Siguiente" + scrapedthumbnail = "" + scrapedplot = "" + + itemlist.append( + Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + plot=scrapedplot, folder=True, viewmode="movie_with_plot")) + except: + pass + return itemlist + + +def getPagesAndEpisodes(data): + results = re.findall('href="#pag([0-9]+)">[0-9]+ - ([0-9]+)', data) + if results: + return int(results[-1][0]), int(results[-1][1]) + return 1, 0 + + +def episodios(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + + scrapedplot = scrapertools.get_match(data, '<meta name="description" content="([^"]+)"/>') + scrapedthumbnail = scrapertools.find_single_match(data, '<div class="separedescrip">.*?src="([^"]+)"') + + idserie = scrapertools.get_match(data, "ajax/pagination_episodes/(\d+)/") + logger.info("idserie=" + idserie) + if " Eps" in item.extra and not "Desc" in item.extra: + caps_x = item.extra + caps_x = caps_x.replace(" Eps", "") + capitulos = int(caps_x) + paginas = capitulos / 10 + (capitulos % 10 > 0) + else: + paginas, capitulos = getPagesAndEpisodes(data) + + logger.info("idserie=" + idserie) + for numero in range(1, paginas + 1): + + numero_pagina = str(numero) + headers = [] + headers.append( + ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:16.0) Gecko/20100101 Firefox/16.0"]) + headers.append(["Referer", item.url]) + data2 = scrapertools.cache_page( + "http://jkanime.net/ajax/pagination_episodes/" + idserie + "/" + numero_pagina + "/") + logger.info("data2=" + data2) + + ''' + [{"number":"1","title":"Rose of Versailles - 1"},{"number":"2","title":"Rose of Versailles - 2"},{"number":"3","title":"Rose of Versailles - 3"},{"number":"4","title":"Rose of Versailles - 4"},{"number":"5","title":"Rose of Versailles - 5"},{"number":"6","title":"Rose of Versailles - 6"},{"number":"7","title":"Rose of Versailles - 7"},{"number":"8","title":"Rose of Versailles - 8"},{"number":"9","title":"Rose of Versailles - 
9"},{"number":"10","title":"Rose of Versailles - 10"}] + [{"id":"14199","title":"GetBackers - 1","number":"1","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14200","title":"GetBackers - 2","number":"2","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14201","title":"GetBackers - 3","number":"3","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14202","title":"GetBackers - 4","number":"4","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14203","title":"GetBackers - 5","number":"5","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14204","title":"GetBackers - 6","number":"6","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14205","title":"GetBackers - 7","number":"7","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14206","title":"GetBackers - 8","number":"8","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14207","title":"GetBackers - 9","number":"9","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14208","title":"GetBackers - 10","number":"10","animes_id":"122","timestamp":"2012-01-04 16:59:30"}] + ''' + patron = '"number"\:"(\d+)","title"\:"([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data2) + + # http://jkanime.net/get-backers/1/ + for numero, scrapedtitle in matches: + title = scrapedtitle.strip() + url = urlparse.urljoin(item.url, numero) + thumbnail = scrapedthumbnail + plot = scrapedplot + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, + fanart=thumbnail, plot=plot)) + + if len(itemlist) == 0: + try: + porestrenar = scrapertools.get_match(data, + '<div[^<]+<span class="labl">Estad[^<]+</span[^<]+<span[^>]+>Por estrenar</span>') + itemlist.append(Item(channel=item.channel, action="findvideos", title="Serie por estrenar", url="", + thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot, + server="directo", folder=False)) + except: + pass + + return itemlist diff --git a/plugin.video.alfa/channels/lacajita.json b/plugin.video.alfa/channels/lacajita.json new file mode 100755 index 00000000..02466f55 --- /dev/null +++ b/plugin.video.alfa/channels/lacajita.json @@ -0,0 +1,53 @@ +{ + "id": "lacajita", + "name": "LaCajita", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "16/05/2017", + "description": "Primera version" + } + ], + "thumbnail": "http://i.imgur.com/LVdupxc.png", + "categories": [ + "movie", + "latino", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en búsqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "color": "0xFFd50b0b", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/lacajita.py b/plugin.video.alfa/channels/lacajita.py new file mode 100755 index 00000000..ef943e49 --- /dev/null +++ b/plugin.video.alfa/channels/lacajita.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools 
+from core import servertools +from core import tmdb +from core.item import Item + +__modo_grafico__ = config.get_setting("modo_grafico", "lacajita") +__perfil__ = config.get_setting("perfil", "lacajita") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']] + +if __perfil__ - 1 >= 0: + color1, color2, color3, color4, color5 = perfil[__perfil__ - 1] +else: + color1 = color2 = color3 = color4 = color5 = "" +host = "http://lacajita.xyz" + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + itemlist.append(item.clone(title="Novedades DVD", action="")) + item.text_color = color2 + itemlist.append(item.clone(title=" En Español", action="entradas", url=host + "/estrenos-dvd/es/", page=0)) + itemlist.append(item.clone(title=" En Latino", action="entradas", url=host + "/estrenos-dvd/la/", page=0)) + itemlist.append(item.clone(title=" En VOSE", action="entradas", url=host + "/estrenos-dvd/vos/", page=0)) + item.text_color = color1 + itemlist.append(item.clone(title="Estrenos", action="")) + item.text_color = color2 + itemlist.append(item.clone(title=" En Español", action="entradas", url=host + "/estrenos/es/", page=0)) + itemlist.append(item.clone(title=" En Latino", action="entradas", url=host + "/estrenos/la/", page=0)) + itemlist.append(item.clone(title=" En VOSE", action="entradas", url=host + "/estrenos/vos/", page=0)) + item.text_color = color1 + itemlist.append(item.clone(title="Más Vistas", action="updated", url=host + "/listado-visto/", page=0)) + itemlist.append(item.clone(title="Actualizadas", action="updated", url=host + "/actualizado/", page=0)) + item.text_color = color5 + itemlist.append(item.clone(title="Por género", action="indices")) + itemlist.append(item.clone(title="Buscar...", action="search", text_color=color4)) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + try: + item.url = "%s/search.php?q1=%s" % (host, texto) + item.action = "busqueda" + item.page = 0 + return busqueda(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def entradas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, '<ul class="nav navbar-nav">(.*?)</ul>') + + patron = "<li.*?href='([^']+)'.*?src='([^']+)'.*?>([^<]+)</p>(.*?)</button>" + matches = scrapertools.find_multiple_matches(bloque, patron) + matches_ = matches[item.page:item.page + 20] + for scrapedurl, scrapedthumbnail, scrapedtitle, data_idioma in matches_: + idiomas = [] + if "es.png" in data_idioma: + idiomas.append("ESP") + if "la.png" in data_idioma: + idiomas.append("LAT") + if "vos.png" in data_idioma: + idiomas.append("VOSE") + + titulo = scrapedtitle + if idiomas: + titulo += " [%s]" % "/".join(idiomas) + + scrapedurl = host + scrapedurl + scrapedthumbnail = scrapedthumbnail.replace("/w342/", "/w500/") + filtro_thumb = 
scrapedthumbnail.replace("https://image.tmdb.org/t/p/w500", "") + filtro = {"poster_path": filtro_thumb}.items() + itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo, + contentTitle=scrapedtitle, infoLabels={'filtro': filtro}, text_color=color2, + thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + if len(matches) > item.page + 20: + page = item.page + 20 + itemlist.append(item.clone(title=">> Página Siguiente", page=page, text_color=color3)) + + return itemlist + + +def updated(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, '<ul class="nav navbar-nav">(.*?)</ul>') + + matches = scrapertools.find_multiple_matches(bloque, "<li.*?href='([^']+)'.*?src='([^']+)'.*?>([^<]+)</p>") + matches_ = matches[item.page:item.page + 20] + for scrapedurl, scrapedthumbnail, scrapedtitle in matches_: + if scrapedtitle == "Today": + continue + scrapedurl = host + scrapedurl + scrapedthumbnail = scrapedthumbnail.replace("/w342/", "/w500/") + filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w500", "") + filtro = {"poster_path": filtro_thumb}.items() + itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=scrapedtitle, + contentTitle=scrapedtitle, infoLabels={'filtro': filtro}, text_color=color2, + thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + if len(matches) > item.page + 20: + page = item.page + 20 + itemlist.append(item.clone(title=">> Página Siguiente", page=page, text_color=color3)) + else: + next = scrapertools.find_single_match(data, '<a href="([^"]+)">>>') + if next: + next = item.url + next + itemlist.append(item.clone(title=">> Página Siguiente", page=0, url=next, text_color=color3)) + + return itemlist + + +def busqueda(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, '<ul class="nav navbar-nav">(.*?)</ul>') + + matches = scrapertools.find_multiple_matches(bloque, "<li.*?href='([^']+)'.*?src='([^']+)'.*?>\s*([^<]+)</a>") + matches_ = matches[item.page:item.page + 25] + for scrapedurl, scrapedthumbnail, scrapedtitle in matches_: + scrapedurl = host + scrapedurl + scrapedthumbnail = scrapedthumbnail.replace("/w342/", "/w500/") + if re.search(r"\(\d{4}\)", scrapedtitle): + title = scrapedtitle.rsplit("(", 1)[0] + year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)') + infoLabels = {'year': year} + else: + title = scrapedtitle + filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w500", "") + filtro = {"poster_path": filtro_thumb}.items() + infoLabels = {'filtro': filtro} + itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=scrapedtitle, + contentTitle=title, infoLabels=infoLabels, text_color=color2, + thumbnail=scrapedthumbnail, contentType="movie", fulltitle=title)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + if len(matches) > item.page + 25: + page = item.page + 25 + itemlist.append(item.clone(title=">> Página Siguiente", page=page, text_color=color3)) + else: + next = scrapertools.find_single_match(data, '<a href="([^"]+)">>>') + if next: + next = item.url + next + itemlist.append(item.clone(title=">> Página Siguiente", page=0, url=next, text_color=color3)) + + return itemlist + + 
+def indices(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(host).data + + matches = scrapertools.find_multiple_matches(data, + '<li><a href="([^"]+)"><i class="fa fa-bookmark-o"></i>\s*(.*?)</a>') + for scrapedurl, scrapedtitle in matches: + scrapedurl = host + scrapedurl + itemlist.append(item.clone(action="updated", url=scrapedurl, title=scrapedtitle, page=0)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '<div class="grid_content2 sno">.*?src="([^"]+)".*?href="([^"]+)".*?src=\'(.*?)(?:.png|.jpg)\'' \ + '.*?<span>.*?<span>(.*?)</span>.*?<span>(.*?)</span>' + matches = scrapertools.find_multiple_matches(data, patron) + for idioma, url, servidor, calidad, detalle in matches: + url = host + url + servidor = servidor.rsplit("/", 1)[1] + servidor = servidor.replace("uploaded", "uploadedto").replace("streamin.to", "streaminto") + if "streamix" in servidor: + servidor = "streamixcloud" + try: + servers_module = __import__("servers." + servidor) + mostrar_server = servertools.is_server_enabled(servidor) + if not mostrar_server: + continue + except: + continue + + if "es.png" in idioma: + idioma = "ESP" + elif "la.png" in idioma: + idioma = "LAT" + elif "vos.png" in idioma: + idioma = "VOSE" + + title = "%s - %s - %s" % (servidor, idioma, calidad) + if detalle: + title += " (%s)" % detalle + + itemlist.append(item.clone(action="play", url=url, title=title, server=servidor, text_color=color3)) + + if item.extra != "findvideos" and config.get_videolibrary_support(): + itemlist.append(item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library", + extra="findvideos", text_color="green")) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + url = scrapertools.find_single_match(data, 'window.open\("([^"]+)"') + enlaces = servertools.findvideosbyserver(url, item.server) + if enlaces: + itemlist.append(item.clone(action="play", url=enlaces[0][1])) + else: + enlaces = servertools.findvideos(url, True) + if enlaces: + itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1])) + + return itemlist diff --git a/plugin.video.alfa/channels/locopelis.json b/plugin.video.alfa/channels/locopelis.json new file mode 100755 index 00000000..728a30dd --- /dev/null +++ b/plugin.video.alfa/channels/locopelis.json @@ -0,0 +1,79 @@ +{ + "id": "locopelis", + "name": "LOCOPELIS", + "compatible": { + "addon_version": "4.3" + }, + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s31.postimg.org/5worjw2nv/locopelis.png", + "banner": "https://s31.postimg.org/ng87bb9jv/locopelis_banner.png", + "version": 1, + "changes": [ + { + "date": "24/06/2017", + "description": "Cambios para autoplay" + }, + { + "date": "06/06/2017", + "description": "Compatibilidad con autoplay" + }, + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "06/12/2016", + "description": "Release." 
+    }
+  ],
+  "categories": [
+    "latino",
+    "movie"
+  ],
+  "settings": [
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en busqueda global",
+      "default": false,
+      "enabled": false,
+      "visible": false
+    },
+    {
+      "id": "filter_languages",
+      "type": "list",
+      "label": "Mostrar enlaces en idioma...",
+      "default": 0,
+      "enabled": true,
+      "visible": true,
+      "lvalues": [
+        "No filtrar",
+        "Latino",
+        "Español",
+        "VOS"
+      ]
+    },
+    {
+      "id": "include_in_newest_peliculas",
+      "type": "bool",
+      "label": "Incluir en Novedades - Peliculas",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    },
+    {
+      "id": "include_in_newest_infantiles",
+      "type": "bool",
+      "label": "Incluir en Novedades - Infantiles",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    }
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/locopelis.py b/plugin.video.alfa/channels/locopelis.py
new file mode 100755
index 00000000..fc4f4058
--- /dev/null
+++ b/plugin.video.alfa/channels/locopelis.py
@@ -0,0 +1,417 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urlparse
+
+from channels import autoplay
+from channels import filtertools
+from core import config
+from core import httptools
+from core import logger
+from core import scrapertools
+from core import tmdb
+from core.item import Item
+
+IDIOMAS = {'Latino': 'Latino', 'Español': 'Español', 'Sub español': 'VOS'}
+list_language = IDIOMAS.values()
+list_quality = []
+list_servers = [
+    'openload',
+]
+
+host = 'http://www.locopelis.com/'
+
+audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]',
+         'Sub Español': '[COLOR red]SUB ESPAÑOL[/COLOR]'}
+
+
+def mainlist(item):
+    logger.info()
+    autoplay.init(item.channel, list_servers, list_quality)
+    itemlist = []
+
+    itemlist.append(Item(channel=item.channel,
+                         title="Peliculas",
+                         action="todas",
+                         url=host,
+                         thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
+                         fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png'
+                         ))
+
+    itemlist.append(Item(channel=item.channel,
+                         title="Generos",
+                         action="generos",
+                         url=host,
+                         thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
+                         fanart='https://s3.postimg.org/5s9jg2wtf/generos.png'
+                         ))
+
+    itemlist.append(Item(channel=item.channel,
+                         title="Alfabetico",
+                         action="letras",
+                         url=host,
+                         thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png',
+                         fanart='https://s17.postimg.org/fwi1y99en/a-z.png',
+                         extra='letras'
+                         ))
+
+    itemlist.append(Item(channel=item.channel,
+                         title="Ultimas Agregadas",
+                         action="ultimas",
+                         url=host,
+                         thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
+                         fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png'
+                         ))
+
+    itemlist.append(Item(channel=item.channel,
+                         title="Mas Vistas",
+                         action="todas",
+                         url=host + 'pelicula/peliculas-mas-vistas',
+                         thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
+                         fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png'
+                         ))
+
+    itemlist.append(Item(channel=item.channel,
+                         title="Mas Votadas",
+                         action="todas",
+                         url=host + 'pelicula/peliculas-mas-votadas',
+                         thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png',
+                         fanart='https://s7.postimg.org/9kg1nthzf/votadas.png'
+                         ))
+
+    itemlist.append(Item(channel=item.channel,
+                         title="Estrenos DVD",
+                         action="todas",
+                         url=host + 'pelicula/ultimas-peliculas/estrenos-dvd',
+                         thumbnail='https://s1.postimg.org/m89hus1tb/dvd.png',
+                         fanart='https://s1.postimg.org/m89hus1tb/dvd.png'
+                         ))
+
+    itemlist.append(Item(channel=item.channel,
+                         title="Actualizadas",
action="todas", + url=host + 'pelicula/ultimas-peliculas/ultimas/actualizadas', + thumbnail='https://s16.postimg.org/57evw0wo5/actualizadas.png', + fanart='https://s16.postimg.org/57evw0wo5/actualizadas.png' + )) + + itemlist.append(Item(channel=item.channel, + title="Buscar", + action="search", + url=host + '/buscar/?q=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def todas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + + patron = '<h2 class="titpeli bold ico_b">.*?<\/h2>.*?' + patron += '<a href="([^"]+)" title="([^"]+)">.*?' + patron += '<img src="([^"]+)" alt=.*?><\/a>.*?' + patron += '<p>([^<]+)<\/p>.*?' + patron += '<div class="stars f_left pdtop10px"><strong>Genero<\/strong>:.*?, (.*?)<\/div>.*?' + patron += '<div class=.*?>Idioma<\/strong>:.*?img src=.*?>([^<]+)<\/div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot, scrapedyear, scrapedidioma in matches: + + year = scrapedyear + idioma_id = scrapertools.decodeHtmlentities(scrapedidioma.strip()) + idioma = scrapertools.decodeHtmlentities(idioma_id) + # if idioma == 'Español': + # idioma ='Español' + logger.debug('idioma original: %s' % idioma_id) + logger.debug('idioma: %s' % idioma) + if idioma in audio: + idioma = audio[idioma] + + url = scrapedurl + if idioma != '': + title = scrapedtitle + ' (' + idioma + ')' + ' (' + year + ')' + else: + title = scrapedtitle + ' (' + year + ')' + thumbnail = scrapedthumbnail + plot = scrapedplot + contentTitle = scrapedtitle + fanart = 'https://s31.postimg.org/5worjw2nv/locopelis.png' + + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + extra=idioma, + contentTitle=contentTitle, + infoLabels={'year': year}, + language=idioma_id, + context=autoplay.context + )) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Paginacion + siguiente = '' + title = '' + data = scrapertools.find_single_match(data, '<ul class="nav.*?\/ul>') + actual = scrapertools.find_single_match(data, '<a href="(\?page=\d|.*?&page=\d*)"><span><b>(.*?)<\/b>') + if actual: + base_url = item.url + actual[0] + while not base_url.endswith('='): base_url = base_url[:-1] + siguiente = int(actual[1]) + 1 + if base_url.endswith('='): + siguiente_url = base_url + str(siguiente) + titlen = 'Pagina Siguiente >>> ' + fanart = 'https://s31.postimg.org/5worjw2nv/locopelis.png' + itemlist.append(Item(channel=item.channel, + action="todas", + title=titlen, + url=siguiente_url, + fanart=fanart + )) + + return itemlist + + +def generos(item): + tgenero = {"comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "drama": "https://s16.postimg.org/94sia332d/drama.png", + "accion": "https://s3.postimg.org/y6o9puflv/accion.png", + "aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "animacion e infantil": "https://s13.postimg.org/5on877l87/animacion.png", + "ciencia ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "anime": 'https://s2.postimg.org/s38borokp/anime.png', + "documentales": 
"https://s16.postimg.org/7xjj4bmol/documental.png", + "intriga": "https://s27.postimg.org/v9og43u2b/intriga.png", + "musical": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "western": "https://s23.postimg.org/lzyfbjzhn/western.png", + "fantasia": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "asiaticas": "https://s4.postimg.org/oo8txm8od/asiatica.png", + "bélico (guerra)": "https://s23.postimg.org/71itp9hcr/belica.png", + "deporte": "https://s13.postimg.org/xuxf5h06v/deporte.png", + "adolescente": "https://s27.postimg.org/713imu3j7/adolescente.png", + "artes marciales": "https://s24.postimg.org/w1aw45j5h/artesmarciales.png", + "cine negro": "https://s27.postimg.org/absaoxx83/cinenegro.png", + "eroticas +18": "https://s15.postimg.org/exz7kysjf/erotica.png", + "hindu": "https://s28.postimg.org/ljn3fxf8d/hindu.png", + "religiosas": "https://s7.postimg.org/llo852fwr/religiosa.png", + "vampiros": "https://s22.postimg.org/3x69mu1fl/vampiros.png", + "zombies": "https://s28.postimg.org/dnn5haqml/zombies.png"} + + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<li><a title.*?href="http:\/\/www.locopelis.com\/categoria\/([^"]+)">([^<]+)<\/a><\/li>.*?' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, 'http://www.locopelis.com/categoria/' + scrapedurl) + title = scrapedtitle.decode('cp1252') + title = title.encode('utf-8') + if title.lower() in tgenero: + thumbnail = tgenero[title.lower()] + fanart = tgenero[title.lower()] + else: + thumbnail = '' + fanart = '' + plot = '' + itemlist.append(Item(channel=item.channel, + action="todas", + title=title.lower(), + fulltitle=item.fulltitle, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart + )) + + return itemlist + + +def ultimas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + data = data.decode('cp1252') + realplot = '' + patron = '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)" alt=.*? 
style="width:105px; height:160px; ' \ + 'border:1px solid #999"\/><\/a>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + url = scrapedurl + thumbnail = scrapedthumbnail + plot = '' + title = scrapedtitle + fanart = 'https://s22.postimg.org/cb7nmhwv5/ultimas.png' + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart + )) + + return itemlist + + +def letras(item): + thumbletras = {'0-9': 'https://s32.postimg.org/drojt686d/image.png', + '0 - 9': 'https://s32.postimg.org/drojt686d/image.png', + '#': 'https://s32.postimg.org/drojt686d/image.png', + 'a': 'https://s32.postimg.org/llp5ekfz9/image.png', + 'b': 'https://s32.postimg.org/y1qgm1yp1/image.png', + 'c': 'https://s32.postimg.org/vlon87gmd/image.png', + 'd': 'https://s32.postimg.org/3zlvnix9h/image.png', + 'e': 'https://s32.postimg.org/bgv32qmsl/image.png', + 'f': 'https://s32.postimg.org/y6u7vq605/image.png', + 'g': 'https://s32.postimg.org/9237ib6jp/image.png', + 'h': 'https://s32.postimg.org/812yt6pk5/image.png', + 'i': 'https://s32.postimg.org/6nbbxvqat/image.png', + 'j': 'https://s32.postimg.org/axpztgvdx/image.png', + 'k': 'https://s32.postimg.org/976yrzdut/image.png', + 'l': 'https://s32.postimg.org/fmal2e9yd/image.png', + 'm': 'https://s32.postimg.org/m19lz2go5/image.png', + 'n': 'https://s32.postimg.org/b2ycgvs2t/image.png', + 'o': 'https://s32.postimg.org/c6igsucpx/image.png', + 'p': 'https://s32.postimg.org/jnro82291/image.png', + 'q': 'https://s32.postimg.org/ve5lpfv1h/image.png', + 'r': 'https://s32.postimg.org/nmovqvqw5/image.png', + 's': 'https://s32.postimg.org/zd2t89jol/image.png', + 't': 'https://s32.postimg.org/wk9lo8jc5/image.png', + 'u': 'https://s32.postimg.org/w8s5bh2w5/image.png', + 'v': 'https://s32.postimg.org/e7dlrey91/image.png', + 'w': 'https://s32.postimg.org/fnp49k15x/image.png', + 'x': 'https://s32.postimg.org/dkep1w1d1/image.png', + 'y': 'https://s32.postimg.org/um7j3zg85/image.png', + 'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'} + + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + data = data.decode('cp1252') + data = scrapertools.find_single_match(data, '<\/form><\/table><\/div>.*?<\/ul>') + + patron = '<li><a href="(.*?)" title="Letra.*?">(.*?)<\/a><\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = scrapedurl + title = scrapedtitle + plot = '' + if scrapedtitle.lower() in thumbletras: + thumbnail = thumbletras[scrapedtitle.lower()] + else: + thumbnail = '' + itemlist.append(Item(channel=item.channel, + action='todas', + title=title, + url=url, + thumbnail=thumbnail, + plot=plot + )) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + + if texto != '': + return todas(item) + else: + return [] + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + + from core import servertools + itemlist.extend(servertools.find_video_items(data=data)) + if item.language == 'Español': + item.language == 'Español' + for videoitem in itemlist: + videoitem.language = IDIOMAS[item.language] + videoitem.title = item.contentTitle + ' (' + videoitem.server + ') (' + videoitem.language + ')' + videoitem.channel = item.channel + videoitem.folder = False + videoitem.extra = item.thumbnail + 
videoitem.fulltitle = item.title + videoitem.quality = 'default' + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle + )) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + from core import servertools + itemlist.extend(servertools.find_video_items(data=item.url)) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.title = item.title + videoitem.folder = False + videoitem.thumbnail = item.extra + videoitem.fulltitle = item.fulltitle + videoitem.infoLabels = item.infoLabels + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + # categoria='peliculas' + try: + if categoria == 'peliculas': + item.url = host + item.extra = 'peliculas' + elif categoria == 'infantiles': + item.url = host + 'categoria/animacion-e-infantil/' + item.extra = 'peliculas' + itemlist = todas(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/mejortorrent.json b/plugin.video.alfa/channels/mejortorrent.json new file mode 100755 index 00000000..8e877af9 --- /dev/null +++ b/plugin.video.alfa/channels/mejortorrent.json @@ -0,0 +1,36 @@ +{ + "id": "mejortorrent", + "name": "Mejor Torrent", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "mejortorrent.png", + "banner": "mejortorrent.png", + "version": 1, + "changes": [ + { + "date": "17/04/2017", + "description": "Arreglado error que impedía el uso del canal" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "torrent", + "movie", + "tvshow", + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/mejortorrent.py b/plugin.video.alfa/channels/mejortorrent.py new file mode 100755 index 00000000..bd2c9a74 --- /dev/null +++ b/plugin.video.alfa/channels/mejortorrent.py @@ -0,0 +1,442 @@ +# -*- coding: utf-8 -*- + +import os +import re +import sys +import urllib +import urlparse + +from channelselector import get_thumbnail_path +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item +from core.tmdb import Tmdb + +host = "http://www.mejortorrent.com" + + +def mainlist(item): + logger.info() + + itemlist = [] + + thumb_pelis = get_thumbnail("thumb_channels_movie.png") + thumb_pelis_hd = get_thumbnail("thumb_channels_movie_hd.png") + thumb_series = get_thumbnail("thumb_channels_tvshow.png") + thumb_series_hd = get_thumbnail("thumb_channels_tvshow_hd.png") + thumb_series_az = get_thumbnail("thumb_channels_tvshow_az.png") + thumb_docus = get_thumbnail("thumb_channels_documentary.png") + thumb_buscar = get_thumbnail("thumb_search.png") + + itemlist.append(Item(channel=item.channel, title="Peliculas", action="getlist", + url="http://www.mejortorrent.com/torrents-de-peliculas.html", thumbnail=thumb_pelis)) + itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="getlist", + url="http://www.mejortorrent.com/torrents-de-peliculas-hd-alta-definicion.html", + thumbnail=thumb_pelis_hd)) + itemlist.append(Item(channel=item.channel, title="Series", action="getlist", + url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series)) + itemlist.append(Item(channel=item.channel, title="Series HD", action="getlist", + url="http://www.mejortorrent.com/torrents-de-series-hd-alta-definicion.html", + thumbnail=thumb_series_hd)) + itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico", + url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series_az)) + itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist", + url="http://www.mejortorrent.com/torrents-de-documentales.html", thumbnail=thumb_docus)) + itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar)) + + return itemlist + + +def listalfabetico(item): + logger.info() + + itemlist = [] + + for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z']: + itemlist.append(Item(channel=item.channel, action="getlist", title=letra, + url="http://www.mejortorrent.com/series-letra-" + letra.lower() + ".html")) + + itemlist.append(Item(channel=item.channel, action="getlist", title="Todas", + url="http://www.mejortorrent.com/series-letra..html")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + + item.url = "http://www.mejortorrent.com/secciones.php?sec=buscador&valor=%s" % (texto) + try: + return buscador(item) + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscador(item): + logger.info() + itemlist = [] + + data = 
httptools.downloadpage(item.url).data + + # pelis + # <a href="/peli-descargar-torrent-9578-Presentimientos.html"> + # <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a + # + # series + # + # <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html"> + # <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a> + # + # docs + # + # <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html"> + # <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a> + + # busca series + patron = "<a href='(/serie-descargar-torrent[^']+)'[^>]+>(.*?)</a>" + patron += ".*?<span style='color:gray;'>([^']+)</span>" + patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html" + + matches = scrapertools.find_multiple_matches(data, patron) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitle, scrapedinfo in matches: + title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode( + 'utf8') + ' ' + scrapedinfo.decode('iso-8859-1').encode('utf8') + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "]") + + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, folder=True, extra="series", + viewmode="movie_with_plot")) + + # busca pelis + patron = "<a href='(/peli-descargar-torrent-[^']+)'[^>]+>(.*?)</a>" + patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html" + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8') + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "]") + + itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, folder=False, extra="")) + + # busca docu + patron = "<a href='(/doc-descargar-torrent[^']+)' .*?" + patron += "<font Color='darkblue'>(.*?)</font>.*?" 
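# ---------------------------------------------------------------------------
# Editor's aside: buscador() makes three passes over the same HTML (series,
# films, documentaries), each with its own pattern and target action. The
# dispatch idea in miniature, under the assumption that the pattern/action
# pairing mirrors the ones used in this function (classify_links is
# hypothetical):

import re

SEARCH_SECTIONS = [
    # (link pattern, channel action) -- patterns abridged from this function
    (r"<a href='(/serie-descargar-torrent[^']+)'[^>]*>(.*?)</a>", "episodios"),
    (r"<a href='(/peli-descargar-torrent-[^']+)'[^>]*>(.*?)</a>", "play"),
    (r"<a href='(/doc-descargar-torrent[^']+)'[^>]*>(.*?)</a>", "episodios"),
]

def classify_links(html):
    # Yield (action, url, raw_title) for every result block we recognise.
    for pattern, action in SEARCH_SECTIONS:
        for url, title in re.findall(pattern, html, re.DOTALL):
            yield action, url, title
# ---------------------------------------------------------------------------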
+ patron += "<td align='right' width='20%'>(.*?)</td>" + patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html" + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitle, scrapedinfo in matches: + title = scrapedtitle.decode('iso-8859-1').encode('utf8') + " " + scrapedinfo.decode('iso-8859-1').encode('utf8') + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "]") + + itemlist.append(Item(channel=item.channel, action="episodios", + title=title, url=url, folder=True, extra="docu", + viewmode="movie_with_plot")) + + return itemlist + + +def getlist(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + # pelis + # <a href="/peli-descargar-torrent-9578-Presentimientos.html"> + # <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a + # + # series + # + # <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html"> + # <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a> + # + # docs + # + # <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html"> + # <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a> + + if item.url.find("peliculas") > -1: + patron = '<a href="(/peli-descargar-torrent[^"]+)">[^<]+' + patron += '<img src="([^"]+)"[^<]+</a>' + patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html" + patron_title = '<a href="/peli-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?' + action = "show_movie_info" + folder = True + extra = "" + elif item.url.find("series-letra") > -1: + patron = "<a href='(/serie-descargar-torrent[^']+)'>()" + patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html" + patron_title = '<a href="/serie-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?' + action = "episodios" + folder = True + extra = "series" + elif item.url.find("series") > -1: + patron = '<a href="(/serie-descargar-torrent[^"]+)">[^<]+' + patron += '<img src="([^"]+)"[^<]+</a>' + patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html" + patron_title = '<a href="/serie-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?' + action = "episodios" + folder = True + extra = "series" + else: + patron = '<a href="(/doc-descargar-torrent[^"]+)">[^<]+' + patron += '<img src="([^"]+)"[^<]+</a>' + patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html" + patron_title = '<a href="/doc-descargar-torrent[^"]+">([^<]+)</a>(\s*<b>([^>]+)</b>)?' + action = "episodios" + folder = True + extra = "docus" + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedthumbnail in matches: + title = scrapertools.get_match(scrapedurl, patron_enlace) + title = title.replace("-", " ") + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, urllib.quote(scrapedthumbnail)) + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot, + folder=folder, extra=extra)) + + matches = re.compile(patron_title, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + # Cambia el título sacado de la URL por un título con más información. 
+    # esta implementación asume que va a encontrar las mismas coincidencias
+    # que en el bucle anterior, lo cual técnicamente es erróneo, pero que
+    # funciona mientras no cambien el formato de la página
+    cnt = 0
+    for scrapedtitle, notused, scrapedinfo in matches:
+        title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
+        if title.endswith('.'):
+            title = title[:-1]
+
+        info = scrapedinfo.decode('iso-8859-1').encode('utf8')
+        if info != "":
+            title = '{0} {1}'.format(title, info)
+
+        itemlist[cnt].title = title
+        cnt += 1
+        if cnt >= len(itemlist):
+            break
+
+    if len(itemlist) == 0:
+        itemlist.append(Item(channel=item.channel, action="mainlist", title="No se ha podido cargar el listado"))
+    else:
+        # Extrae el paginador
+        patronvideos = "<a href='([^']+)' class='paginar'> Siguiente >>"
+        matches = re.compile(patronvideos, re.DOTALL).findall(data)
+        scrapertools.printMatches(matches)
+
+        if len(matches) > 0:
+            scrapedurl = urlparse.urljoin(item.url, matches[0])
+            itemlist.append(
+                Item(channel=item.channel, action="getlist", title="Pagina siguiente >>", url=scrapedurl, folder=True))
+
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+
+    # Descarga la página
+    data = httptools.downloadpage(item.url).data
+
+    total_capis = scrapertools.get_match(data, "<input type='hidden' name='total_capis' value='(\d+)'>")
+    tabla = scrapertools.get_match(data, "<input type='hidden' name='tabla' value='([^']+)'>")
+    titulo = scrapertools.get_match(data, "<input type='hidden' name='titulo' value='([^']+)'>")
+
+    item.thumbnail = scrapertools.find_single_match(data,
+                                                    "src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
+    item.thumbnail = 'http://www.mejortorrent.com' + urllib.quote(item.thumbnail)
+
+    # <form name='episodios' action='secciones.php?sec=descargas&ap=contar_varios' method='post'>
+    data = scrapertools.get_match(data,
+                                  "<form name='episodios' action='secciones.php\?sec=descargas\&ap=contar_varios' method='post'>(.*?)</form>")
+    '''
+    <td bgcolor='#C8DAC8' style='border-bottom:1px solid black;'><a href='/serie-episodio-descargar-torrent-18741-Juego-de-tronos-4x01.html'>4x01 - Episodio en V.O.
Sub Esp.</a></td> + <td width='120' bgcolor='#C8DAC8' align='right' style='border-right:1px solid black; border-bottom:1px solid black;'><div style='color:#666666; font-size:9px; margin-right:5px;'>Fecha: 2014-04-07</div></td> + <td width='60' bgcolor='#F1F1F1' align='center' style='border-bottom:1px solid black;'> + <input type='checkbox' name='episodios[1]' value='18741'> + ''' + + if item.extra == "series": + patron = "<td bgcolor[^>]+><a[^>]+>([^>]+)</a></td>[^<]+" + else: + patron = "<td bgcolor[^>]+>([^>]+)</td>[^<]+" + + patron += "<td[^<]+<div[^>]+>Fecha: ([^<]+)</div></td>[^<]+" + patron += "<td[^<]+" + patron += "<input type='checkbox' name='([^']+)' value='([^']+)'" + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + tmdb_title = re.sub(r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]', '', item.title).strip() + logger.debug('tmdb_title=' + tmdb_title) + + if item.extra == "series": + oTmdb = Tmdb(texto_buscado=tmdb_title.strip(), tipo='tv', idioma_busqueda="es") + else: + oTmdb = Tmdb(texto_buscado=tmdb_title.strip(), idioma_busqueda="es") + + for scrapedtitle, fecha, name, value in matches: + scrapedtitle = scrapedtitle.strip() + if scrapedtitle.endswith('.'): + scrapedtitle = scrapedtitle[:-1] + + title = scrapedtitle + " (" + fecha + ")" + + url = "http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar_varios" + # "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada" + post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo}) + logger.debug("post=" + post) + + if item.extra == "series": + epi = scrapedtitle.split("x") + + # Sólo comprobar Tmdb si el formato es temporadaXcapitulo + if len(epi) > 1: + temporada = re.sub("\D", "", epi[0]) + capitulo = re.search("\d+", epi[1]) + if capitulo: + capitulo = capitulo.group() + else: + capitulo = 1 + + epi_data = oTmdb.get_episodio(temporada, capitulo) + logger.debug("epi_data=" + str(epi_data)) + + if epi_data: + item.thumbnail = epi_data["temporada_poster"] + item.fanart = epi_data["episodio_imagen"] + item.plot = epi_data["episodio_sinopsis"] + epi_title = epi_data["episodio_titulo"] + if epi_title != "": + title = scrapedtitle + " " + epi_title + " (" + fecha + ")" + else: + try: + item.fanart = oTmdb.get_backdrop() + except: + pass + + item.plot = oTmdb.get_sinopsis() + + logger.debug("title=[" + title + "], url=[" + url + "], item=[" + str(item) + "]") + + itemlist.append( + Item(channel=item.channel, action="play", title=title, url=url, thumbnail=item.thumbnail, plot=item.plot, + fanart=item.fanart, extra=post, folder=False)) + + return itemlist + + +def show_movie_info(item): + logger.info() + + itemlist = [] + + tmdb_title = re.sub(r'\(.*\)|\[.*\]', '', item.title).strip() + logger.debug('tmdb_title=' + tmdb_title) + + try: + oTmdb = Tmdb(texto_buscado=tmdb_title, idioma_busqueda="es") + item.fanart = oTmdb.get_backdrop() + item.plot = oTmdb.get_sinopsis() + except: + pass + + data = httptools.downloadpage(item.url).data + logger.debug("data=" + data) + + patron = "<a href='(secciones.php\?sec\=descargas[^']+)'" + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl in matches: + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]") + + torrent_data = httptools.downloadpage(url).data + logger.debug("torrent_data=" + 
torrent_data)
+        # <a href='/uploads/torrents/peliculas/los-juegos-del-hambre-brrip.torrent'>
+        link = scrapertools.get_match(torrent_data, "<a href='(/uploads/torrents/peliculas/.*?\.torrent)'>")
+        link = urlparse.urljoin(url, link)
+
+        logger.debug("link=" + link)
+
+        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
+                             thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+    itemlist = []
+
+    if item.extra == "":
+        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url,
+                             thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
+
+    else:
+        data = httptools.downloadpage(item.url, post=item.extra).data
+        logger.debug("data=" + data)
+
+        # series
+        #
+        # <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-01_02.torrent"
+        # <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-03.torrent"
+        #
+        # docus
+        #
+        # <a href="http://www.mejortorrent.com/uploads/torrents/documentales/En_Suenyos_De_Todos_DVDrip.torrent">El sueño de todos. </a>
+
+        params = dict(urlparse.parse_qsl(item.extra))
+
+        patron = '<a href="(http://www.mejortorrent.com/uploads/torrents/' + params["tabla"] + '/.*?\.torrent)"'
+
+        link = scrapertools.get_match(data, patron)
+
+        logger.info("link=" + link)
+
+        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
+                             thumbnail=item.thumbnail, plot=item.plot, folder=False))
+
+    return itemlist
+
+
+def get_thumbnail(thumb_name=None):
+    img_path = config.get_runtime_path() + '/resources/images/squares'
+
+    if thumb_name:
+        file_path = os.path.join(img_path, thumb_name)
+        if os.path.isfile(file_path):
+            thumb_path = file_path
+        else:
+            thumb_path = urlparse.urljoin(get_thumbnail_path(), thumb_name)
+    else:
+        # with no name there is nothing to join; fall back to the base thumbnail path
+        thumb_path = get_thumbnail_path()
+
+    return thumb_path
diff --git a/plugin.video.alfa/channels/metaserie.json b/plugin.video.alfa/channels/metaserie.json
new file mode 100755
index 00000000..b72af541
--- /dev/null
+++ b/plugin.video.alfa/channels/metaserie.json
@@ -0,0 +1,63 @@
+{
+  "id": "metaserie",
+  "name": "MetaSerie (Latino)",
+  "compatible": {
+    "addon_version": "4.3"
+  },
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "https://s32.postimg.org/7g50yo39h/metaserie.png",
+  "banner": "https://s31.postimg.org/u6yddil8r/metaserie_banner.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "24/06/2017",
+      "description": "Cambios para autoplay"
+    },
+    {
+      "date": "06/06/2017",
+      "description": "Compatibilidad con AutoPlay"
+    },
+    {
+      "date": "25/05/2017",
+      "description": "cambios esteticos"
+    },
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "04/01/2017",
+      "description": "Release."
+ } + ], + "categories": [ + "latino", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "Español", + "VOS" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/metaserie.py b/plugin.video.alfa/channels/metaserie.py new file mode 100755 index 00000000..e0127bde --- /dev/null +++ b/plugin.video.alfa/channels/metaserie.py @@ -0,0 +1,318 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from channels import autoplay +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +IDIOMAS = {'la': 'Latino', 'es': 'Español', 'sub': 'VOS'} +list_language = IDIOMAS.values() +list_quality = [] +list_servers = [ + 'openload', + 'gamovideo', + 'powvideo', + 'streamplay', + 'streaminto', + 'streame', + 'flashx' +] + + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append(item.clone(title="Series", + action="todas", + url="http://metaserie.com/series-agregadas", + thumbnail='https://s27.postimg.org/iahczwgrn/series.png', + fanart='https://s27.postimg.org/iahczwgrn/series.png' + )) + + # itemlist.append(item.clone(title="Anime", + # action="todas", + # url="http://metaserie.com/animes-agregados", + # thumbnail='https://s2.postimg.org/s38borokp/anime.png', + # fanart='https://s2.postimg.org/s38borokp/anime.png' + # )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url="http://www.metaserie.com/?s=", + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def todas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + logger.debug(data) + + patron = '<div class="poster">[^<]' + patron += '<a href="([^"]+)" title="([^"]+)en(.*?)">[^<]' + patron += '<div class="poster_efecto"><span>([^<]+)<.*?div>[^<]' + patron += '<img.*?src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, lang, scrapedplot, scrapedthumbnail in matches: + if 'latino' in lang: + idioma = 'Latino' + elif 'español' in lang: + idioma = 'Español' + url = urlparse.urljoin(item.url, scrapedurl) + title = scrapertools.decodeHtmlentities(scrapedtitle) + ' (%s)' % idioma + thumbnail = scrapedthumbnail + plot = scrapedplot + fanart = 'https://s32.postimg.org/7g50yo39h/metaserie.png' + itemlist.append( + Item(channel=item.channel, + action="temporadas", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentSerieName=title, + context=autoplay.context + )) + + # Paginacion + + next_page_url = scrapertools.find_single_match(data, + '<li><a class="next page-numbers local-link" href="([' + '^"]+)">».*?li>') + if next_page_url != "": + itemlist.append(Item(channel=item.channel, + action="todas", + title=">> Página siguiente", + url=next_page_url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png' + )) + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + 
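# ---------------------------------------------------------------------------
# Editor's aside: below, contentSeasonNumber is filled from the season URL
# with re.findall(), which returns a list (e.g. ['2']) rather than a string.
# A sketch of extracting it as a plain value with a fallback, assuming URLs
# of the form ".../temporada-2-..." (season_from_url is illustrative):

import re

def season_from_url(url, default='1'):
    match = re.search(r'temporada-([^-]+)-', url)
    # Return the captured season number, or the default when the URL does
    # not follow the expected shape.
    return match.group(1) if match else default
# ---------------------------------------------------------------------------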
templist = [] + + data = httptools.downloadpage(item.url).data + patron = '<li class=".*?="([^"]+)".*?>([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = scrapedurl + contentSeasonNumber = re.findall(r'.*?temporada-([^-]+)-', url) + title = scrapedtitle + title = title.replace("&", "x"); + thumbnail = item.thumbnail + plot = item.plot + fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>') + itemlist.append( + Item(channel=item.channel, + action='episodiosxtemp', + title=title, + fulltitle=item.contentSerieName, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentSerieName=item.contentSerieName, + contentSeasonNumber=contentSeasonNumber, + context=item.context + )) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra='episodios', + contentSerieName=item.contentSerieName + )) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + templist = temporadas(item) + for tempitem in templist: + itemlist += episodiosxtemp(tempitem) + + return itemlist + + +def more_episodes(item, itemlist, url): + logger.info() + templist = [] + item.url = url + templist = episodiosxtemp(item) + itemlist += templist + return itemlist + + +def episodiosxtemp(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + logger.debug(data) + patron = '<td><h3 class=".*?href="([^"]+)".*?">([^<]+).*?td>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = scrapedurl + contentEpisodeNumber = re.findall(r'.*?x([^\/]+)\/', url) + title = scrapedtitle + title = title.replace("×", "x") + title = title.replace("×", "x") + thumbnail = item.thumbnail + plot = item.plot + fanart = item.fanart + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=item.thumbnail, + plot=plot, + contentSerieName=item.contentSerieName, + contentSeasonNumber=item.contentSeasonNumber, + contentEpisodeNumber=contentEpisodeNumber, + context=item.context + )) + more_pages = scrapertools.find_single_match(data, + '<li><a class="next page-numbers local-link" href="(.*?)">»') + logger.debug('more_pages: %s' % more_pages) + if more_pages: + itemlist = more_episodes(item, itemlist, more_pages) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + itemlist = [] + if texto != '': + try: + data = httptools.downloadpage(item.url).data + patron = '<a href="([^\"]+)" rel="bookmark" class="local-link">([^<]+)<.*?' 
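# ---------------------------------------------------------------------------
# Editor's aside: every channel wraps its search in a bare try/except so one
# broken scraper cannot take down the global search. The recurring idiom as a
# stand-alone sketch (safe_search is hypothetical; the logging module stands
# in for core.logger):

import sys
import logging

log = logging.getLogger(__name__)

def safe_search(search_fn, item, text):
    # Run the channel search; on any failure, log the exception info line by
    # line (the style used throughout this diff) and return no results.
    try:
        return search_fn(item, text)
    except Exception:
        for line in sys.exc_info():
            log.error("%s", line)
        return []
# ---------------------------------------------------------------------------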
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            scrapertools.printMatches(matches)
+            for scrapedurl, scrapedtitle in matches:
+                url = scrapedurl
+                title = scrapertools.decodeHtmlentities(scrapedtitle)
+                thumbnail = ''
+                plot = ''
+                itemlist.append(Item(channel=item.channel,
+                                     action="temporadas",
+                                     title=title,
+                                     fulltitle=title,
+                                     url=url,
+                                     thumbnail=thumbnail,
+                                     plot=plot,
+                                     folder=True,
+                                     contentSerieName=title
+                                     ))
+
+            return itemlist
+        except:
+            import sys
+            for line in sys.exc_info():
+                logger.error("%s" % line)
+            return []
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    audio = {'la': '[COLOR limegreen]LATINO[/COLOR]', 'es': '[COLOR yellow]ESPAÑOL[/COLOR]',
+             'sub': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'}
+    data = httptools.downloadpage(item.url).data
+    patron = '<td><img src="http:\/\/metaserie\.com\/wp-content\/themes\/mstheme\/gt\/assets\/img\/([^\.]+).png" ' \
+             'width="20".*?<\/td>.*?<td><img src="http:\/\/www\.google\.com\/s2\/favicons\?domain=([^"]+)" \/> (' \
+             '[^<]+)<\/td>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    anterior = scrapertools.find_single_match(data,
+                                              '<th scope="col"><a href="([^"]+)" rel="prev" '
+                                              'class="local-link">Anterior</a></th>')
+    siguiente = scrapertools.find_single_match(data,
+                                               '<th scope="col"><a href="([^"]+)" rel="next" '
+                                               'class="local-link">Siguiente</a></th>')
+
+    for scrapedid, scrapedurl, scrapedserv in matches:
+        url = scrapedurl
+        server = servertools.get_server_from_url(url).lower()
+        title = item.title + ' audio ' + audio[scrapedid] + ' en ' + server
+        extra = item.thumbnail
+        thumbnail = servertools.guess_server_thumbnail(server)
+
+        itemlist.append(Item(channel=item.channel,
+                             action="play",
+                             title=title,
+                             fulltitle=item.contentSerieName,
+                             url=url,
+                             thumbnail=thumbnail,
+                             extra=extra,
+                             language=IDIOMAS[scrapedid],
+                             server=server,
+                             ))
+    if item.extra1 != 'capitulos':
+        if anterior != '':
+            itemlist.append(Item(channel=item.channel,
+                                 action="findvideos",
+                                 title='Capitulo Anterior',
+                                 url=anterior,
+                                 thumbnail='https://s31.postimg.org/k5kpwyrgb/anterior.png'
+                                 ))
+        if siguiente != '':
+            itemlist.append(Item(channel=item.channel,
+                                 action="findvideos",
+                                 title='Capitulo Siguiente',
+                                 url=siguiente,
+                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
+                                 ))
+
+    # Requerido para FilterTools
+
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+
+    # Requerido para AutoPlay
+
+    autoplay.start(itemlist, item)
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+    itemlist = []
+    itemlist.extend(servertools.find_video_items(data=item.url))
+    for videoitem in itemlist:
+        videoitem.channel = item.channel
+        videoitem.title = item.fulltitle
+        videoitem.folder = False
+        videoitem.thumbnail = item.extra
+        videoitem.fulltitle = item.fulltitle
+    return itemlist
diff --git a/plugin.video.alfa/channels/miltorrents.json b/plugin.video.alfa/channels/miltorrents.json
new file mode 100755
index 00000000..a6f12828
--- /dev/null
+++ b/plugin.video.alfa/channels/miltorrents.json
@@ -0,0 +1,39 @@
+{
+  "id": "miltorrents",
+  "name": "Miltorrents",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "version": 1,
+  "changes": [
+    {
+      "date": "06/12/2016",
+      "description": "Release"
+    },
+    {
+      "date": "04/04/2017",
+      "description": "Migración a Httptools y algunos arreglos"
+    },
+    {
+      "date": "28/06/2017",
+      "description": "Correcciones código y algunas mejoras"
+    }
+  ],
+  "thumbnail": "http://imgur.com/KZoska0.png",
+  "banner": "miltorrents.png",
"categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/miltorrents.py b/plugin.video.alfa/channels/miltorrents.py new file mode 100755 index 00000000..a806a4de --- /dev/null +++ b/plugin.video.alfa/channels/miltorrents.py @@ -0,0 +1,1557 @@ +# -*- coding: utf-8 -*- + +import os +import re +import unicodedata +import urllib + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + + +# Para la busqueda en bing evitando baneos + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) + br.addheaders = [('User-agent', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + check_bg = item.action + + if str(check_bg) == "": + check_bg = "bglobal" + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="[COLOR yellow][B]Películas[/B][/COLOR]", action="peliculas", + url="http://www.miltorrents.com", thumbnail="http://imgur.com/46ZzwrZ.png", + fanart="http://imgur.com/y4nJyZh.jpg")) + title = "[COLOR firebrick][B]Buscar[/B][/COLOR]" + " " + "[COLOR yellow][B]Peliculas[/B][/COLOR]" + itemlist.append(Item(channel=item.channel, title=" " + title, action="search", url="", + thumbnail="http://imgur.com/JdSnBeH.png", fanart="http://imgur.com/gwjawWV.jpg", + extra="peliculas" + "|" + check_bg)) + + itemlist.append(Item(channel=item.channel, title="[COLOR slategray][B]Series[/B][/COLOR]", action="peliculas", + url="http://www.miltorrents.com/series", thumbnail="http://imgur.com/sYpu1KF.png", + fanart="http://imgur.com/LwS32zX.jpg")) + + title = "[COLOR firebrick][B]Buscar[/B][/COLOR]" + " " + "[COLOR slategray][B]Series[/B][/COLOR]" + itemlist.append(Item(channel=item.channel, title=" " + title, 
action="search", url="", + thumbnail="http://imgur.com/brMIPlT.png", fanart="http://imgur.com/ecPmzDj.jpg", + extra="series" + "|" + check_bg)) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + if item.extra: + if item.extra.split("|")[0] == "series": + item.url = "http://www.miltorrents.com/series/?pTit=%s&pOrd=FE" % (texto) + else: + item.url = "http://www.miltorrents.com/?pTit=%s&pOrd=FE" % (texto) + + item.extra = "search" + "|" + item.extra.split("|")[1] + "|" + texto + + try: + return peliculas(item) + # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + else: + if item.contentType != "movie": + item.url = "http://www.miltorrents.com/series/?pTit=%s&pOrd=FE" % (texto) + check_sp = "tvshow" + else: + item.url = "http://www.miltorrents.com/?pTit=%s&pOrd=FE" % (texto) + check_sp = "peliculas" + item.extra = "search" + "|""bglobal" + "|" + texto + "|" + check_sp + try: + return peliculas(item) + # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def peliculas(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"Independance", "Independence", data) + if "serie" in item.url: + patron = '<div class="corner-episode">(.*?)<\/div>.*?<a href="([^"]+)".*?image:url\(\'([^"]+)\'.*?"tooltipbox">(.*?)<br' + + matches = re.compile(patron, re.DOTALL).findall(data) + if item.extra.split("|")[0] == "search": + check_bg = item.action + if item.extra.split("|")[1] != "bglobal" and check_bg != "info": + if len(matches) == 0: + dialog = xbmcgui.Dialog() + if dialog.yesno( + '[COLOR crimson][B]Sin resultados en[/B][/COLOR]' + '[COLOR gold][B] Mil[/B][/COLOR]' + '[COLOR floralwhite][B]torrents[/B][/COLOR]', + '[COLOR cadetblue]¿Quieres hacer una busqueda en Alfa?[/COLOR]', + '', "", '[COLOR crimson][B]No,gracias[/B][/COLOR]', + '[COLOR yellow][B]Si[/B][/COLOR]'): + item.extra = "serie" + "|" + item.extra.split("|")[2] + return busqueda(item) + else: + + xbmc.executebuiltin('Action(Back)') + xbmc.sleep(500) + + for episodio, url, thumbnail, title in matches: + title = title.decode('latin1').encode('utf8') + title_fan = title.strip() + trailer = title_fan + " " + "series" + "trailer" + title = "[COLOR slategray][B]" + title.strip() + "[/B][/COLOR]" + " " + "[COLOR floralwhite][B]" + episodio + "[/B][/COLOR]" + trailer = urllib.quote(trailer) + extra = trailer + "|" + title_fan + "|" + " " + "|" + "pelicula" + itemlist.append(Item(channel=item.channel, title=title, url=url, action="fanart", thumbnail=thumbnail, + fanart="http://imgur.com/NrZNOTN.jpg", extra=extra, folder=True)) + else: + patron = '<div class="moviesbox">(.*?)<a href="([^"]+)".*?image:url\(\'([^"]+)\'.*?<span class="tooltipbox">([^<]+)<i>\((\d\d\d\d)\)' + + matches = re.compile(patron, re.DOTALL).findall(data) + + if item.extra.split("|")[0] == "search": + check_bg = item.action + if item.extra.split("|")[1] != "bglobal" and check_bg != "info": + + if len(matches) == 0: + dialog = xbmcgui.Dialog() + if dialog.yesno( + '[COLOR crimson][B]Sin resultados en[/B][/COLOR]' + '[COLOR gold][B] Mil[/B][/COLOR]' + '[COLOR floralwhite][B]torrents[/B][/COLOR]', + '[COLOR cadetblue]¿Quieres hacer una busqueda en Alfa?[/COLOR]', + '', "", '[COLOR crimson][B]No,gracias[/B][/COLOR]', 
+ '[COLOR yellow][B]Si[/B][/COLOR]'): + item.extra = "movie" + "|" + item.extra.split("|")[2] + + return busqueda(item) + + + else: + + xbmc.executebuiltin('Action(Back)') + xbmc.sleep(500) + + for p_rating, url, thumbnail, title, year in matches: + + try: + rating = scrapertools.get_match(p_rating, '<div class="moviesbox_rating">(.*?)<img') + except: + rating = "(Sin puntuacion)" + title = title.decode('latin1').encode('utf8') + title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d&#.*?;\d+|-|Temporada.*?Completa| ;|(Sin puntuacion)", "", title) + + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + title = "[COLOR gold][B]" + title + "[/B][/COLOR]" + " " + rating + trailer = title_fan + " " + "trailer" + trailer = urllib.quote(trailer) + + extra = trailer + "|" + title_fan + "|" + year + "|" + "pelicula" + + itemlist.append(Item(channel=item.channel, title=title, url=url, action="fanart", thumbnail=thumbnail, + fanart="http://imgur.com/Oi1mlFn.jpg", extra=extra, folder=True)) + + ## Paginación + patronvideos = '<div class="pagination">.*?<a href="#">.*?<\/a><\/span><a href="([^"]+)"' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + if len(matches) > 0: + url = matches[0] + itemlist.append(Item(channel=item.channel, action="peliculas", title="[COLOR khaki]siguiente[/COLOR]", url=url, + thumbnail="http://imgur.com/fJzoytz.png", fanart="http://imgur.com/3AqH1Zu.jpg", + folder=True)) + + return itemlist + + +def fanart(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + title_fan = item.extra.split("|")[1] + title = title_fan.replace(' ', '%20') + title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if + unicodedata.category(c) != 'Mn')).encode("ascii", "ignore") + + if not "serie" in item.url: + item.title = re.sub(r" \[COLOR.*?\]\d+.\d+.*?.*?\[\/COLOR\]|\(Sin puntuacion\)", "", item.title) + item.plot = item.extra.split("|")[0] + try: + sinopsis = scrapertools.get_match(data, + '<b>Sinopsis:<\/b><span class="item" itemprop="description">(.*?)<\/span><\/span>').decode( + 'latin1').encode('utf8') + except: + sinopsis = "" + + if not "serie" in item.url: + id_tmdb = "" + # filmafinity + year = item.extra.split("|")[2] + + if year == "0000": + year = "" + + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url).data + + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf).data + else: + + try: + url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year) + data = browser(url_bing) + data = re.sub(r'\n|\r|\t|\s{2}| ', '', data) + + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, 
+ 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + except: + pass + + if sinopsis == " ": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]" + print "ozuu" + print critica + + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = re.sub(r":.*|\(.*?\)", "", title) + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false" + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin información" + "|" + rating_filma + "|" + critica + show = item.fanart + "|" + "" + "|" + sinopsis + posterdb = item.thumbnail + fanart_info = item.fanart + fanart_3 = "" + fanart_2 = item.fanart + category = item.thumbnail + id_scraper = "" + + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show, + category=category, folder=True)) + + for id, fan in matches: + + fan = re.sub(r'\\|"', '', fan) + + try: + rating = scrapertools.find_single_match(data, '"vote_average":(.*?),') + except: + rating = "Sin puntuación" + + id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica + try: + posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"') + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + except: + posterdb = item.thumbnail + + if "null" in fan: + fanart = item.fanart + 
else: + fanart = "https://image.tmdb.org/t/p/original" + fan + item.extra = fanart + + url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=" + api_key + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + + # clearart, fanart_2 y logo + url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"hdmovielogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if '"moviedisc"' in data: + disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"') + if '"movieposter"' in data: + poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"') + if '"moviethumb"' in data: + thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"') + if '"moviebanner"' in data: + banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"') + + if len(matches) == 0: + extra = posterdb + # "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png" + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + category = posterdb + + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", + thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category, folder=True)) + for logo in matches: + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + if '"moviebackground"' in data: + + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if '"moviebackground"' in data: + + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + else: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = logo + + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, 
fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not '"hdmovieclearart"' in data and not '"moviebackground"' in data: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = item.extra + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, show=show, + category=category, folder=True)) + + + else: + # filmafinity + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % (title.replace(' ', '+')) + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + try: + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + except: + pass + + try: + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + year = scrapertools.get_match(data, '<dt>Año</dt>.*?>(.*?)</dd>') + except: + year = "" + + if sinopsis == " ": + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + print "lobeznito" + print rating_filma + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta serie no tiene críticas[/B][/COLOR]" + + ###Busqueda en tmdb + + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false&first_air_date_year=" + year + data_tmdb = scrapertools.cachePage(url_tmdb) + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + + ###Busqueda en bing el id de imdb de la serie + if len(matches) == 0: + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es" + data_tmdb = scrapertools.cachePage(url_tmdb) + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + if len(matches) == 0: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + 
title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + try: + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + except: + pass + + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + imdb_id = "" + ###Busca id de tvdb y tmdb mediante imdb id + + urlremotetbdb = "https://api.themoviedb.org/3/find/" + imdb_id + "?api_key=" + api_key + "&external_source=imdb_id&language=es" + data_tmdb = scrapertools.cachePage(urlremotetbdb) + matches = scrapertools.find_multiple_matches(data_tmdb, + '"tv_results":.*?"id":(.*?),.*?"poster_path":(.*?),') + + if len(matches) == 0: + id_tmdb = "" + fanart_3 = "" + extra = item.thumbnail + "|" + year + "|" + "no data" + "|" + "no data" + "|" + rating_filma + "|" + critica + "|" + "" + "|" + id_tmdb + show = item.fanart + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + item.thumbnail + "|" + id_tmdb + fanart_info = item.fanart + fanart_2 = item.fanart + id_scraper = " " + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + " " + category = "" + posterdb = item.thumbnail + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, category=category, + show=show, folder=True)) + + for id_tmdb, fan in matches: + ###Busca id tvdb + urlid_tvdb = "https://api.themoviedb.org/3/tv/" + id_tmdb + "/external_ids?api_key=" + api_key + "&language=es" + data_tvdb = scrapertools.cachePage(urlid_tvdb) + id = scrapertools.find_single_match(data_tvdb, 'tvdb_id":(.*?),"tvrage_id"') + if id == "null": + id = "" + category = id + ###Busqueda nºepisodios y temporadas,status + url_status = "http://api.themoviedb.org/3/tv/" + id_tmdb + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_status = scrapertools.cachePage(url_status) + season_episodes = scrapertools.find_single_match(data_status, + '"(number_of_episodes":\d+,"number_of_seasons":\d+,)"') + season_episodes = re.sub(r'"', '', season_episodes) + season_episodes = re.sub(r'number_of_episodes', 'Episodios ', season_episodes) + season_episodes = re.sub(r'number_of_seasons', 'Temporadas', season_episodes) + season_episodes = re.sub(r'_', ' ', season_episodes) + status = scrapertools.find_single_match(data_status, '"status":"(.*?)"') + if status == "Ended": + status = "Finalizada" + else: + status = "En emisión" + status = status + " (" + season_episodes + ")" + status = re.sub(r',', '.', status) + ####### + + fan = re.sub(r'\\|"', '', fan) + try: + # rating tvdb + url_rating_tvdb = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id + "/es.xml" + print "pepote" + print url_rating_tvdb + data = httptools.downloadpage(url_rating_tvdb).data + rating = scrapertools.find_single_match(data, '<Rating>(.*?)<') + except: + ratintg_tvdb = "" + try: + rating = scrapertools.get_match(data, '"vote_average":(.*?),') + except: + + rating = "Sin puntuación" + + id_scraper = id_tmdb + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + rating + "|" + status # +"|"+emision + + posterdb = scrapertools.find_single_match(data_tmdb, '"poster_path":(.*?)",') + + if "null" in posterdb: + posterdb = item.thumbnail + else: + posterdb = re.sub(r'\\|"', '', posterdb) + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + + if "null" in fan: + fanart = item.fanart + else: + fanart = 
"https://image.tmdb.org/t/p/original" + fan + + item.extra = fanart + + url = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/images?api_key=" + api_key + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + if fanart == item.fanart: + fanart = fanart_info + url = "http://webservice.fanart.tv/v3/tv/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"clearlogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"tvbanner"' in data: + tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') + tfv = tvbanner + elif '"tvposter"' in data: + tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + tfv = tvposter + else: + tfv = posterdb + if '"tvthumb"' in data: + tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') + if '"hdtvlogo"' in data: + hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') + if '"hdclearart"' in data: + hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') + if len(matches) == 0: + if '"hdtvlogo"' in data: + if "showbackground" in data: + + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, + category=category, extra=extra, show=show, folder=True)) + + + else: + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = "" + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=posterdb, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + for logo in matches: + if '"hdtvlogo"' in data: + thumbnail = hdtvlogo + elif not '"hdtvlogo"' in data: + if '"clearlogo"' in data: + thumbnail = logo + else: + 
thumbnail = item.thumbnail + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + if "showbackground" in data: + + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if "showbackground" in data: + + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = logo + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not '"clearart"' in data and not '"showbackground"' in data: + if '"hdclearart"' in data: + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + title_info = u'\u012F\u03B7\u0492\u03BF' + title_info = title_info.encode('utf-8') + if not "serie" in item.url: + thumbnail = posterdb + title_info = "[COLOR khaki][B]" + title_info + "[/B][/COLOR]" + if "serie" in item.url: + title_info = "[COLOR skyblue][B]" + title_info + "[/B][/COLOR]" + if '"tvposter"' in data: + thumbnail = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + else: + thumbnail = item.thumbnail + + if "tvbanner" in data: + category = tvbanner + else: + category = show + if '"tvthumb"' in data: + plot = item.plot + "|" + tvthumb + else: + plot = item.plot + "|" + item.thumbnail + if '"tvbanner"' in data: + plot = plot + "|" + tvbanner + elif '"tvthumb"' in data: + plot = plot + "|" + tvthumb + else: + plot = plot + "|" + item.thumbnail + else: + if '"moviethumb"' in data: + plot = item.plot + "|" + thumb + else: + plot = item.plot + "|" + posterdb + + if '"moviebanner"' in data: + plot = plot + "|" + banner + else: + if '"hdmovieclearart"' in data: + plot = plot + "|" + clear + + else: + plot = plot + "|" + posterdb + + id = id_scraper + + extra = extra + "|" + id + "|" + title.encode('utf8') + + itemlist.append( + Item(channel=item.channel, action="info", title=title_info, plot=plot, url=item.url, thumbnail=thumbnail, + fanart=fanart_info, extra=extra, category=category, show=show, viewmode="movie_with_plot", folder=False)) + + return itemlist + + +def capitulos(item): + logger.info() + itemlist = [] + data = item.extra + thumbnail = 
scrapertools.get_match(data, 'background-image:url\(\'([^"]+)\'') + thumbnail = re.sub(r"w185", "original", thumbnail) + patron = '<a href="([^"]+)".*?<br\/><i>(.*?)<\/i>' + matches = re.compile(patron, re.DOTALL).findall(data) + for url, capitulo in matches: + capitulo = re.sub(r"Cap.*?tulo", "", capitulo) + capitulo = "[COLOR floralwhite][B]" + capitulo + "[/B][/COLOR]" + if capitulo == item.extra.split("|")[4]: + continue + if not ".jpg" in item.extra.split("|")[2]: + fanart = item.show.split("|")[0] + else: + fanart = item.extra.split("|")[2] + itemlist.append(Item(channel=item.channel, title=capitulo, action="findvideos", url=url, thumbnail=thumbnail, + extra="fv2" + "|" + item.extra.split("|")[3], show=item.show, category=item.category, + fanart=fanart, folder=True)) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + if not "serie" in item.url: + thumbnail = item.category + else: + thumbnail = item.show.split("|")[4] + patronbloque_enlaces = '<div class="detail_content_subtitle">(.*?)<\/div>(.*?)<div class="torrent_sep">' + matchesenlaces = re.compile(patronbloque_enlaces, re.DOTALL).findall(data) + + if len(matchesenlaces) == 0: + thumb = "" + check = "" + itemlist.append( + Item(channel=item.channel, title="[COLOR crimson][B]No hay Torrent[/B][/COLOR]", action="mainlist", url="", + fanart=item.show.split("|")[0], thumbnail=thumbnail, folder=False)) + + for calidad_bloque, bloque_enlaces in matchesenlaces: + + calidad_bloque = dhe(calidad_bloque) + calidad_bloque = ''.join((c for c in unicodedata.normalize('NFD', unicode(calidad_bloque.decode('utf-8'))) if + unicodedata.category(c) != 'Mn')) + if "Alta" in calidad_bloque: + title = u'\u0414\u006C\u03C4\u03B1' + " " + u'\u0110\u04BC\u0492\u0456\u03B7\u0456\u03C2\u0456\u03BF\u03B7' + title = title.encode('utf-8') + title = " [COLOR yellow][B]" + title + "[/B][/COLOR]" + elif "estandar" in calidad_bloque: + title = u'\u0110\u04BC\u0492\u0456\u03B7\u0456\u03C2\u0456\u03BF\u03B7' + " " + u'\u04BC\u0053\u03C4\u03B1\u03B7\u0110\u03B1\u0491' + title = title.encode('utf-8') + title = " [COLOR mediumaquamarine][B]" + title + "[/B][/COLOR]" + else: + + title = u'\u0053\u03C2\u0491\u04BC\u04BC\u03B7\u04BC\u0491' + title = title.encode('utf-8') + title = " [COLOR slategray][B]" + title + "[/B][/COLOR]" + itemlist.append( + Item(channel=item.channel, title=title, action="mainlist", url="", fanart=item.show.split("|")[0], + thumbnail=thumbnail, folder=False)) + + if "serie" in item.url: + thumb = scrapertools.get_match(data, '<div class="detail_background2".*?url\(([^"]+)\)') + patron = '<a href=.*?(http.*?)\'\).*?<i>(.*?)<\/i>' + matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) + for url, calidad in matches: + + try: + if not url.endswith(".torrent") and not "elitetorrent" in url: + if url.endswith("fx"): + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + + if url.endswith(".fx"): + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + + url = " http://estrenosli.org" + url + + else: + if not url.endswith(".mkv"): + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + + torrents_path = config.get_videolibrary_path() + '/torrents' + + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) + try: + urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 
Safari/537.36 SE 2.X MetaSr 1.0' + urllib.urlretrieve(url, torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + if "used CloudFlare" in pepe: + try: + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), + torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + torrent = decode(pepe) + + try: + name = torrent["info"]["name"] + sizet = torrent["info"]['length'] + sizet = convert_size(sizet) + except: + name = "no disponible" + try: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), + "'length': (\d+)}") + + size = max([int(i) for i in check_video]) + + for file in torrent["info"]["files"]: + manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) + if str(size) in manolo: + video = manolo + size = convert_size(size) + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = sizet + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", + name) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = "en estos momentos..." + ext_v = "no disponible" + + if "Alta" in calidad_bloque: + title = "[COLOR navajowhite][B]" + calidad + "[/B][/COLOR]" + " " + "[COLOR peachpuff]( Video [/COLOR]" + "[COLOR peachpuff]" + ext_v + " -- " + size + " )[/COLOR]" + elif "estandar" in calidad_bloque: + title = "[COLOR lavender][B]" + calidad + "[/B][/COLOR]" + " " + "[COLOR azure]( Video [/COLOR]" + "[COLOR azure]" + ext_v + " -- " + size + " )[/COLOR]" + else: + title = "[COLOR gainsboro][B]" + calidad + "[/B][/COLOR]" + " " + "[COLOR silver]( Video [/COLOR]" + "[COLOR silver]" + ext_v + " -- " + size + " )[/COLOR]" + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + size = "" + + item.title = re.sub(r"\[.*?\]", "", item.title) + temp_epi = scrapertools.find_multiple_matches(item.title, '(\d+)x(\d+)') + + for temp, epi in temp_epi: + check = temp + "x" + epi + if item.extra.split("|")[0] == "fv2": + extra = item.extra.split("|")[1] + "|" + " " + "|" + temp + "|" + epi + else: + extra = item.extra + "|" + temp + "|" + epi + + itemlist.append(Item(channel=item.channel, title=title, action="play", url=url, server="torrent", + thumbnail=thumbnail, extra=item.extra, show=item.show, + fanart=item.show.split("|")[0], folder=False)) + else: + patron = '<a href=.*?(http.*?)\'\).*?<i>(.*?)<i>(.*?)<\/i>' + matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) + for url, calidad, peso in matches: + + try: + if not url.endswith(".torrent") and not "elitetorrent" in url: + if url.endswith("fx"): + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + + if url.endswith(".fx"): + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + + url = " http://estrenosli.org" + url + else: + if not url.endswith(".mkv"): + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + + torrents_path = config.get_videolibrary_path() + '/torrents' + + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) + + urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0' + urllib.urlretrieve(url, torrents_path + "/temp.torrent") + 
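The try block above fetches the .torrent into the videolibrary folder, bdecodes it with the decode() helper defined at the bottom of this module, and scans info["files"] for the largest entry to label the link with its extension and size. A condensed sketch of that flow, reusing this module's decode() and convert_size() and leaving out the CloudFlare/anonymouse retry:

```python
import os
import urllib

def probe_torrent(url, torrents_path):
    # Download the .torrent, bdecode it and report the biggest payload file.
    # decode() and convert_size() are the helpers defined further down this file.
    if not os.path.exists(torrents_path):
        os.mkdir(torrents_path)
    temp = os.path.join(torrents_path, "temp.torrent")
    urllib.urlretrieve(url, temp)
    try:
        info = decode(open(temp, "rb").read())["info"]
        if "files" in info:  # multi-file torrent: pick the largest entry
            biggest = max(info["files"], key=lambda f: f["length"])
            return "/".join(biggest["path"]), convert_size(biggest["length"])
        return info["name"], convert_size(info["length"])  # single-file torrent
    finally:
        if os.path.exists(temp):
            os.remove(temp)
```

Using max() with a key also replaces the `if str(size) in manolo` substring check above, which can pick the wrong file when one length happens to be a prefix of another.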
pepe = open(torrents_path + "/temp.torrent", "rb").read() + + if "used CloudFlare" in pepe: + try: + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), + torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + torrent = decode(pepe) + + try: + name = torrent["info"]["name"] + except: + name = "no disponible" + try: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), + "'length': (\d+)}") + + size = max([int(i) for i in check_video]) + + for file in torrent["info"]["files"]: + manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) + if str(size) in manolo: + video = manolo + + ext_v = re.sub(r"-.*? bytes|\.*?\[.*?\]\.|'|\.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es\.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", + name) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = "en estos momentos..." + ext_v = "no disponible" + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + calidad = re.sub(r"</i>", "", calidad) + + if "Alta" in calidad_bloque: + title = "[COLOR khaki][B]" + calidad + "[/B][/COLOR]" + "[COLOR darkkhaki][B]" + " - " + peso + "[/B][/COLOR]" + " " + "[COLOR lemonchiffon]( Video [/COLOR]" + "[COLOR lemonchiffon]" + ext_v + " )[/COLOR]" + elif "estandar" in calidad_bloque: + title = "[COLOR darkcyan][B]" + calidad + "[/B][/COLOR]" + "[COLOR cadetblue][B]" + " - " + peso + "[/B][/COLOR]" + " " + "[COLOR paleturquoise]( Video [/COLOR]" + "[COLOR paleturquoise]" + ext_v + " )[/COLOR]" + else: + title = "[COLOR dimgray][B]" + calidad + "[/B][/COLOR]" + "[COLOR gray][B]" + " - " + peso + "[/B][/COLOR]" + " " + "[COLOR lightslategray]( Video [/COLOR]" + "[COLOR lightslategray]" + ext_v + " )[/COLOR]" + itemlist.append(Item(channel=item.channel, title=title, action="play", url=url, server="torrent", + thumbnail=thumbnail, extra=item.extra, show=item.show, + fanart=item.show.split("|")[0], folder=False)) + if "serie" in item.url: + title_info = u'\u012F\u03B7\u0492\u03BF' + title_info = title_info.encode('utf-8') + title_info = "[COLOR darkseagreen]" + title_info + "[/COLOR]" + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title=" " + title_info, url=item.url, + thumbnail=thumbnail, fanart=item.show.split("|")[0], extra=extra, show=item.show, + category=item.category, folder=False)) + + if "serie" in item.url and item.extra.split("|")[0] != "fv2": + title_info = u'\u03C4\u04BC\u04CE\u0420\u03BF\u0491\u03B1\u0110\u03B1\u0053' + title_info = title_info.encode('utf-8') + title_info = "[COLOR springgreen][B]" + title_info + "[/B][/COLOR]" + itemlist.append(Item(channel=item.channel, title=" " + title_info, + action="mainlist", url="", fanart=item.show.split("|")[0], thumbnail=thumbnail, + folder=False)) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = 'class="contactlinkh">(.*?)<\/a><\/div>(.*?)</div></div></div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for temporadas, bloque_capitulos in matches: + thumbnail = scrapertools.get_match(bloque_capitulos, 'background-image:url\(\'([^"]+)\'') + + thumbnail = re.sub(r"w185", "original", thumbnail) + + itemlist.append(Item(channel=item.channel, title="[COLOR chartreuse][B]" + temporadas + "[/B][/COLOR]", + action="capitulos", url=item.url, thumbnail=thumbnail, + extra="fv2" + "|" + 
bloque_capitulos + "|" + thumb + "|" + item.extra + "|" + check, + show=item.show, fanart=item.show.split("|")[0], category=item.category, folder=True)) + + return itemlist + + +def info(item): + logger.info() + itemlist = [] + url = item.url + id = item.extra + + if "serie" in item.url: + try: + rating_tmdba_tvdb = item.extra.split("|")[6] + if item.extra.split("|")[6] == "": + rating_tmdba_tvdb = "Sin puntuación" + except: + rating_tmdba_tvdb = "Sin puntuación" + else: + rating_tmdba_tvdb = item.extra.split("|")[3] + rating_filma = item.extra.split("|")[4] + print "eztoquee" + print rating_filma + print rating_tmdba_tvdb + + filma = "http://s6.postimg.org/6yhe5fgy9/filma.png" + + try: + if "serie" in item.url: + title = item.extra.split("|")[8] + + else: + title = item.extra.split("|")[6] + title = title.replace("%20", " ") + title = "[COLOR yellow][B]" + title + "[/B][/COLOR]" + except: + title = item.title + + try: + if "." in rating_tmdba_tvdb: + check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).') + else: + check_rat_tmdba = rating_tmdba_tvdb + if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8: + rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10: + rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + else: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + print "lolaymaue" + except: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + try: + check_rat_filma = scrapertools.get_match(rating_filma, '(\d)') + print "paco" + print check_rat_filma + if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8: + print "dios" + print check_rat_filma + rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]" + elif int(check_rat_filma) >= 8: + + print check_rat_filma + rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]" + else: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + print "rojo??" + print check_rat_filma + except: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + + try: + if not "serie" in item.url: + url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_plot = scrapertools.cache_page(url_plot) + plot, tagline = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",.*?"tagline":(".*?")') + if plot == "": + plot = item.show.split("|")[2] + + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\", "", plot) + + else: + plot = item.show.split("|")[2] + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\", "", plot) + + if item.extra.split("|")[7] != "": + tagline = item.extra.split("|")[7] + # tagline= re.sub(r',','.',tagline) + else: + tagline = "" + except: + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Esta pelicula no tiene informacion..." + plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") + photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + info = "" + + if "serie" in item.url: + check2 = "serie" + icon = "http://s6.postimg.org/hzcjag975/tvdb.png" + foto = item.show.split("|")[1] + if item.extra.split("|")[5] != "": + critica = item.extra.split("|")[5] + else: + critica = "Esta serie no tiene críticas..." 
+ + photo = item.extra.split("|")[0].replace(" ", "%20") + try: + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + except: + tagline = "" + + else: + + critica = item.extra.split("|")[5] + if "%20" in critica: + critica = "No hay críticas" + icon = "http://imgur.com/SenkyxF.png" + photo = item.extra.split("|")[0].replace(" ", "%20") + foto = item.show.split("|")[1] + + try: + if tagline == "\"\"": + tagline = " " + except: + tagline = " " + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + check2 = "pelicula" + # Tambien te puede interesar + peliculas = [] + if "serie" in item.url: + + url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = scrapertools.cachePage(url_tpi) + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),') + + else: + url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = scrapertools.cachePage(url_tpi) + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),') + + for idp, peli, thumb in tpi: + + thumb = re.sub(r'"|}', '', thumb) + if "null" in thumb: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + else: + thumb = "https://image.tmdb.org/t/p/original" + thumb + peliculas.append([idp, peli, thumb]) + + check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow") + infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline, + 'rating': rating} + item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma, + critica=critica, contentType=check2, + thumb_busqueda="http://s6.postimg.org/u381y91u9/logomil.png") + from channels import infoplus + infoplus.start(item_info, peliculas) + + +def info_capitulos(item): + logger.info() + url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" + item.extra.split("|")[ + 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es" + + if "/0" in url: + url = url.replace("/0", "/") + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[ + 2] + "/" + item.extra.split("|")[3] + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?<Overview>(.*?)</Overview>.*?<Rating>(.*?)</Rating>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." 
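When TMDb has no data, info_capitulos falls back to TheTVDB's legacy v1 XML API, scraped above with a regex. A sketch of the same lookup through a real XML parser; the API key in the URL is the one this channel already hard-codes, and int() normalises the zero-padded season/episode numbers that the code patches with its crude "/0" replace:

```python
import urllib2
import xml.etree.ElementTree as ET

def tvdb_episode(series_id, season, episode):
    # TheTVDB legacy v1 API: /api/<key>/series/<id>/default/<season>/<episode>/es.xml
    url = ("http://thetvdb.com/api/1D62F2F90030C444/series/%s/default/%d/%d/es.xml"
           % (series_id, int(season), int(episode)))
    root = ET.fromstring(urllib2.urlopen(url).read())  # <Data><Episode>...</Episode></Data>
    ep = root.find("Episode")
    if ep is None:
        return None
    return {"name": ep.findtext("EpisodeName", ""),
            "overview": ep.findtext("Overview", ""),
            "rating": ep.findtext("Rating", ""),
            "image": ep.findtext("filename", "")}
```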
+ plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + + else: + + for name_epi, info, rating in matches: + if "<filename>episodes" in data: + foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') + fanart = "http://thetvdb.com/banners/" + foto + else: + fanart = item.extra.split("|")[1] + plot = info + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/wSIln04.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + else: + for name_epi, info, fanart, rating in matches: + if info == "" or info == "\\": + info = "Sin informacion del capítulo aún..." + plot = info + plot = re.sub(r'/n', '', plot) + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + image = re.sub(r'"|}', '', image) + if "null" in image: + image = "http://imgur.com/ZiEAVOD.png" + else: + image = "https://image.tmdb.org/t/p/original" + image + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/wSIln04.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." 
in rating: + rating = re.sub(r'10\.\d+', '10', rating) + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/Vj7pYVt.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def translate(to_translate, to_langage="auto", langage="auto"): + '''Return the translation using google translate + you must shortcut the langage you define (French = fr, English = en, Spanish = es, etc...) 
+ if you don't define anything it will detect it or use english by default + Example: + print(translate("salut tu vas bien?", "en")) + hello you alright?''' + agents = { + 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} + before_trans = 'class="t0">' + link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_langage, langage, to_translate.replace(" ", "+")) + request = urllib2.Request(link, headers=agents) + page = urllib2.urlopen(request).read() + result = page[page.find(before_trans) + len(before_trans):] + result = result.split("<")[0] + return result + + +if __name__ == '__main__': + to_translate = 'Hola como estas?' + print("%s >> %s" % (to_translate, translate(to_translate))) + print("%s >> %s" % (to_translate, translate(to_translate, 'fr'))) + + +# should print Hola como estas >> Hello how are you +# and Hola como estas? >> Bonjour comment allez-vous? + +def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): + i = 0 + while i < len(text): + m = match(text, i) + s = m.group(m.lastindex) + i = m.end() + if m.lastindex == 2: + yield "s" + yield text[i:i + int(s)] + i = i + int(s) + else: + yield s + + +def decode_item(next, token): + if token == "i": + # integer: "i" value "e" + data = int(next()) + if next() != "e": + raise ValueError + elif token == "s": + # string: "s" value (virtual tokens) + data = next() + elif token == "l" or token == "d": + # container: "l" (or "d") values "e" + data = [] + tok = next() + while tok != "e": + data.append(decode_item(next, tok)) + tok = next() + if token == "d": + data = dict(zip(data[0::2], data[1::2])) + else: + raise ValueError + return data + + +def decode(text): + try: + src = tokenize(text) + data = decode_item(src.next, src.next()) + for token in src: # look for more tokens + raise SyntaxError("trailing junk") + except (AttributeError, ValueError, StopIteration): + try: + data = data + except: + data = src + + return data + + +def convert_size(size): + import math + if (size == 0): + return '0B' + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return '%s %s' % (s, size_name[i]) + + +def busqueda(item): + logger.info() + cat = [item.extra.split("|")[0].replace("tv", "serie"), 'torrent'] + new_item = Item() + new_item.extra = item.extra.split("|")[1].replace("+", " ") + new_item.category = item.extra.split("|")[0] + + from channels import search + return search.do_search(new_item, cat) diff --git a/plugin.video.alfa/channels/miradetodo.json b/plugin.video.alfa/channels/miradetodo.json new file mode 100755 index 00000000..27e9f5f8 --- /dev/null +++ b/plugin.video.alfa/channels/miradetodo.json @@ -0,0 +1,50 @@ +{ + "id": "miradetodo", + "name": "MiraDeTodo", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s17.postimg.org/e8kp12mcv/miradetodo.png", + "banner": "https://s7.postimg.org/it21t0dej/miradetodo-banner.png", + "version": 1, + "changes": [ + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "05/05/2017", + "description": "First release" + } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - 
Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/miradetodo.py b/plugin.video.alfa/channels/miradetodo.py new file mode 100755 index 00000000..8260c92d --- /dev/null +++ b/plugin.video.alfa/channels/miradetodo.py @@ -0,0 +1,432 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "Drama": "https://s16.postimg.org/94sia332d/drama.png", + "Acción": "https://s3.postimg.org/y6o9puflv/accion.png", + "Aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "Animación": "https://s13.postimg.org/5on877l87/animacion.png", + "Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png", + "Música": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "Western": "https://s23.postimg.org/lzyfbjzhn/western.png", + "Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "Guerra": "https://s4.postimg.org/n1h2jp2jh/guerra.png", + "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png", + "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png", + "Historia": "https://s15.postimg.org/fmc050h1n/historia.png", + "película de la televisión": "https://s9.postimg.org/t8xb14fb3/delatv.png", + "Action & Adventure": "https://s4.postimg.org/neu65orz1/action_adventure.png", + "Sci-Fi & Fantasy": "https://s23.postimg.org/ys5if2oez/scifi_fantasy.png", + "Suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png", + "Foreign": "https://s29.postimg.org/jdc2m158n/extranjera.png", + "Cartelera MDT": "https://s1.postimg.org/6yle12szj/cartelera.png", + "Romanticas": "https://s21.postimg.org/xfsj7ua0n/romantica.png" + } + +tcalidad = {"FULL HD": "https://s18.postimg.org/qszt3n6tl/fullhd.png", + "HD": "https://s27.postimg.org/m2dhhkrur/image.png", + "SD": "https://s29.postimg.org/l66t2pfqf/image.png" + } +host = 'http://miradetodo.io/' + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Peliculas", + action="menu_peliculas", + thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png', + fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png' + )) + + itemlist.append(item.clone(title="Series", + action="menu_series", + thumbnail='https://s27.postimg.org/iahczwgrn/series.png', + fanart='https://s27.postimg.org/iahczwgrn/series.png', + )) + + itemlist.append(item.clone(title="Buscar", action="search", + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png', + url=host + '?s=' + )) + + return itemlist + + +def menu_peliculas(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Todas", + action="lista", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + url=host + 'page/1/?s' + )) + + 
itemlist.append(item.clone(title="Generos", + action="seccion", + url=host + 'page/1/?s', + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + seccion='generos-pelicula' + )) + + itemlist.append(item.clone(title="Por Año", + action="seccion", + url=host + 'page/1/?s', + thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png', + fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png', + seccion='fecha-estreno' + )) + + itemlist.append(item.clone(title="Calidad", + action="seccion", + url=host + 'page/1/?s', + thumbnail='https://s13.postimg.org/6nzv8nlkn/calidad.png', + fanart='https://s13.postimg.org/6nzv8nlkn/calidad.png', + seccion='calidad' + )) + + return itemlist + + +def menu_series(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Todas", + action="lista", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + url=host + 'series/page/1/', + )) + + itemlist.append(item.clone(title="Generos", + action="seccion", + url=host + 'series/page/1/', + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + seccion='generos-serie' + )) + + itemlist.append(item.clone(title="Por Año", + action="seccion", + url=host + 'series/page/1/', + thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png', + fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png', + seccion='series-lanzamiento' + )) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + data = get_source(item.url) + patron = 'class=item>.*?<a href=(.*?)><div class=image>.*?<img src=(.*?) alt=(.*?) \(\d{4}.*?ttx>(.*?)' + patron += '<div class=degradado>.*?fixyear><h2>.*?<\/h2>.*?<span class=year>(.*?)<\/span><\/div>(.*?)<\/div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedyear, scrapedquality in matches: + url = scrapedurl + action = 'findvideos' + thumbnail = scrapedthumbnail + plot = scrapedplot + contentSerieName = '' + contentTitle = scrapedtitle + title = contentTitle + if scrapedquality != '': + quality = scrapertools.find_single_match(scrapedquality, 'calidad2>(.*?)<') + title = contentTitle + ' (%s)' % quality + year = scrapedyear + + if 'series' in item.url or 'series' in url: + action = 'temporadas' + contentSerieName = contentTitle + contentTitle = '' + quality = '' + + itemlist.append(Item(channel=item.channel, + action=action, + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + contentTitle=contentTitle, + contentSerieName=contentSerieName, + quality=quality, + infoLabels={'year': year} + )) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Paginacion + + if itemlist != []: + actual_page_url = item.url + next_page = scrapertools.find_single_match(data, + 'alignleft><a href=(.*?) ><\/a><\/div><div class=nav-next alignright>') + if next_page != '': + itemlist.append(Item(channel=item.channel, + action="lista", + title='Siguiente >>>', + url=next_page, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png' + )) + return itemlist + + +def seccion(item): + logger.info() + itemlist = [] + data = get_source(item.url) + if item.seccion == 'generos-pelicula': + patron = '<li class=cat-item cat-item-.*?><a href=(.*?) >(.*?<\/a> <span>.*?)<\/span><\/li>' + elif item.seccion == 'generos-serie': + patron = '<li class=cat-item cat-item-.*?><a href=(.*?\/series-genero\/.*?) 
>(.*?<\/a> <span>.*?)<\/span><\/li>' + elif item.seccion in ['fecha-estreno', 'series-lanzamiento']: + patron = '<li><a href=http:\/\/miradetodo\.io\/fecha-estreno(.*?)>(.*?)<\/a>' + elif item.seccion == 'calidad': + patron = '<li><a href=http:\/\/miradetodo\.io\/calidad(.*?)>(.*?)<\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedtitle in matches: + thumbnail = '' + if 'generos' in item.seccion: + cantidad = re.sub(r'.*?<\/a> <span>', '', scrapedtitle) + title = re.sub(r'<\/a> <span>|\d|\.', '', scrapedtitle) + url = scrapedurl + title = scrapertools.decodeHtmlentities(title) + if title in tgenero: + thumbnail = tgenero[title] + title = title + ' (%s)' % cantidad + elif item.seccion in ['series-lanzamiento', 'fecha-estreno', 'calidad']: + title = scrapedtitle + url = 'http://miradetodo.io/%s%s' % (item.seccion, scrapedurl) + if item.seccion == 'calidad' and title in tcalidad: + thumbnail = tcalidad[title] + + itemlist.append(item.clone(action='lista', + title=title, + url=url, + thumbnail=thumbnail + )) + return itemlist + + +def temporadas(item): + logger.info() + + itemlist = [] + + data = get_source(item.url) + patron = '<span class=title>.*?- Temporada (.*?)<\/span>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for temporada in matches: + title = 'Temporada %s' % temporada + contentSeasonNumber = temporada + item.infoLabels['season'] = contentSeasonNumber + itemlist.append(item.clone(action='episodiosxtemp', + title=title, + contentSeasonNumber=contentSeasonNumber + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra="episodios", + contentSerieName=item.contentSerieName, + contentSeasonNumber=contentSeasonNumber + )) + + return itemlist + + +def episodios(item): + logger.info() + + itemlist = [] + data = get_source(item.url) + + patron = '<li><div class=numerando>(\d+).*?x.*?(\d+)<\/div>.*?<a href=(.*?)> (.*?)<\/a>.*?<\/i>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedtemp, scrapedep, scrapedurl, scrapedtitle in matches: + temporada = scrapedtemp + title = temporada + 'x%s %s' % (scrapedep, scrapedtitle) + url = scrapedurl + contentEpisodeNumber = scrapedep + item.infoLabels['episode'] = contentEpisodeNumber + itemlist.append(item.clone(action='findvideos', + title=title, + url=url, + contentEpisodeNumber=contentEpisodeNumber, + )) + return itemlist + + +def episodiosxtemp(item): + logger.info() + + itemlist = [] + data = get_source(item.url) + temporada = item.contentSeasonNumber + patron = '<li><div class=numerando>%s.*?x.*?(\d+)<\/div>.*?<a href=(.*?)> (.*?)<\/a>.*?<\/i>' % temporada + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedep, scrapedurl, scrapedtitle in matches: + title = temporada + 'x%s %s' % (scrapedep, scrapedtitle) + url = scrapedurl + contentEpisodeNumber = scrapedep + item.infoLabels['episode'] = contentEpisodeNumber + itemlist.append(item.clone(action='findvideos', + title=title, + url=url, + contentEpisodeNumber=contentEpisodeNumber, + )) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + + +def findvideos(item): + logger.info() + url_list = [] + itemlist = [] + duplicados = [] + data = get_source(item.url) + src = data + patron = 'id=(?:div|player)(\d+)>.*?<iframe src=.*? data-lazy-src=(.*?) 
marginheight' + matches = re.compile(patron, re.DOTALL).findall(data) + + for option, videoitem in matches: + lang = scrapertools.find_single_match(src, + '<a href=#(?:div|player)%s.*?>.*?(Doblado|Subtitulado)<\/a>' % option) + data = get_source(videoitem) + if 'play' in videoitem: + url = scrapertools.find_single_match(data, '<span>Ver Online<.*?<li><a href=(.*?)><span class=icon>') + else: + url = scrapertools.find_single_match(data, '<iframe src=(.*?) scrolling=') + + url_list.append([url, lang]) + + for video_url in url_list: + language = video_url[1] + if 'jw.miradetodo' in video_url[0]: + data = get_source('http:' + video_url[0]) + patron = 'label:.*?(.*?),.*?file:.*?(.*?)&app.*?\}' + matches = re.compile(patron, re.DOTALL).findall(data) + + for quality, scrapedurl in matches: + quality = quality + title = item.contentTitle + ' (%s) %s' % (quality, language) + server = 'directo' + url = scrapedurl + url = url.replace('\/', '/') + subtitle = scrapertools.find_single_match(data, "tracks: \[\{file: '.*?linksub=(.*?)',label") + if url not in duplicados: + itemlist.append(item.clone(title=title, + action='play', + url=url, + quality=quality, + server=server, + subtitle=subtitle, + language=language + )) + duplicados.append(url) + elif video_url != '': + itemlist.extend(servertools.find_video_items(data=video_url[0])) + + for videoitem in itemlist: + if videoitem.server != 'directo': + + quality = item.quality + title = item.contentTitle + ' (%s) %s' % (videoitem.server, language) + if item.quality != '': + title = item.contentTitle + ' (%s) %s' % (quality, language) + videoitem.title = title + videoitem.channel = item.channel + videoitem.thumbnail = config.get_thumb("server_%s.png" % videoitem.server) + videoitem.quality = item.quality + + if item.infoLabels['mediatype'] == 'movie': + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle + )) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + try: + if texto != '': + return lista(item) + else: + return [] + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = host + 'page/1/?s' + + elif categoria == 'infantiles': + item.url = host + 'category/animacion/' + + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def get_source(url): + logger.info() + data = httptools.downloadpage(url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + return data diff --git a/plugin.video.alfa/channels/mocosoftx.json b/plugin.video.alfa/channels/mocosoftx.json new file mode 100755 index 00000000..c8d2a7a2 --- /dev/null +++ b/plugin.video.alfa/channels/mocosoftx.json @@ -0,0 +1,40 @@ +{ + "id": "mocosoftx", + "name": "MocosoftX", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "mocosoftx.png", + "banner": "mocosoftx.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "05/08/2016", + "description": "Eliminado de 
sección películas." + } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "mocosoftxuser", + "type": "text", + "label": "@30014", + "enabled": true, + "visible": true + }, + { + "id": "mocosoftxpassword", + "type": "text", + "label": "@30015", + "enabled": true, + "visible": true, + "hidden": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/mocosoftx.py b/plugin.video.alfa/channels/mocosoftx.py new file mode 100755 index 00000000..c8ad8faa --- /dev/null +++ b/plugin.video.alfa/channels/mocosoftx.py @@ -0,0 +1,208 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from core import config +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import platformtools + +MAIN_HEADERS = [] +MAIN_HEADERS.append(["Host", "mocosoftx.com"]) +MAIN_HEADERS.append(["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:8.0) Gecko/20100101 Firefox/8.0"]) +MAIN_HEADERS.append(["Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"]) +MAIN_HEADERS.append(["Accept-Language", "es-es,es;q=0.8,en-us;q=0.5,en;q=0.3"]) +MAIN_HEADERS.append(["Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7"]) +MAIN_HEADERS.append(["Connection", "keep-alive"]) + + +# Login: +# <form action="http://mocosoftx.com/foro/login2/" method="post" accept-charset="ISO-8859-1" onsubmit="hashLoginPassword(this, '3e468fdsab5d9');" > +# pst: user=blablabla&passwrd=&cookielength=-1&hash_passwrd=78e88DSe408508d22f +# doForm.hash_passwrd.value = hex_sha1(hex_sha1(doForm.user.value.php_to8bit().php_strtolower() + doForm.passwrd.value.php_to8bit()) + cur_session_id); +def login(): + # Averigua el id de sesión + data = scrapertools.cache_page("http://mocosoftx.com/foro/login/") + cur_session_id = scrapertools.get_match(data, + 'form action="[^"]+" name="frmLogin" id="frmLogin" method="post" accept-charset="ISO-8859-1" onsubmit="hashLoginPassword\(this, \'([a-z0-9]+)\'') + cur_session_id = "c95633073dc6afaa813d33b2bfeda520" + logger.info("cur_session_id=" + cur_session_id) + + # Calcula el hash del password + email = config.get_setting("mocosoftxuser", "mocosoftx") + password = config.get_setting("mocosoftxpassword", "mocosoftx") + logger.info("email=" + email) + logger.info("password=" + password) + + # doForm.hash_passwrd.value = hex_sha1(hex_sha1(doForm.user.value.php_to8bit().php_strtolower() + doForm.passwrd.value.php_to8bit()) + cur_session_id); + hash_passwrd = scrapertools.get_sha1(scrapertools.get_sha1(email.lower() + password.lower()) + cur_session_id) + logger.info("hash_passwrd=" + hash_passwrd) + + # Hace el submit del email + # post = "user="+email+"&passwrd=&cookieneverexp=on&hash_passwrd="+hash_passwrd + post = urllib.urlencode({'user': email, "passwrd": password}) + "&cookieneverexp=on&hash_passwrd=" + logger.info("post=" + post) + + headers = [] + headers.append(["Host", "mocosoftx.com"]) + headers.append(["User-Agent", + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36"]) + headers.append(["Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"]) + headers.append(["Accept-Language", "es-ES,es;q=0.8,en;q=0.6,gl;q=0.4"]) + headers.append(["Accept-Encoding", "gzip, deflate"]) + headers.append(["Connection", "keep-alive"]) + headers.append(["Referer", "http://mocosoftx.com/foro/login/"]) + headers.append(["Origin", "http://mocosoftx.com"]) + 
headers.append(["Content-Type", "application/x-www-form-urlencoded"]) + headers.append(["Content-Length", str(len(post))]) + headers.append(["Cache-Control", "max-age=0"]) + headers.append(["Upgrade-Insecure-Requests", "1"]) + + data = scrapertools.cache_page("http://mocosoftx.com/foro/login2/", post=post, headers=headers) + logger.info("data=" + data) + + return True + + +def mainlist(item): + logger.info() + itemlist = [] + + if config.get_setting("mocosoftxuser", "mocosoftx") == "": + itemlist.append( + Item(channel=item.channel, title="Habilita tu cuenta en la configuración...", action="settingCanal", + url="")) + else: + if login(): + item.url = "http://mocosoftx.com/foro/forum/" + itemlist = foro(item) + itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="")) + else: + itemlist.append( + Item(channel=item.channel, title="Cuenta incorrecta, revisa la configuración...", action="", url="", + folder=False)) + + return itemlist + + +def settingCanal(item): + return platformtools.show_channel_settings() + + +def foro(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cache_page(item.url, headers=MAIN_HEADERS) + + # Extrae los foros y subforos + patron = '<h4><a href="([^"]+)"[^>]+>([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedurl, scrapedtitle in matches: + scrapedtitle = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8") + title = ">> Foro " + scrapedtitle + url = urlparse.urljoin(item.url, scrapedurl) + # http://mocosoftx.com/foro/fotos-hentai/?PHPSESSID=nflddqf9nvbm2dd92 + if "PHPSESSID" in url: + url = scrapertools.get_match(url, "(.*?)\?PHPSESSID=") + thumbnail = "" + plot = "" + itemlist.append(Item(channel=item.channel, title=title, action="foro", url=url, plot=plot, thumbnail=thumbnail, + folder=True)) + + # Extrae los hilos individuales + patron = '<td class="icon2 windowbgb">[^<]+' + patron += '<img src="([^"]+)"[^<]+' + patron += '</td>[^<]+' + patron += '<td class="subject windowbgb2">[^<]+' + patron += '<div >[^<]+' + patron += '<span id="msg_\d+"><a href="([^"]+)">([^>]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + title = scrapedtitle + url = urlparse.urljoin(item.url, scrapedurl) + if "PHPSESSID" in url: + url = scrapertools.get_match(url, "(.*?)\?PHPSESSID=") + thumbnail = scrapedthumbnail + plot = "" + itemlist.append( + Item(channel=item.channel, title=title, action="findvideos", url=url, plot=plot, thumbnail=thumbnail, + folder=True)) + + # Extrae la marca de siguiente página + # <a class="navPages" href="http://mocosoftx.com/foro/peliculas-xxx-online-(completas)/20/?PHPSESSID=rpejdrj1trngh0sjdp08ds0ef7">2</a> + patronvideos = '<strong>\d+</strong[^<]+<a class="navPages" href="([^"]+)">' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + if len(matches) > 0: + scrapedtitle = ">> Página siguiente" + scrapedurl = urlparse.urljoin(item.url, matches[0]) + if "PHPSESSID" in scrapedurl: + scrapedurl = scrapertools.get_match(scrapedurl, "(.*?)\?PHPSESSID=") + scrapedthumbnail = "" + scrapedplot = "" + itemlist.append(Item(channel=item.channel, title=scrapedtitle, action="foro", url=scrapedurl, plot=scrapedplot, + thumbnail=scrapedthumbnail, folder=True)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + # 
Busca el thumbnail y el argumento + data = scrapertools.cache_page(item.url) + logger.info("data=" + data) + + try: + thumbnail = scrapertools.get_match(data, '<div class="post">.*?<img src="([^"]+)"') + except: + thumbnail = "" + + plot = "" + + # Ahora busca los vídeos + itemlist = servertools.find_video_items(data=data) + + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.plot = plot + videoitem.thumbnail = thumbnail + videoitem.fulltitle = item.title + + parsed_url = urlparse.urlparse(videoitem.url) + fichero = parsed_url.path + partes = fichero.split("/") + titulo = partes[len(partes) - 1] + videoitem.title = titulo + " - [" + videoitem.server + "]" + + if not itemlist: + + patron = '<a href="([^"]+)" class="bbc_link" target="_blank"><span style="color: orange;" class="bbc_color">' + matches = re.compile(patron, re.DOTALL).findall(data) + if matches: + data = scrapertools.cache_page(matches[0]) + logger.info(data) + itemlist = servertools.find_video_items(data=data) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.plot = plot + videoitem.thumbnail = thumbnail + videoitem.fulltitle = item.title + + parsed_url = urlparse.urlparse(videoitem.url) + fichero = parsed_url.path + partes = fichero.split("/") + titulo = partes[len(partes) - 1] + videoitem.title = titulo + " - [" + videoitem.server + "]" + + return itemlist diff --git a/plugin.video.alfa/channels/mundoflv.json b/plugin.video.alfa/channels/mundoflv.json new file mode 100755 index 00000000..3f02a6e0 --- /dev/null +++ b/plugin.video.alfa/channels/mundoflv.json @@ -0,0 +1,69 @@ +{ + "id": "mundoflv", + "name": "MundoFlv", + "compatible": { + "addon_version": "4.3" + }, + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s32.postimg.org/h1ewz9hhx/mundoflv.png", + "banner": "mundoflv.png", + "version": 1, + "changes": [ + { + "date": "24/06/2017", + "description": "Cambios para autoplay" + }, + { + "date": "06/06/2017", + "description": "Compatibilidad con AutoPlay" + }, + { + "date": "03/06/2017", + "description": "Reparado por mala subida" + }, + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "28/01/2017", + "description": "Release." 
+ } + ], + "categories": [ + "latino", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "Español", + "VOS", + "VOSE", + "VO" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/mundoflv.py b/plugin.video.alfa/channels/mundoflv.py new file mode 100755 index 00000000..1d237b9d --- /dev/null +++ b/plugin.video.alfa/channels/mundoflv.py @@ -0,0 +1,651 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from channels import autoplay +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +host = "http://mundoflv.com" +thumbmx = 'http://flags.fmcdn.net/data/flags/normal/mx.png' +thumbes = 'http://flags.fmcdn.net/data/flags/normal/es.png' +thumben = 'http://flags.fmcdn.net/data/flags/normal/gb.png' +thumbsub = 'https://s32.postimg.org/nzstk8z11/sub.png' +thumbtodos = 'https://s29.postimg.org/4p8j2pkdj/todos.png' +patrones = ['<<meta property="og:image" content="([^"]+)" \/>" \/>', '\/><\/a>([^*]+)<p><\/p>.*'] + +IDIOMAS = {'la': 'Latino', + 'es': 'Español', + 'sub': 'VOS', + 'vosi': 'VOSE', + 'en': 'VO' + } +list_language = IDIOMAS.values() +list_quality = [] +list_servers = [ + 'openload', + 'gamovideo', + 'powvideo', + 'streamplay', + 'streamin', + 'streame', + 'flashx', + 'nowvideo' +] + +list_quality = ['default'] + +audio = {'la': '[COLOR limegreen]LATINO[/COLOR]', 'es': '[COLOR yellow]ESPAÑOL[/COLOR]', + 'sub': '[COLOR orange]ORIGINAL SUBTITULADO[/COLOR]', 'en': '[COLOR red]Original[/COLOR]', + 'vosi': '[COLOR red]ORIGINAL SUBTITULADO INGLES[/COLOR]' + } + +headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append(Item(channel=item.channel, + title="Series", + action="todas", + url=host, + thumbnail='https://s27.postimg.org/iahczwgrn/series.png', + fanart='https://s27.postimg.org/iahczwgrn/series.png' + )) + + itemlist.append(Item(channel=item.channel, + title="Alfabetico", + action="letras", + url=host, + thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', + fanart='https://s17.postimg.org/fwi1y99en/a-z.png' + )) + + itemlist.append(Item(channel=item.channel, + title="Mas vistas", + action="masvistas", + url=host, + thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png', + fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png' + )) + + itemlist.append(Item(channel=item.channel, + title="Recomendadas", + action="recomendadas", + url=host, + thumbnail='https://s12.postimg.org/s881laywd/recomendadas.png', + fanart='https://s12.postimg.org/s881laywd/recomendadas.png' + )) + + itemlist.append(Item(channel=item.channel, + title="Ultimas Agregadas", + action="ultimas", + url=host, + thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png' + )) + + itemlist.append(Item(channel=item.channel, + title="Buscar", + action="search", + url='http://mundoflv.com/?s=', + 
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + if autoplay.context: + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def todas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = 'class="item"><a href="(.*?)" title="(.*?)(?:\|.*?|\(.*?|- )(\d{4})(?:\)|-)".*?' + patron += '<div class="img">.*?' + patron += '<img src="([^"]+)" alt.*?>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedyear, scrapedthumbnail in matches: + url = scrapedurl + title = scrapertools.decodeHtmlentities(scrapedtitle) + title = title.rstrip(' ') + thumbnail = scrapedthumbnail + year = scrapedyear + plot = '' + + fanart = 'https://s32.postimg.org/h1ewz9hhx/mundoflv.png' + itemlist.append( + Item(channel=item.channel, + action="temporadas", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentSerieName=title, + infoLabels={'year': year}, + show=title, + list_language=list_language, + context=autoplay.context + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + itemlist = fail_tmdb(itemlist) + # Paginacion + next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />') + + if next_page_url != "": + itemlist.append(Item(channel=item.channel, + action="todas", + title=">> Página siguiente", + url=next_page_url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png' + )) + + return itemlist + + +def letras(item): + thumbletras = {'0-9': 'https://s32.postimg.org/drojt686d/image.png', + '0 - 9': 'https://s32.postimg.org/drojt686d/image.png', + '#': 'https://s32.postimg.org/drojt686d/image.png', + 'a': 'https://s32.postimg.org/llp5ekfz9/image.png', + 'b': 'https://s32.postimg.org/y1qgm1yp1/image.png', + 'c': 'https://s32.postimg.org/vlon87gmd/image.png', + 'd': 'https://s32.postimg.org/3zlvnix9h/image.png', + 'e': 'https://s32.postimg.org/bgv32qmsl/image.png', + 'f': 'https://s32.postimg.org/y6u7vq605/image.png', + 'g': 'https://s32.postimg.org/9237ib6jp/image.png', + 'h': 'https://s32.postimg.org/812yt6pk5/image.png', + 'i': 'https://s32.postimg.org/6nbbxvqat/image.png', + 'j': 'https://s32.postimg.org/axpztgvdx/image.png', + 'k': 'https://s32.postimg.org/976yrzdut/image.png', + 'l': 'https://s32.postimg.org/fmal2e9yd/image.png', + 'm': 'https://s32.postimg.org/m19lz2go5/image.png', + 'n': 'https://s32.postimg.org/b2ycgvs2t/image.png', + 'o': 'https://s32.postimg.org/c6igsucpx/image.png', + 'p': 'https://s32.postimg.org/jnro82291/image.png', + 'q': 'https://s32.postimg.org/ve5lpfv1h/image.png', + 'r': 'https://s32.postimg.org/nmovqvqw5/image.png', + 's': 'https://s32.postimg.org/zd2t89jol/image.png', + 't': 'https://s32.postimg.org/wk9lo8jc5/image.png', + 'u': 'https://s32.postimg.org/w8s5bh2w5/image.png', + 'v': 'https://s32.postimg.org/e7dlrey91/image.png', + 'w': 'https://s32.postimg.org/fnp49k15x/image.png', + 'x': 'https://s32.postimg.org/dkep1w1d1/image.png', + 'y': 'https://s32.postimg.org/um7j3zg85/image.png', + 'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'} + + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + + patron = '<li><a.*?href="([^"]+)">([^<]+)<\/a><\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + title = scrapedtitle + if scrapedtitle.lower() in 
thumbletras: + thumbnail = thumbletras[scrapedtitle.lower()] + else: + thumbnail = '' + plot = "" + fanart = item.fanart + itemlist.append( + Item(channel=item.channel, + action="todas", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentSerieName=title + )) + + return itemlist + + +def fail_tmdb(itemlist): + logger.info() + realplot = '' + for item in itemlist: + if item.infoLabels['plot'] == '': + data = httptools.downloadpage(item.url).data + if item.thumbnail == '': + item.thumbnail = scrapertools.find_single_match(data, patrones[0]) + realplot = scrapertools.find_single_match(data, patrones[1]) + item.plot = scrapertools.remove_htmltags(realplot) + return itemlist + + +def masvistas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<li><a href="(?!http:\/\/mundoflv\.com\/tag\/)(.*?)">.*?' + patron += 'div class="im">.*?' + patron += '<img src=".*?" alt="(.*?)(?:\|.*?|\(.*?|- )(\d{4})|-" \/>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedyear in matches: + url = scrapedurl + title = scrapedtitle + fanart = item.fanart + contentSerieName = scrapedtitle + year = scrapedyear + thumbnail = '' + plot = 'nada' + itemlist.append( + Item(channel=item.channel, + action="temporadas", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentSerieName=contentSerieName, + infoLabels={'year': year}, + context=autoplay.context + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + itemlist = fail_tmdb(itemlist) + return itemlist + + +def recomendadas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + realplot = '' + patron = '<li><A HREF="([^"]+)"><.*?>Ver ([^<]+)<\/A><\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + data = httptools.downloadpage(scrapedurl).data + thumbnail = scrapertools.get_match(data, '<meta property="og:image" content="([^"]+)".*?>') + realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*') + plot = scrapertools.remove_htmltags(realplot) + title = scrapedtitle.replace('online', '') + title = scrapertools.decodeHtmlentities(title) + fanart = item.fanart + itemlist.append( + Item(channel=item.channel, + action="temporadas", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentSerieName=title, + context=autoplay.context + )) + + return itemlist + + +def ultimas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + realplot = '' + patron = '<li><A HREF="([^"]+)"> <.*?>Ver ([^<]+)<\/A><\/li>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + data = httptools.downloadpage(scrapedurl).data + thumbnail = scrapertools.get_match(data, '<meta property="og:image" content="([^"]+)".*?>') + realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*') + plot = scrapertools.remove_htmltags(realplot) + plot = "" + title = scrapedtitle.replace('online', '') + title = scrapertools.decodeHtmlentities(title) + fanart = item.fanart + itemlist.append( + Item(channel=item.channel, + action="idioma", + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentSerieName=title, + context=autoplay.context + )) 
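+    # Note: each match above triggers one extra page download just to scrape
+    # the og:image thumbnail; the plot scraped alongside it is discarded
+    # (plot = ""), presumably left to be filled in by metadata lookups later
+    # in the navigation flow.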
+ + return itemlist + + +def temporadas(item): + logger.info() + + itemlist = [] + templist = [] + data = httptools.downloadpage(item.url).data + realplot = '' + patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)</button>" + + matches = re.compile(patron, re.DOTALL).findall(data) + + serieid = scrapertools.find_single_match(data, 'data-nonce="(.*?)"') + + item.thumbnail = item.thumbvid + infoLabels = item.infoLabels + for scrapedtitle in matches: + url = 'http://mundoflv.com/wp-content/themes/wpRafael/includes/capitulos.php?serie=' + serieid + \ + '&temporada=' + scrapedtitle + title = 'Temporada ' + scrapertools.decodeHtmlentities(scrapedtitle) + contentSeasonNumber = scrapedtitle + thumbnail = item.thumbnail + realplot = scrapertools.find_single_match(data, '\/><\/a>([^*]+)<p><\/p>.*') + plot = '' + fanart = '' + itemlist.append( + Item(channel=item.channel, + action="episodiosxtemp", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + extra1=item.extra1, + contentSerieName=item.contentSerieName, + contentSeasonNumber=contentSeasonNumber, + infoLabels={'season': contentSeasonNumber}, + context=item.context + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra="episodios", + contentSerieName=item.contentSerieName, + extra1=item.extra1 + )) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + templist = temporadas(item) + for tempitem in templist: + itemlist += episodiosxtemp(tempitem) + + return itemlist + + +def episodiosxtemp(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = "<button class='classnamer' onclick='javascript: mostrarenlaces\(([^\)]+)\).*?<" + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedtitle in matches: + item.url = item.url.replace("&sr", "") + item.url = item.url.replace("capitulos", "enlaces") + url = item.url + '&capitulo=' + scrapedtitle + contentEpisodeNumber = scrapedtitle + title = item.contentSerieName + ' ' + item.contentSeasonNumber + 'x' + contentEpisodeNumber + thumbnail = item.thumbnail + plot = '' + infoLabels = item.infoLabels + infoLabels['episode'] = contentEpisodeNumber + itemlist.append( + Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=thumbnail, + plot=plot, + extra1=item.extra1, + idioma='', + contentSerieName=item.contentSerieName, + contentSeasonNumber=item.contentSeasonNumber, + infoLabels=infoLabels, + show=item.contentSerieName, + list_language=list_language, + context=item.context + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + + +def idioma(item): + logger.info() + + itemlist = [] + thumbvid = item.thumbnail + itemlist.append( + Item(channel=item.channel, + title="Latino", + action="temporadas", + url=item.url, + thumbnail=thumbmx, + fanart='', + extra1='la', + fulltitle=item.title, + thumbvid=thumbvid, + contentSerieName=item.contentSerieName, + infoLabels=item.infoLabels, + language='la' + )) + + itemlist.append( + Item(channel=item.channel, + title="Español", + action="temporadas", + url=item.url, + thumbnail=thumbes, + fanart='', + extra1='es', + fulltitle=item.title, + thumbvid=thumbvid, + 
contentSerieName=item.contentSerieName,
+             language='es'
+             ))
+
+    itemlist.append(
+        Item(channel=item.channel,
+             title="Subtitulado",
+             action="temporadas",
+             url=item.url,
+             thumbnail=thumbsub,
+             fanart='',
+             extra1='sub',
+             fulltitle=item.title,
+             thumbvid=thumbvid,
+             contentSerieName=item.contentSerieName,
+             language='sub'
+             ))
+
+    itemlist.append(
+        Item(channel=item.channel,
+             title="Original",
+             action="temporadas",
+             url=item.url,
+             thumbnail=thumben,
+             fanart='',
+             extra1='en',
+             fulltitle=item.title,
+             thumbvid=thumbvid,
+             contentSerieName=item.contentSerieName,
+             language='en'
+             ))
+
+    itemlist.append(
+        Item(channel=item.channel,
+             title="Original Subtitulado en Ingles",
+             action="temporadas",
+             url=item.url,
+             thumbnail=thumben,
+             fanart='',
+             extra1='vosi',
+             fulltitle=item.title,
+             thumbvid=thumbvid,
+             contentSerieName=item.contentSerieName,
+             language='vosi'
+             ))
+
+    itemlist.append(
+        Item(channel=item.channel,
+             title="Todo",
+             action="temporadas",
+             url=item.url,
+             thumbnail=thumbtodos,
+             fanart='',
+             extra1='all',
+             fulltitle=item.title,
+             thumbvid=thumbvid,
+             contentSerieName=item.contentSerieName,
+             language='all'
+             ))
+
+    return itemlist
+
+
+def busqueda(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    patron = '<img class=.*?src="([^"]+)" alt="(.*?)(?:\|.*?|\(.*?|")>.*?h3><a href="(.*?)".*?class="year">(.*?)<\/span>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    scrapertools.printMatches(matches)
+
+    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
+        url = scrapedurl
+        title = scrapertools.decodeHtmlentities(scrapedtitle)
+        thumbnail = scrapedthumbnail
+        plot = ''
+        year = scrapedyear
+        itemlist.append(
+            Item(channel=item.channel,
+                 action="idioma",
+                 title=title,
+                 fulltitle=title,
+                 url=url,
+                 thumbnail=thumbnail,
+                 plot=plot,
+                 contentSerieName=title,
+                 infoLabels={'year': year},
+                 context=autoplay.context
+                 ))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    itemlist = fail_tmdb(itemlist)
+
+    # Pagination
+    next_page_url = scrapertools.find_single_match(data,
+                                                   "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente ›</a>")
+    if next_page_url != "":
+        item.url = next_page_url
+        itemlist.append(
+            Item(channel=item.channel,
+                 action="busqueda",
+                 title=">> Página siguiente",
+                 url=next_page_url,
+                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
+                 ))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+    if texto != '':
+        return busqueda(item)
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    patron = 'href="([^"]+)".*?domain=.*?>([^<]+).*?gold">([^<]+)<'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedserver, scrapedidioma in matches:
+        url = scrapedurl
+        idioma = audio[scrapedidioma]
+        server = scrapedserver.strip(' ')
+        title = item.contentSerieName + ' ' + str(item.contentSeasonNumber) + 'x' + str(
+            item.contentEpisodeNumber) + ' ' + idioma + ' (' + server + ')'
+
+        # fulltitle previously read item.ContentSeriename, a misspelt attribute
+        # that is always empty; corrected to the real contentSerieName field
+        new_item = item.clone(title=title,
+                              url=url,
+                              action="play",
+                              language=IDIOMAS[scrapedidioma],
+                              server=server,
+                              quality='default',
+                              fulltitle=item.contentSerieName,
+                              )
+
+        # Required by FilterTools
+        itemlist = filtertools.get_link(itemlist, new_item, list_language)
+
+    for videoitem in itemlist:
+        videoitem.infoLabels = item.infoLabels
+        videoitem.thumbnail = config.get_thumb("server_%s.png" % videoitem.server)
+
+    # Required by AutoPlay
+    autoplay.start(itemlist, item)
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+
+    data = httptools.downloadpage(item.url).data
+    # The original test used "or", which is true for every server and made the
+    # meta-refresh branch unreachable; "and" restores the intended routing for
+    # streamplay/streame links.
+    if 'streamplay' not in item.server and 'streame' not in item.server:
+        url = scrapertools.find_single_match(data, '<(?:IFRAME|iframe).*?(?:SRC|src)=*([^ ]+) (?!style|STYLE)')
+    else:
+        url = scrapertools.find_single_match(data, '<meta http-equiv="refresh" content="0; url=([^"]+)">')
+
+    itemlist = servertools.find_video_items(data=url)
+    for videoitem in itemlist:
+        videoitem.infoLabels = item.infoLabels
+        videoitem.title = item.title
+        videoitem.thumbnail = videoitem.infoLabels['thumbnail']
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/newpct.json b/plugin.video.alfa/channels/newpct.json
new file mode 100755
index 00000000..cd8f5128
--- /dev/null
+++ b/plugin.video.alfa/channels/newpct.json
@@ -0,0 +1,36 @@
+{
+    "id": "newpct",
+    "name": "Newpct",
+    "active": true,
+    "adult": false,
+    "language": "es",
+    "thumbnail": "newpct.png",
+    "banner": "newpct.png",
+    "version": 1,
+    "changes": [
+        {
+            "date": "15/03/2017",
+            "description": "limpieza código"
+        },
+        {
+            "date": "01/07/2016",
+            "description": "Eliminado código innecesario."
+        }
+    ],
+    "categories": [
+        "movie",
+        "tvshow",
+        "documentary",
+        "anime"
+    ],
+    "settings": [
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": false,
+            "enabled": true,
+            "visible": true
+        }
+    ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/newpct.py b/plugin.video.alfa/channels/newpct.py
new file mode 100755
index 00000000..f3a18502
--- /dev/null
+++ b/plugin.video.alfa/channels/newpct.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+import urlparse
+
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+
+def mainlist(item):
+    logger.info()
+
+    itemlist = []
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas"))
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Series"))
+    itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url="http://www.newpct.com/anime/",
+                         viewmode="movie_with_plot"))
+    itemlist.append(
+        Item(channel=item.channel, action="listado", title="Documentales", url="http://www.newpct.com/documentales/",
+             viewmode="movie_with_plot"))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
+
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+
+    item.url = "http://www.newpct.com/buscar-descargas/%s" % (texto)
+    try:
+        return buscador(item)
+    # Catch the exception so a failing channel does not break the global search
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+def buscador(item):
+    logger.info()
+    itemlist = []
+
+    # Download the page
+    data = scrapertools.cache_page(item.url)
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    # <td class="center" style="border-bottom:solid 1px cyan;">14-09-14</td><td style="border-bottom:solid 1px cyan;"><strong><a href="http://www.newpct.com/descargar-pelicula/malefica-3d-sbs/" title="Más información sobre Malefica 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]"> <span class="searchTerm">Malefica</span> 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS
5.1-Ingles+Subs][ES-EN]</a></strong></td><td class="center" style="border-bottom:solid 1px cyan;">10.9 GB</td><td style="border-bottom:solid 1px cyan;"><a href="http://tumejorserie.com/descargar/index.php?link=torrents/059784.torrent" title="Descargar Malefica 3D SBS [BluRay 1080p][DTS 5.1-AC3 5.1 Castellano DTS 5.1-Ingles+Subs][ES-EN]"><img src="http://newpct.com/v2/imagenes//buttons/download.png" + + patron = '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?' # createdate + patron += '<td class="center" style="border-bottom:solid 1px cyan;">([^<]+)</td>.*?' # info + patron += '<a href="([^"]+)" ' # url + patron += 'title="Descargar([^"]+)">' # title + patron += '<img src="([^"]+)"' # thumbnail + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedcreatedate, scrapedinfo, scrapedurl, scrapedtitle, scrapedthumbnail in matches: + scrapedtitle = scrapedtitle + "(Tamaño:" + scrapedinfo + "--" + scrapedcreatedate + ")" + itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent", + thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, folder=True)) + + from core import servertools + itemlist.extend(servertools.find_video_items(data=data)) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.action = "play" + videoitem.folder = False + + return itemlist + + +def submenu(item): + logger.info() + itemlist = [] + + if item.title == "Películas": + itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas DVDRIP-BRRIP Castellano", + url="http://www.newpct.com/peliculas-castellano/peliculas-rip/", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas Latino", + url="http://www.newpct.com/peliculas-latino/", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Estrenos de Cine Castellano", + url="http://www.newpct.com/peliculas-castellano/estrenos-de-cine/", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas Alta Definicion HD", + url="http://www.newpct.com/cine-alta-definicion-hd/", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas en 3D HD", + url="http://www.newpct.com/peliculas-en-3d-hd/", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas DVDFULL", + url="http://www.newpct.com/peliculas-castellano/peliculas-dvd/", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Peliculas V.O.Subtituladas", + url="http://www.newpct.com/peliculas-vo/", viewmode="movie_with_plot")) + else: + itemlist.append( + Item(channel=item.channel, action="listado", title="HDTV Castellano", url="http://www.newpct.com/series/", + category="serie", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Miniseries Castellano", + url="http://www.newpct.com/miniseries-es/", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Series TV - V.O.S.E", + url="http://www.newpct.com/series-vo/", category="serie", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="listado", title="Últimos Capítulos HD", + url="http://www.newpct.com/series-alta-definicion-hd/", category="serie", + viewmode="movie_with_plot")) + 
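+        # Entries tagged category="serie" are routed by listado() to the
+        # episodios() action instead of findvideos(). A hypothetical
+        # table-driven sketch of the repeated appends above (the name
+        # series_menu is illustrative only, not part of the channel):
+        #     series_menu = [("HDTV Castellano", "http://www.newpct.com/series/"),
+        #                    ("Miniseries Castellano", "http://www.newpct.com/miniseries-es/")]
+        #     for menu_title, menu_url in series_menu:
+        #         itemlist.append(Item(channel=item.channel, action="listado",
+        #                              title=menu_title, url=menu_url,
+        #                              category="serie", viewmode="movie_with_plot"))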
itemlist.append(Item(channel=item.channel, action="series", title="Series HD [A-Z]", + url="http://www.newpct.com/index.php?l=torrentListByCategory&subcategory_s=1469&more=listar", + category="serie")) + return itemlist + + +def listado(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + + ''' + <li> + <a href='http://www.newpct.com/descargar-pelicula/la-pequena-venecia/'> + <div class='boxgrid captionb'> + <img src='http://images.newpct.com/banco_de_imagenes/destacados/038707/la-pequeña-venecia--dvdrip--ac3-5-1-español-castellano--2012-.jpg' alt='Descargar Peliculas Castellano » Películas RIP La Pequeña Venecia [DVDrip][AC3 5.1 Español Castellano][2012]' /> + <div class='cover boxcaption'> + <h3>La Pequeña Venecia </h3> + <p>Peliculas Castellano<br/> + Calidad: DVDRIP AC3 5.1<br> + Tamaño: 1.1 GB<br> + Idioma : Español Castellano + </p> + </div> + </div> + </a> + <div id='bot-desc'> + <div id='tinfo'> + <a class='youtube' href='#' rel='gx9EKDC0UFQ' title='Ver Trailer' alt='Ver Trailer'> + <img style='width:25px;' src='http://www.newpct.com/images.inc/images/playm2.gif'></a> + </div> + <div id='tdescargar' ><a class='atdescargar' href='http://www.newpct.com/descargar-pelicula/la-pequena-venecia/'>DESCARGAR</a></div> + </div> + </li> + ''' + patron = "<li[^<]+" + patron += "<a href='([^']+)'[^<]+" + patron += "<div class='boxgrid captionb'[^<]+" + patron += "<img src='([^']+)'[^<]+" + patron += "<div class='cover boxcaption'[^<]+" + patron += '<h3>([^<]+)</h3>(.*?)</div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches: + title = scrapedtitle.strip() + title = unicode(title, "iso-8859-1", errors="replace").encode("utf-8") + + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = scrapertools.htmlclean(scrapedplot).strip() + plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8") + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + if item.category == "serie": + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot)) + else: + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot)) + + # Página siguiente + ''' + GET /include.inc/ajax.php/orderCategory.php?type=todo&leter=&sql=SELECT+DISTINCT+++%09%09%09%09%09%09torrentID%2C+++%09%09%09%09%09%09torrentCategoryID%2C+++%09%09%09%09%09%09torrentCategoryIDR%2C+++%09%09%09%09%09%09torrentImageID%2C+++%09%09%09%09%09%09torrentName%2C+++%09%09%09%09%09%09guid%2C+++%09%09%09%09%09%09torrentShortName%2C++%09%09%09%09%09%09torrentLanguage%2C++%09%09%09%09%09%09torrentSize%2C++%09%09%09%09%09%09calidad+as+calidad_%2C++%09%09%09%09%09%09torrentDescription%2C++%09%09%09%09%09%09torrentViews%2C++%09%09%09%09%09%09rating%2C++%09%09%09%09%09%09n_votos%2C++%09%09%09%09%09%09vistas_hoy%2C++%09%09%09%09%09%09vistas_ayer%2C++%09%09%09%09%09%09vistas_semana%2C++%09%09%09%09%09%09vistas_mes++%09%09%09%09++FROM+torrentsFiles+as+t+WHERE++(torrentStatus+%3D+1+OR+torrentStatus+%3D+2)++AND+(torrentCategoryID+IN+(1537%2C+758%2C+1105%2C+760%2C+1225))++++ORDER+BY+torrentDateAdded++DESC++LIMIT+0%2C+50&pag=3&tot=&ban=3&cate=1225 HTTP/1.1 + Host: www.newpct.com + User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0 + Accept: */* + Accept-Language: 
es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3 + Accept-Encoding: gzip, deflate + X-Requested-With: XMLHttpRequest + Referer: http://www.newpct.com/peliculas-castellano/peliculas-rip/ + Cookie: adbooth_popunder=5%7CSat%2C%2009%20Mar%202013%2018%3A23%3A22%20GMT + Connection: keep-alive + ''' + + ''' + function orderCategory(type,leter,pag,other) + { + + + if(leter=='buscar') + { + leter = document.getElementById('word').value; + } + if(type=='todo') + { + document.getElementById('todo').className = "active_todo"; + } + if(type=='letter') + { + switch(leter) + { + case '09': + document.getElementById('09').className = "active_num"; + break; + default: + document.getElementById(leter).className = "active_a"; + break; + } + } + + var parametros = { + "type" : type, + "leter" : leter, + "sql" : "SELECT DISTINCT torrentID, torrentCategoryID, torrentCategoryIDR, torrentImageID, torrentName, guid, torrentShortName, torrentLanguage, torrentSize, calidad as calidad_, torrentDescription, torrentViews, rating, n_votos, vistas_hoy, vistas_ayer, vistas_semana, vistas_mes FROM torrentsFiles as t WHERE (torrentStatus = 1 OR torrentStatus = 2) AND (torrentCategoryID IN (1537, 758, 1105, 760, 1225)) ORDER BY torrentDateAdded DESC LIMIT 0, 50", + "pag" : pag, + "tot" : '', + "ban" : '3', + "other": other, + "cate" : '1225' + + }; + //alert(type+leter); + + $('#content-category').html('<div style="margin:100px auto;width:100px;height:100px;"><img src="http://www.newpct.com/images.inc/images/ajax-loader.gif"/></div>'); + var page = $(this).attr('data'); + var dataString = 'page='+page; + + $.ajax({ + type: "GET", + url: 'http://www.newpct.com/include.inc/ajax.php/orderCategory.php', + data: parametros, + success: function(data) { + + //Cargamos finalmente el contenido deseado + $('#content-category').fadeIn(1000).html(data); + } + }); + + } + ''' + if item.extra != "": + bloque = item.extra + else: + bloque = scrapertools.get_match(data, "function orderCategory(.*?)\}\)\;") + logger.info("bloque=" + bloque) + param_type = scrapertools.get_match(data, "<a href='javascript:;' onclick=\"orderCategory\('([^']+)'[^>]+> >> </a>") + logger.info("param_type=" + param_type) + param_leter = scrapertools.get_match(data, + "<a href='javascript:;' onclick=\"orderCategory\('[^']+','([^']*)'[^>]+> >> </a>") + logger.info("param_leter=" + param_leter) + param_pag = scrapertools.get_match(data, + "<a href='javascript:;' onclick=\"orderCategory\('[^']+','[^']*','([^']+)'[^>]+> >> </a>") + logger.info("param_pag=" + param_pag) + param_total = scrapertools.get_match(bloque, '"total"\s*\:\s*\'([^\']+)') + logger.info("param_sql=" + param_total) + param_sql = scrapertools.get_match(bloque, '"sql"\s*\:\s*\'([^\']+)') + logger.info("param_sql=" + param_sql) + param_tot = scrapertools.get_match(bloque, "\"tot\"\s*\:\s*'([^']*)'") + logger.info("param_tot=" + param_tot) + param_ban = scrapertools.get_match(bloque, "\"ban\"\s*\:\s*'([^']+)'") + logger.info("param_ban=" + param_ban) + param_cate = scrapertools.get_match(bloque, "\"cate\"\s*\:\s*'([^']+)'") + logger.info("param_cate=" + param_cate) + base_url = scrapertools.get_match(bloque, "url\s*\:\s*'([^']+)'") + base_url = re.sub("../..", "http://www.newpct.com", base_url, count=1) + logger.info("base_url=" + base_url) + # 
http://www.newpct.com/include.inc/ajax.php/orderCategory.php?type=todo&leter=&sql=SELECT+DISTINCT+++%09%09%09%09%09%09torrentID%2C+++%09%09%09%09%09%09torrentCategoryID%2C+++%09%09%09%09%09%09torrentCategoryIDR%2C+++%09%09%09%09%09%09torrentImageID%2C+++%09%09%09%09%09%09torrentName%2C+++%09%09%09%09%09%09guid%2C+++%09%09%09%09%09%09torrentShortName%2C++%09%09%09%09%09%09torrentLanguage%2C++%09%09%09%09%09%09torrentSize%2C++%09%09%09%09%09%09calidad+as+calidad_%2C++%09%09%09%09%09%09torrentDescription%2C++%09%09%09%09%09%09torrentViews%2C++%09%09%09%09%09%09rating%2C++%09%09%09%09%09%09n_votos%2C++%09%09%09%09%09%09vistas_hoy%2C++%09%09%09%09%09%09vistas_ayer%2C++%09%09%09%09%09%09vistas_semana%2C++%09%09%09%09%09%09vistas_mes++%09%09%09%09++FROM+torrentsFiles+as+t+WHERE++(torrentStatus+%3D+1+OR+torrentStatus+%3D+2)++AND+(torrentCategoryID+IN+(1537%2C+758%2C+1105%2C+760%2C+1225))++++ORDER+BY+torrentDateAdded++DESC++LIMIT+0%2C+50&pag=3&tot=&ban=3&cate=1225 + url_next_page = base_url + "?" + urllib.urlencode( + {"total": param_total, "type": param_type, "leter": param_leter, "sql": param_sql, "pag": param_pag, + "tot": param_tot, "ban": param_ban, "cate": param_cate}) + logger.info("url_next_page=" + url_next_page) + if item.category == "serie": + itemlist.append( + Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page, extra=bloque, + category="serie", viewmode="movie_with_plot")) + else: + itemlist.append( + Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page, extra=bloque, + viewmode="movie_with_plot")) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + # Lista menú Series de la A-Z + data = scrapertools.cache_page(item.url) + patron = '<div id="content-abc">(.*?)<\/div>' + data = re.compile(patron, re.DOTALL | re.M).findall(data) + patron = 'id="([^"]+)".*?>([^"]+)<\/a>' + matches = re.compile(patron, re.DOTALL | re.M).findall(data[0]) + for id, scrapedtitle in matches: + url_base = "http://www.newpct.com/include.inc/ajax.php/orderCategory.php?total=9&type=letter&leter=%s&sql=+%09%09SELECT++t.torrentID%2C++%09%09%09%09t.torrentCategoryID%2C++%09%09%09%09t.torrentCategoryIDR%2C++%09%09%09%09t.torrentImageID%2C++%09%09%09%09t.torrentName%2C++%09%09%09%09t.guid%2C++%09%09%09%09t.torrentShortName%2C+%09%09%09%09t.torrentLanguage%2C+%09%09%09%09t.torrentSize%2C+%09%09%09%09t.calidad+as+calidad_%2C+%09%09%09%09t.torrentDescription%2C+%09%09%09%09t.torrentViews%2C+%09%09%09%09t.rating%2C+%09%09%09%09t.n_votos%2C+%09%09%09%09t.vistas_hoy%2C+%09%09%09%09t.vistas_ayer%2C+%09%09%09%09t.vistas_semana%2C+%09%09%09%09t.vistas_mes%2C+%09%09%09%09t.imagen+FROM+torrentsFiles+as+t++%09%09LEFT+JOIN+torrentsCategories+as+tc+ON+(t.torrentCategoryID+%3D+tc.categoryID)++%09%09INNER+JOIN++%09%09(+%09%09%09SELECT+torrentID+%09%09%09FROM+torrentsFiles++%09%09%09WHERE++torrentCategoryIDR+%3D+1469+%09%09%09ORDER+BY+torrentID+DESC+%09%09)t1+ON+t1.torrentID+%3D+t.torrentID+WHERE+(t.torrentStatus+%3D+1+OR+t.torrentStatus+%3D+2)+AND+t.home_active+%3D+0++AND+tc.categoryIDR+%3D+1469+GROUP+BY+t.torrentCategoryID+ORDER+BY+t.torrentID+DESC+LIMIT+0%2C+50&pag=&tot=&ban=3&cate=1469" + scrapedurl = url_base.replace("%s", id) + if id != "todo": itemlist.append( + Item(channel=item.channel, action="listaseries", title=scrapedtitle, url=scrapedurl, folder=True)) + + return itemlist + + +def listaseries(item): + logger.info() + itemlist = [] + + data = scrapertools.downloadpageGzip(item.url) + patron = "<li[^<]+<a 
href='([^']+)'>.*?<img src='([^']+)'.*?<h3>([^']+)<\/h3>" + matches = re.compile(patron, re.DOTALL | re.M).findall(data) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, folder=True)) + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + patron = "<ul style='display:none;'.*?>(.*?)<\/ul>" + data = re.compile(patron, re.DOTALL | re.M).findall(data) + patron = "<a href='([^']+)'.*?title='([^']+)'" + for index in range(len(data)): + matches = re.compile(patron, re.DOTALL | re.M).findall(data[index]) + for scrapedurl, scrapedtitle in matches: + itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=item.thumbnail, folder=True)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + + # <span id='content-torrent'> <a href='http://tumejorjuego.com/descargar/index.php?link=descargar/torrent/58591/el-tour-de-los-muppets-bluray-screener-espanol-castellano-line-2014.html' rel='nofollow' id='58591' title='el-tour-de-los-muppets-bluray-screener-espanol-castellano-line-2014' class='external-url' target='_blank'> + torrent_url = scrapertools.find_single_match(data, "<span id='content-torrent'[^<]+<a href='([^']+)'") + if torrent_url != "": + itemlist.append(Item(channel=item.channel, action="play", title="Torrent", url=torrent_url, server="torrent")) + + from core import servertools + itemlist.extend(servertools.find_video_items(data=data)) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.action = "play" + videoitem.folder = False + videoitem.title = "[" + videoitem.server + "]" + + return itemlist diff --git a/plugin.video.alfa/channels/newpct1.json b/plugin.video.alfa/channels/newpct1.json new file mode 100755 index 00000000..d59e3d16 --- /dev/null +++ b/plugin.video.alfa/channels/newpct1.json @@ -0,0 +1,44 @@ +{ + "id": "newpct1", + "name": "Newpct1", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "newpct1.png", + "banner": "newpct1.png", + "version": 1, + "changes": [ + { + "date": "12/05/17", + "description": "Modificado listado para evitar errores con infoplus" + }, + { + "date": "17/04/17", + "description": "Arreglada seccion series" + }, + { + "date": "29/12/16", + "description": "Adaptado, por cambios en la web, seccion Series" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "movie", + "tvshow", + "anime", + "torrent" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py new file mode 100755 index 00000000..48098d4d --- /dev/null +++ b/plugin.video.alfa/channels/newpct1.py @@ -0,0 +1,487 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url="http://www.newpct1.com/", + extra="peliculas")) + itemlist.append( + Item(channel=item.channel, action="submenu", title="Series", url="http://www.newpct1.com/", extra="series")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar")) + + return itemlist + + +def search(item, texto): + logger.info("search:" + texto) + texto = texto.replace(" ", "+") + item.url = "http://www.newpct1.com/index.php?page=buscar&q=%27" + texto + "%27&ordenar=Fecha&inon=Descendente" + item.extra = "buscar-list" + try: + itemlist = completo(item) + + # Esta pagina coloca a veces contenido duplicado, intentamos descartarlo + dict_aux = {} + for i in itemlist: + if not i.url in dict_aux: + dict_aux[i.url] = i + else: + itemlist.remove(i) + + return itemlist + + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def submenu(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + patron = '<li><a href="http://www.newpct1.com/' + item.extra + '/">.*?<ul>(.*?)</ul>' + data = scrapertools.get_match(data, patron) + + patron = '<a href="([^"]+)".*?>([^>]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + url = scrapedurl + + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist")) + itemlist.append( + Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist")) + + return itemlist + + +def alfabeto(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + patron = '<ul class="alfabeto">(.*?)</ul>' + data = scrapertools.get_match(data, patron) + + patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.upper() + url = scrapedurl + + itemlist.append(Item(channel=item.channel, action="completo", title=title, url=url, extra=item.extra)) + + return itemlist + + +def listado(item): + logger.info() + # logger.info("[newpct1.py] listado url=" + item.url) + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + patron = '<ul class="' + 
item.extra + '">(.*?)</ul>' + logger.debug("patron=" + patron) + fichas = scrapertools.get_match(data, patron) + + # <li><a href="http://www.newpct1.com/pelicula/x-men-dias-del-futuro-pasado/ts-screener/" title="Descargar XMen Dias Del Futuro gratis"><img src="http://www.newpct1.com/pictures/f/58066_x-men-dias-del-futuro--blurayrip-ac3-5.1.jpg" width="130" height="180" alt="Descargar XMen Dias Del Futuro gratis"><h2>XMen Dias Del Futuro </h2><span>BluRayRip AC3 5.1</span></a></li> + patron = '<li><a href="([^"]+).*?' # url + patron += 'title="([^"]+).*?' # titulo + patron += '<img src="([^"]+)"[^>]+>.*?' # thumbnail + patron += '<span>([^<]*)</span>' # calidad + + matches = re.compile(patron, re.DOTALL).findall(fichas) + + for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches: + url = scrapedurl + title = scrapedtitle + thumbnail = scrapedthumbnail + action = "findvideos" + extra = "" + + if "1.com/series" in url: + action = "completo" + extra = "serie" + context = "tvshow" + + title = scrapertools.find_single_match(title, '([^-]+)') + title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "", + 1).strip() + # logger.info("[newpct1.py] titulo="+title) + ''' + if len(title)>3: + url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=%22' + title.replace(" ","%20") + '%22' + else: + url_i = 'http://www.newpct1.com/index.php?page=buscar&url=&letter=&q=' + title + + if "1.com/series-hd" in url: + extra="serie-hd" + url = url_i + '&categoryID=&categoryIDR=1469&calidad=' + calidad.replace(" ","+") #DTV+720p+AC3+5.1 + elif "1.com/series-vo" in url: + extra="serie-vo" + url = url_i + '&categoryID=&categoryIDR=775&calidad=' + calidad.replace(" ","+") #HDTV+720p+AC3+5.1 + elif "1.com/series/" in url: + extra="serie-tv" + url = url_i + '&categoryID=&categoryIDR=767&calidad=' + calidad.replace(" ","+") + + url += '&idioma=&ordenar=Nombre&inon=Descendente' + ''' + show = title + else: + title = title.replace("Descargar", "", 1).strip() + if title.endswith("gratis"): title = title[:-7] + show = "" + context = "movie" + + context_title = scrapertools.find_single_match(url, "http://www.newpct1.com/(.*?)/(.*?)/") + if context_title: + try: + context_title = context_title[1].replace("-", " ") + if re.search('\d{4}', context_title[-4:]): + context_title = context_title[:-4] + elif re.search('\(\d{4}\)', context_title[-6:]): + context_title = context_title[:-6] + except: + context_title = title + + if item.extra != "buscar-list": + title = title + ' ' + calidad + + itemlist.append( + Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, extra=extra, show=show, + contentTitle=context_title, contentType=context, context=["buscar_trailer"])) + + if "pagination" in data: + patron = '<ul class="pagination">(.*?)</ul>' + paginacion = scrapertools.get_match(data, patron) + + if "Next" in paginacion: + url_next_page = scrapertools.get_match(paginacion, '<a href="([^>]+)>Next</a>')[:-1].replace(" ", "%20") + itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page, + extra=item.extra)) + # logger.info("[newpct1.py] listado items:" + str(len(itemlist))) + return itemlist + + +def completo(item): + logger.info() + itemlist = [] + categoryID = "" + + # Guarda el valor por si son etiquetas para que lo vea 'listadofichas' + item_extra = item.extra + item_show = item.show + item_title = item.title + + # Lee las entradas + if item_extra.startswith("serie"): + 
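(.*?)<">
+        # completo() aggregates every result page: the trailing "Página
+        # siguiente" pseudo-item that listado()/get_episodios() append is
+        # popped and its URL followed until a page comes back without one
+        # (see the while-not-salir loop below).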
ultimo_action = "get_episodios" + + if item.extra != "serie_add": + ''' + # Afinar mas la busqueda + if item_extra=="serie-hd": + categoryID=buscar_en_subcategoria(item.show,'1469') + elif item_extra=="serie-vo": + categoryID=buscar_en_subcategoria(item.show,'775') + elif item_extra=="serie-tv": + categoryID=buscar_en_subcategoria(item.show,'767') + if categoryID !="": + item.url=item.url.replace("categoryID=","categoryID="+categoryID) + + #Fanart + oTvdb= TvDb() + serieID=oTvdb.get_serieId_by_title(item.show) + fanart = oTvdb.get_graphics_by_serieId(serieID) + if len(fanart)>0: + item.fanart = fanart[0]''' + try: + from core.tmdb import Tmdb + oTmdb = Tmdb(texto_buscado=item.show, tipo="tv", idioma_busqueda="es") + item.fanart = oTmdb.get_backdrop() + item.plot = oTmdb.get_sinopsis() + print item.plot + except: + pass + else: + item_title = item.show + + items_programas = get_episodios(item) + else: + ultimo_action = "listado" + items_programas = listado(item) + + if len(items_programas) == 0: + return itemlist # devolver lista vacia + + salir = False + while not salir: + + # Saca la URL de la siguiente página + ultimo_item = items_programas[len(items_programas) - 1] + + # Páginas intermedias + if ultimo_item.action == ultimo_action: + # Quita el elemento de "Página siguiente" + ultimo_item = items_programas.pop() + + # Añade las entradas de la página a la lista completa + itemlist.extend(items_programas) + + # Carga la siguiente página + ultimo_item.extra = item_extra + ultimo_item.show = item_show + ultimo_item.title = item_title + logger.debug("url=" + ultimo_item.url) + if item_extra.startswith("serie"): + items_programas = get_episodios(ultimo_item) + else: + items_programas = listado(ultimo_item) + + # Última página + else: + # Añade a la lista completa y sale + itemlist.extend(items_programas) + salir = True + + if (config.get_videolibrary_support() and len(itemlist) > 0 and item.extra.startswith("serie")): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="completo###serie_add", show=item.show)) + logger.debug("items=" + str(len(itemlist))) + return itemlist + + +def get_episodios(item): + logger.info("url=" + item.url) + itemlist = [] + data = re.sub(r'\n|\r|\t|\s{2}|<!--.*?-->|<i class="icon[^>]+"></i>', "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + logger.debug("data=" + data) + + patron = '<ul class="buscar-list">(.*?)</ul>' + # logger.info("[newpct1.py] patron=" + patron) + + fichas = scrapertools.get_match(data, patron) + # logger.info("[newpct1.py] matches=" + str(len(fichas))) + + # <li><a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><img src="http://www.newpct1.com/pictures/c/minis/1880_forever.jpg" alt="Serie Forever 1x01"></a> <div class="info"> <a href="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"><h2 style="padding:0;">Serie <strong style="color:red;background:none;">Forever - Temporada 1 </strong> - Temporada<span style="color:red;background:none;">[ 1 ]</span>Capitulo<span style="color:red;background:none;">[ 01 ]</span><span style="color:red;background:none;padding:0px;">Espa�ol Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2></a> <span>27-10-2014</span> <span>450 MB</span> <span class="color"><ahref="http://www.newpct1.com/serie/forever/capitulo-101/" title="Serie Forever 1x01"> Descargar</a> 
</div></li> + # logger.info("[newpct1.py] get_episodios: " + fichas) + patron = '<li[^>]*><a href="([^"]+).*?' # url + patron += '<img src="([^"]+)".*?' # thumbnail + patron += '<h2 style="padding(.*?)/h2>' # titulo, idioma y calidad + + matches = re.compile(patron, re.DOTALL).findall(fichas) + # logger.info("[newpct1.py] get_episodios matches: " + str(len(matches))) + for scrapedurl, scrapedthumbnail, scrapedinfo in matches: + try: + url = scrapedurl + if '</span>' in scrapedinfo: + # logger.info("[newpct1.py] get_episodios: scrapedinfo="+scrapedinfo) + try: + # <h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Big Bang Theory - Temporada 6 </strong> - Temporada<span style="color:red;background:none;">[ 6 ]</span>Capitulo<span style="color:red;background:none;">[ 03 ]</span><span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2> + patron = '<span style=".*?">\[\s*(.*?)\]</span>.*?' # temporada + patron += '<span style=".*?">\[\s*(.*?)\].*?' # capitulo + patron += ';([^/]+)' # idioma + info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo) + (temporada, capitulo, idioma) = info_extra[0] + + except: + # <h2 style="padding:0;">Serie <strong style="color:red;background:none;">The Affair Temporada 3 Capitulo 5</strong> - <span style="color:red;background:none;padding:0px;">Español Castellano</span> Calidad <span style="color:red;background:none;">[ HDTV ]</span></h2> + patron = '<strong style=".*?">([^<]+).*?' # temporada y capitulo + patron += '<span style=".*?">([^<]+)' + + info_extra = re.compile(patron, re.DOTALL).findall(scrapedinfo) + (temporada_capitulo, idioma) = info_extra[0] + if re.search(r'(?i)Capitulos', temporada_capitulo): + temporada = scrapertools.find_single_match(temporada_capitulo, 'Temp.*?\s*([\d]+)') + cap1, cap2 = scrapertools.find_single_match(temporada_capitulo, 'Cap.*?\s*(\d+).*?(\d+)') + capitulo = "" + else: + temporada, capitulo = scrapertools.get_season_and_episode(temporada_capitulo).split('x') + + # logger.info("[newpct1.py] get_episodios: temporada=" + temporada) + # logger.info("[newpct1.py] get_episodios: capitulo=" + capitulo) + logger.debug("idioma=" + idioma) + if '">' in idioma: + idioma = " [" + scrapertools.find_single_match(idioma, '">([^<]+)').strip() + "]" + elif ' ' in idioma: + idioma = " [" + scrapertools.find_single_match(idioma, ' ([^<]+)').strip() + "]" + '''else: + idioma=""''' + if capitulo: + title = item.title + " (" + temporada.strip() + "x" + capitulo.strip() + ") " + idioma + else: + title = item.title + " (Del %sx%s al %sx%s) %s" % (temporada, cap1, temporada, cap2, idioma) + else: + # <h2 style="padding:0;">The Big Bang Theory - Temporada 6 [HDTV][Cap.602][Español Castellano]</h2> + # <h2 style="padding:0;">The Beast - Temporada 1 [HDTV] [Capítulo 13] [Español]</h2 + # <h2 style="padding:0;">The Beast - Temp.1 [DVD-DVB][Cap.103][Spanish]</h2> + try: + temp, cap = scrapertools.get_season_and_episode(scrapedinfo).split('x') + except: + # Formatear temporadaXepisodio + patron = re.compile('Cap.*?\s*([\d]+)', re.IGNORECASE) + info_extra = patron.search(scrapedinfo) + + if len(str(info_extra.group(1))) >= 3: + cap = info_extra.group(1)[-2:] + temp = info_extra.group(1)[:-2] + else: + cap = info_extra.group(1) + patron = 'Temp.*?\s*([\d]+)' + temp = re.compile(patron, re.IGNORECASE).search(scrapedinfo).group(1) + + title = item.title + " (" + temp + 'x' + cap + ")" + + # logger.info("[newpct1.py] get_episodios: 
fanart= " +item.fanart) + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail, + show=item.show, fanart=item.fanart)) + except: + logger.error("ERROR al añadir un episodio") + if "pagination" in data: + patron = '<ul class="pagination">(.*?)</ul>' + paginacion = scrapertools.get_match(data, patron) + # logger.info("[newpct1.py] get_episodios: paginacion= " + paginacion) + if "Next" in paginacion: + url_next_page = scrapertools.get_match(paginacion, '<a href="([^>]+)>Next</a>')[:-1] + url_next_page = url_next_page.replace(" ", "%20") + # logger.info("[newpct1.py] get_episodios: url_next_page= " + url_next_page) + itemlist.append( + Item(channel=item.channel, action="get_episodios", title=">> Página siguiente", url=url_next_page)) + + return itemlist + + +def buscar_en_subcategoria(titulo, categoria): + data = httptools.downloadpage("http://www.newpct1.com/pct1/library/include/ajax/get_subcategory.php", + post="categoryIDR=" + categoria).data + data = data.replace("</option>", " </option>") + patron = '<option value="(\d+)">(' + titulo.replace(" ", "\s").replace("(", "/(").replace(")", + "/)") + '\s[^<]*)</option>' + logger.debug("data=" + data) + logger.debug("patron=" + patron) + matches = re.compile(patron, re.DOTALL | re.IGNORECASE).findall(data) + + if len(matches) == 0: matches = [('', '')] + logger.debug("resultado=" + matches[0][0]) + return matches[0][0] + + +def findvideos(item): + logger.info() + itemlist = [] + + ## Cualquiera de las tres opciones son válidas + # item.url = item.url.replace("1.com/","1.com/ver-online/") + # item.url = item.url.replace("1.com/","1.com/descarga-directa/") + item.url = item.url.replace("1.com/", "1.com/descarga-torrent/") + + # Descarga la página + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)</strong>[^<]+</h1>") + title += scrapertools.find_single_match(data, "<h1><strong>[^<]+</strong>([^<]+)</h1>") + caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"') + + # <a href="http://tumejorjuego.com/download/index.php?link=descargar-torrent/058310_yo-frankenstein-blurayrip-ac3-51.html" title="Descargar torrent de Yo Frankenstein " class="btn-torrent" target="_blank">Descarga tu Archivo torrent!</a> + + patron = '<a href="([^"]+)" title="[^"]+" class="btn-torrent" target="_blank">' + + # escraped torrent + url = scrapertools.find_single_match(data, patron) + if url != "": + itemlist.append( + Item(channel=item.channel, action="play", server="torrent", title=title + " [torrent]", fulltitle=title, + url=url, thumbnail=caratula, plot=item.plot, folder=False)) + + # escraped ver vídeos, descargar vídeos un link, múltiples liks + data = data.replace("'", '"') + data = data.replace( + 'javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=', "") + data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "") + data = data.replace("$!", "#!") + + patron_descargar = '<div id="tab2"[^>]+>.*?</ul>' + patron_ver = '<div id="tab3"[^>]+>.*?</ul>' + + match_ver = scrapertools.find_single_match(data, patron_ver) + match_descargar = scrapertools.find_single_match(data, patron_descargar) + + patron = '<div class="box1"><img src="([^"]+)".*?' 
# logo + patron += '<div class="box2">([^<]+)</div>' # servidor + patron += '<div class="box3">([^<]+)</div>' # idioma + patron += '<div class="box4">([^<]+)</div>' # calidad + patron += '<div class="box5"><a href="([^"]+)".*?' # enlace + patron += '<div class="box6">([^<]+)</div>' # titulo + + enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver) + enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar) + + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver: + servidor = servidor.replace("streamin", "streaminto") + titulo = titulo + " [" + servidor + "]" + if servertools.is_server_enabled(servidor): + try: + servers_module = __import__("servers." + servidor) + server_module = getattr(servers_module, servidor) + devuelve = server_module.find_videos(enlace) + if devuelve: + enlace = devuelve[0][1] + itemlist.append( + Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo, + fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False)) + except: + pass + + for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar: + servidor = servidor.replace("uploaded", "uploadedto") + partes = enlace.split(" ") + p = 1 + for enlace in partes: + parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]" + p += 1 + if servertools.is_server_enabled(servidor): + try: + servers_module = __import__("servers." + servidor) + server_module = getattr(servers_module, servidor) + devuelve = server_module.find_videos(enlace) + if devuelve: + enlace = devuelve[0][1] + itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, + title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo, + plot=item.plot, folder=False)) + except: + pass + return itemlist + + +def episodios(item): + # Necesario para las actualizaciones automaticas + return completo(Item(channel=item.channel, url=item.url, show=item.show, extra="serie_add")) diff --git a/plugin.video.alfa/channels/news.json b/plugin.video.alfa/channels/news.json new file mode 100755 index 00000000..ba632499 --- /dev/null +++ b/plugin.video.alfa/channels/news.json @@ -0,0 +1,67 @@ +{ + "id": "news", + "name": "Novedades", + "active": false, + "adult": false, + "language": "es", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "07/02/17", + "description": "Añadir cuadro de progreso" + }, + { + "date": "01/07/16", + "description": "Eliminado código innecesario." 
+ }, + { + "date": "29/04/16", + "description": "Posibilidad de incluir otros canales, mediante configuracion" + } + ], + "categories": [ + "movie" + ], + "settings": [ + { + "id": "multithread", + "type": "bool", + "label": "Buscar de manera concurrente (multiprocesos)", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "result_mode", + "type": "list", + "label": "Mostrar resultados:", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Agrupados por contenido", + "Agrupados por canales", + "Sin Agrupar" + ] + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Frio", + "Calido", + "Lila", + "Pastel", + "Vivos" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/news.py b/plugin.video.alfa/channels/news.py new file mode 100755 index 00000000..ee1ea801 --- /dev/null +++ b/plugin.video.alfa/channels/news.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Channel for recent videos on several channels +# ------------------------------------------------------------ + +import glob +import os +import re +import time +from threading import Thread + +from core import channeltools +from core import config +from core import logger +from core import scrapertools +from core.item import Item +from platformcode import platformtools + +THUMBNAILS = {'0': 'posters', '1': 'banners', '2': 'squares'} + +__perfil__ = config.get_setting('perfil', "news") + +# Fijar perfil de color +perfil = [['0xFF0B7B92', '0xFF89FDFB', '0xFFACD5D4'], + ['0xFFB31313', '0xFFFF9000', '0xFFFFEE82'], + ['0xFF891180', '0xFFCB22D7', '0xFFEEA1EB'], + ['0xFFA5DEE5', '0xFFE0F9B5', '0xFFFEFDCA'], + ['0xFFF23557', '0xFF22B2DA', '0xFFF0D43A']] + +color1, color2, color3 = perfil[__perfil__] + +list_newest = [] +channels_id_name = {} + + +def mainlist(item): + logger.info() + + itemlist = [] + list_canales = get_channels_list() + + if list_canales['peliculas']: + thumbnail = config.get_thumb("thumb_channels_movie.png") + new_item = Item(channel=item.channel, action="novedades", extra="peliculas", title="Películas", + thumbnail=thumbnail) + + new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title, + "extra": new_item.extra, + "action": "setting_channel", + "channel": new_item.channel}] + new_item.category = "Novedades en %s" % new_item.extra + itemlist.append(new_item) + + if list_canales['infantiles']: + thumbnail = config.get_thumb("thumb_channels_children.png") + new_item = Item(channel=item.channel, action="novedades", extra="infantiles", title="Para niños", + thumbnail=thumbnail) + new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title, + "extra": new_item.extra, + "action": "setting_channel", + "channel": new_item.channel}] + new_item.category = "Novedades en %s" % new_item.extra + itemlist.append(new_item) + + if list_canales['series']: + thumbnail = config.get_thumb("thumb_channels_tvshow.png") + new_item = Item(channel=item.channel, action="novedades", extra="series", title="Episodios de series", + thumbnail=thumbnail) + new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title, + "extra": new_item.extra, + "action": "setting_channel", + "channel": new_item.channel}] + new_item.category = "Novedades en %s" % new_item.extra + itemlist.append(new_item) + + if list_canales['anime']: + thumbnail = config.get_thumb("thumb_channels_anime.png") + new_item = 
Item(channel=item.channel, action="novedades", extra="anime", title="Episodios de anime", + thumbnail=thumbnail) + new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title, + "extra": new_item.extra, + "action": "setting_channel", + "channel": new_item.channel}] + new_item.category = "Novedades en %s" % new_item.extra + itemlist.append(new_item) + + if list_canales['documentales']: + thumbnail = config.get_thumb("thumb_channels_documentary.png") + new_item = Item(channel=item.channel, action="novedades", extra="documentales", title="Documentales", + thumbnail=thumbnail) + new_item.context = [{"title": "Canales incluidos en: %s" % new_item.title, + "extra": new_item.extra, + "action": "setting_channel", + "channel": new_item.channel}] + new_item.category = "Novedades en %s" % new_item.extra + itemlist.append(new_item) + + return itemlist + + +def get_channels_list(): + logger.info() + + list_canales = {'peliculas': [], 'infantiles': [], 'series': [], 'anime': [], 'documentales': []} + + # Rellenar listas de canales disponibles + channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json') + channel_language = config.get_setting("channel_language") + + if channel_language == "": + channel_language = "all" + + for infile in sorted(glob.glob(channels_path)): + channel_id = os.path.basename(infile)[:-5] + channel_parameters = channeltools.get_channel_parameters(channel_id) + + # No incluir si es un canal inactivo + if not channel_parameters["active"]: + continue + + # No incluir si es un canal para adultos, y el modo adulto está desactivado + if channel_parameters["adult"] and config.get_setting("adult_mode") == 0: + continue + + # No incluir si el canal es en un idioma filtrado + if channel_language != "all" and channel_parameters["language"] != channel_language: + continue + + # Incluir en cada categoria, si en su configuracion el canal esta activado para mostrar novedades + for categoria in list_canales: + include_in_newest = config.get_setting("include_in_newest_" + categoria, channel_id) + if include_in_newest: + channels_id_name[channel_id] = channel_parameters["title"] + list_canales[categoria].append((channel_id, channel_parameters["title"])) + + return list_canales + + +def novedades(item): + logger.info() + + global list_newest + threads = [] + list_newest = [] + start_time = time.time() + + multithread = config.get_setting("multithread", "news") + logger.info("multithread= " + str(multithread)) + + if not multithread: + if platformtools.dialog_yesno("Búsqueda concurrente desactivada", + "La búsqueda concurrente de novedades proporciona", + "una mayor velocidad y su desactivación solo es aconsejable en caso de fallo.", + "¿Desea activar la búsqueda concurrente ahora?"): + if config.set_setting("multithread", True, "news"): + multithread = True + + progreso = platformtools.dialog_progress(item.category, "Buscando canales...") + list_canales = get_channels_list() + + import math + # fix float porque la division se hace mal en python 2.x + number_of_channels = float(100) / len(list_canales[item.extra]) + + for index, channel in enumerate(list_canales[item.extra]): + channel_id, channel_title = channel + percentage = int(math.ceil((index + 1) * number_of_channels)) + + # if progreso.iscanceled(): + # progreso.close() + # logger.info("Búsqueda cancelada") + # return itemlist + + # Modo Multi Thread + if multithread: + t = Thread(target=get_newest, args=[channel_id, item.extra], name=channel_title) + t.start() + threads.append(t) + 
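+ # Each channel search runs in its own Thread so one slow channel cannot
+ # block the rest; every worker appends its results to the shared
+ # module-level list_newest (list.append is atomic under CPython's GIL,
+ # so no explicit lock is used), and the wait loop further down polls
+ # isAlive() instead of join() so the progress dialog stays updatable
+ # and cancellable. A minimal sketch of the same fan-out pattern, with
+ # hypothetical fetch_newest/channels names:
+ #
+ #   from threading import Thread
+ #   results = []
+ #   def worker(channel_id):
+ #       results.extend(fetch_newest(channel_id))  # assumed helper
+ #   workers = [Thread(target=worker, args=[c]) for c in channels]
+ #   for w in workers:
+ #       w.start()
+ #   for w in workers:
+ #       w.join()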
progreso.update(percentage, "", "Buscando en '%s'..." % channel_title) + + # Modo single Thread + else: + logger.info("Obteniendo novedades de channel_id=" + channel_id) + progreso.update(percentage, "", "Buscando en '%s'..." % channel_title) + get_newest(channel_id, item.extra) + + # Modo Multi Thread: esperar q todos los hilos terminen + if multithread: + pendent = [a for a in threads if a.isAlive()] + t = float(100) / len(pendent) + while pendent: + index = (len(threads) - len(pendent)) + 1 + percentage = int(math.ceil(index * t)) + + list_pendent_names = [a.getName() for a in pendent] + mensaje = "Buscando en %s" % (", ".join(list_pendent_names)) + progreso.update(percentage, "Finalizado en %d/%d canales..." % (len(threads) - len(pendent), len(threads)), + mensaje) + logger.debug(mensaje) + + if progreso.iscanceled(): + logger.info("Busqueda de novedades cancelada") + break + + time.sleep(0.5) + pendent = [a for a in threads if a.isAlive()] + + mensaje = "Resultados obtenidos: %s | Tiempo: %2.f segundos" % (len(list_newest), time.time()-start_time) + progreso.update(100, mensaje, " ", " ") + logger.info(mensaje) + start_time = time.time() + # logger.debug(start_time) + + result_mode = config.get_setting("result_mode", "news") + if result_mode == 0: # Agrupados por contenido + ret = group_by_content(list_newest) + elif result_mode == 1: # Agrupados por canales + ret = group_by_channel(list_newest) + else: # Sin agrupar + ret = no_group(list_newest) + + while time.time() - start_time < 2: + # mostrar cuadro de progreso con el tiempo empleado durante almenos 2 segundos + time.sleep(0.5) + + progreso.close() + return ret + + +def get_newest(channel_id, categoria): + logger.info("channel_id=" + channel_id + ", categoria=" + categoria) + + global list_newest + + # Solicitamos las novedades de la categoria (item.extra) buscada en el canal channel + # Si no existen novedades para esa categoria en el canal devuelve una lista vacia + try: + + puede = True + try: + modulo = __import__('channels.%s' % channel_id, fromlist=["channels.%s" % channel_id]) + except: + try: + exec "import channels." 
+ channel_id + " as modulo" + except: + puede = False + + if not puede: + return + + logger.info("running channel " + modulo.__name__ + " " + modulo.__file__) + list_result = modulo.newest(categoria) + logger.info("canal= %s %d resultados" % (channel_id, len(list_result))) + + for item in list_result: + # logger.info("item="+item.tostring()) + item.channel = channel_id + list_newest.append(item) + + except: + logger.error("No se pueden recuperar novedades de: " + channel_id) + import traceback + logger.error(traceback.format_exc()) + + +def get_title(item): + if item.contentSerieName: # Si es una serie + title = item.contentSerieName + if not scrapertools.get_season_and_episode(title) and item.contentEpisodeNumber: + if not item.contentSeason: + item.contentSeason = '1' + title = "%s - %sx%s" % (title, item.contentSeason, str(item.contentEpisodeNumber).zfill(2)) + + elif item.contentTitle: # Si es una pelicula con el canal adaptado + title = item.contentTitle + elif item.fulltitle: # Si el canal no esta adaptado + title = item.fulltitle + else: # Como ultimo recurso + title = item.title + + # Limpiamos el titulo de etiquetas de formato anteriores + title = re.compile("\[/*COLO.*?\]", re.DOTALL).sub("", title) + title = re.compile("\[/*B\]", re.DOTALL).sub("", title) + title = re.compile("\[/*I\]", re.DOTALL).sub("", title) + + return title + + +def no_group(list_result_canal): + itemlist = [] + global channels_id_name + + for i in list_result_canal: + i.title = get_title(i) + " [" + channels_id_name[i.channel] + "]" + i.text_color = color3 + + itemlist.append(i.clone()) + + return sorted(itemlist, key=lambda it: it.title.lower()) + + +def group_by_channel(list_result_canal): + global channels_id_name + dict_canales = {} + itemlist = [] + + for i in list_result_canal: + if i.channel not in dict_canales: + dict_canales[i.channel] = [] + # Formatear titulo + i.title = get_title(i) + # Añadimos el contenido al listado de cada canal + dict_canales[i.channel].append(i) + + # Añadimos el contenido encontrado en la lista list_result + for c in sorted(dict_canales): + itemlist.append(Item(channel="news", title=channels_id_name[c] + ':', text_color=color1, text_bold=True)) + + for i in dict_canales[c]: + if i.contentQuality: + i.title += ' (%s)' % i.contentQuality + if i.language: + i.title += ' [%s]' % i.language + i.title = ' %s' % i.title + i.text_color = color3 + itemlist.append(i.clone()) + + return itemlist + + +def group_by_content(list_result_canal): + global channels_id_name + dict_contenidos = {} + list_result = [] + + for i in list_result_canal: + # Formatear titulo + i.title = get_title(i) + + # Eliminar tildes y otros caracteres especiales para la key + import unicodedata + try: + new_key = i.title.lower().strip().decode("UTF-8") + new_key = ''.join((c for c in unicodedata.normalize('NFD', new_key) if unicodedata.category(c) != 'Mn')) + + except: + new_key = i.title + + if new_key in dict_contenidos: + # Si el contenido ya estaba en el diccionario añadirlo a la lista de opciones... 
+ dict_contenidos[new_key].append(i) + else: # ...sino añadirlo al diccionario + dict_contenidos[new_key] = [i] + + # Añadimos el contenido encontrado en la lista list_result + for v in dict_contenidos.values(): + title = v[0].title + if len(v) > 1: + # Eliminar de la lista de nombres de canales los q esten duplicados + canales_no_duplicados = [] + for i in v: + if i.channel not in canales_no_duplicados: + canales_no_duplicados.append(channels_id_name[i.channel]) + + if len(canales_no_duplicados) > 1: + canales = ', '.join([i for i in canales_no_duplicados[:-1]]) + title += " (En %s y %s)" % (canales, canales_no_duplicados[-1]) + else: + title += " (En %s)" % (', '.join([i for i in canales_no_duplicados])) + + new_item = v[0].clone(channel="news", title=title, action="show_channels", + sub_list=[i.tourl() for i in v], extra=channels_id_name) + else: + new_item = v[0].clone(title=title) + + new_item.text_color = color3 + list_result.append(new_item) + + return sorted(list_result, key=lambda it: it.title.lower()) + + +def show_channels(item): + logger.info() + global channels_id_name + channels_id_name = item.extra + itemlist = [] + + for i in item.sub_list: + new_item = Item() + new_item = new_item.fromurl(i) + # logger.debug(new_item.tostring()) + if new_item.contentQuality: + new_item.title += ' (%s)' % new_item.contentQuality + if new_item.language: + new_item.title += ' [%s]' % new_item.language + new_item.title += ' (%s)' % channels_id_name[new_item.channel] + new_item.text_color = color1 + + itemlist.append(new_item.clone()) + + return itemlist + + +def menu_opciones(item): + itemlist = list() + itemlist.append(Item(channel=item.channel, title="Canales incluidos en:", + thumbnail=config.get_thumb("thumb_setting_0.png"), + folder=False)) + itemlist.append(Item(channel=item.channel, action="setting_channel", extra="peliculas", title=" - Películas ", + thumbnail=config.get_thumb("thumb_channels_movie.png"), + folder=False)) + itemlist.append(Item(channel=item.channel, action="setting_channel", extra="infantiles", title=" - Para niños", + thumbnail=config.get_thumb("thumb_channels_children.png"), + folder=False)) + itemlist.append(Item(channel=item.channel, action="setting_channel", extra="series", + title=" - Episodios de series", + thumbnail=config.get_thumb("thumb_channels_tvshow.png"), + folder=False)) + itemlist.append(Item(channel=item.channel, action="setting_channel", extra="anime", + title=" - Episodios de anime", + thumbnail=config.get_thumb("thumb_channels_anime.png"), + folder=False)) + itemlist.append(Item(channel=item.channel, action="setting_channel", extra="documentales", + title=" - Documentales", + thumbnail=config.get_thumb("thumb_channels_documentary.png"), + folder=False)) + itemlist.append(Item(channel=item.channel, action="settings", title="Otros ajustes", + thumbnail=config.get_thumb("thumb_setting_0.png"), + folder=False)) + return itemlist + + +def settings(item): + return platformtools.show_channel_settings(caption="configuración -- Novedades") + + +def setting_channel(item): + channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json') + channel_language = config.get_setting("channel_language") + + if channel_language == "": + channel_language = "all" + + list_controls = [] + for infile in sorted(glob.glob(channels_path)): + channel_id = os.path.basename(infile)[:-5] + channel_parameters = channeltools.get_channel_parameters(channel_id) + + # No incluir si es un canal inactivo + if not channel_parameters["active"]: + continue + + # No incluir 
si es un canal para adultos, y el modo adulto está desactivado + if channel_parameters["adult"] and config.get_setting("adult_mode") == 0: + continue + + # No incluir si el canal es en un idioma filtrado + if channel_language != "all" and channel_parameters["language"] != channel_language: + continue + + # No incluir si en su configuracion el canal no existe 'include_in_newest' + include_in_newest = config.get_setting("include_in_newest_" + item.extra, channel_id) + if include_in_newest is None: + continue + + control = {'id': channel_id, + 'type': "bool", + 'label': channel_parameters["title"], + 'default': include_in_newest, + 'enabled': True, + 'visible': True} + + list_controls.append(control) + + caption = "Canales incluidos en Novedades " + item.title.replace("Canales incluidos en: ", "- ").strip() + return platformtools.show_channel_settings(list_controls=list_controls, callback="save_settings", item=item, + caption=caption, custom_button={"visible": False}) + + +def save_settings(item, dict_values): + for v in dict_values: + config.set_setting("include_in_newest_" + item.extra, dict_values[v], v) diff --git a/plugin.video.alfa/channels/nuvid.json b/plugin.video.alfa/channels/nuvid.json new file mode 100755 index 00000000..f51412c3 --- /dev/null +++ b/plugin.video.alfa/channels/nuvid.json @@ -0,0 +1,23 @@ +{ + "id": "nuvid", + "name": "Nuvid", + "language": "es", + "active": true, + "adult": true, + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "16/02/2017", + "description": "Primera versión" + } + ], + "thumbnail": "http://i.imgur.com/rSbuStX.png", + "banner": "nuvid.png", + "categories": [ + "adult" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/nuvid.py b/plugin.video.alfa/channels/nuvid.py new file mode 100755 index 00000000..6e0ab3aa --- /dev/null +++ b/plugin.video.alfa/channels/nuvid.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +import base64 +import hashlib +import urlparse + +from core import httptools +from core import logger +from core import scrapertools + +host = "https://www.nuvid.com" + + +def mainlist(item): + logger.info() + itemlist = [] + + itemlist.append( + item.clone(action="lista", title="Nuevos Vídeos", url="https://www.nuvid.com/search/videos/_empty_/")) + itemlist.append( + item.clone(action="lista", title="Mejor Valorados", url="https://www.nuvid.com/search/videos/_empty_/", + extra="rt")) + itemlist.append( + item.clone(action="lista", title="Solo HD", url="https://www.nuvid.com/search/videos/hd", calidad="1")) + itemlist.append(item.clone(action="categorias", title="Categorías", url=host)) + itemlist.append(item.clone(title="Buscar...", action="search")) + return itemlist + + +def search(item, texto): + logger.info() + item.url = "https://www.nuvid.com/search/videos/" + texto.replace(" ", "%20") + item.extra = "buscar" + return lista(item) + + +def lista(item): + logger.info() + itemlist = [] + + if not item.calidad: + item.calidad = "0" + filter = 'ch=178.1.2.3.4.191.7.8.5.9.10.169.11.12.13.14.15.16.17.18.28.190.20.21.22.27.23.24.25.26.189.30.31.32.181' \ + '.35.36.37.180.176.38.33.34.39.40.41.42.177.44.43.45.47.48.46.49.50.51.52.53.54.55.56.57.58.179.59.60.61.' 
\ + '62.63.64.65.66.69.68.71.67.70.72.73.74.75.182.183.77.76.78.79.80.81.82.84.85.88.86.188.87.91.90.92.93.94' \ + '&hq=%s&rate=&dur=&added=&sort=%s' % (item.calidad, item.extra) + header = {'X-Requested-With': 'XMLHttpRequest'} + if item.extra != "buscar": + header['Cookie'] = 'area=EU; lang=en; search_filter_new=%s' % filter + # Descarga la pagina + data = httptools.downloadpage(item.url, headers=header, cookies=False).data + + # Extrae las entradas + patron = '<div class="box-tumb related_vid">.*?href="([^"]+)" title="([^"]+)".*?src="([^"]+)"(.*?)<i class="time">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches: + scrapedurl = urlparse.urljoin(host, scrapedurl) + if duration: + scrapedtitle = "%s - %s" % (duration, scrapedtitle) + if item.calidad == "0" and 'class="hd"' in quality: + scrapedtitle += " [COLOR red][HD][/COLOR]" + itemlist.append( + item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, folder=False)) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<li class="next1">.*?href="([^"]+)"') + if next_page: + next_page = urlparse.urljoin(host, next_page) + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage("https://www.nuvid.com/categories").data + + # Extrae las entradas (carpetas) + bloques = scrapertools.find_multiple_matches(data, '<h2 class="c-mt-output title2">.*?>([^<]+)</h2>(.*?)</div>') + for cat, b in bloques: + cat = cat.replace("Straight", "Hetero") + itemlist.append(item.clone(action="", title=cat, text_color="gold")) + matches = scrapertools.find_multiple_matches(b, '<li.*?href="([^"]+)">(.*?)</span>') + for scrapedurl, scrapedtitle in matches: + scrapedtitle = " " + scrapedtitle.replace("<span>", "") + scrapedurl = urlparse.urljoin(host, scrapedurl) + itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url, cookies=False).data + h = scrapertools.find_single_match(data, "params\s*\+=\s*'h=([^']+)'") + t = scrapertools.find_single_match(data, "params\s*\+=\s*'%26t=([^']+)'") + vkey = scrapertools.find_single_match(data, "params\s*\+=\s*'%26vkey='.*?'([^']+)'") + pkey = hashlib.md5(vkey + base64.b64decode("aHlyMTRUaTFBYVB0OHhS")).hexdigest() + + url = 'https://www.nuvid.com/player_config/?h=%s&check_speed=1&t=%s&vkey=%s&pkey=%s&aid=&domain_id=' % ( + h, t, vkey, pkey) + data = httptools.downloadpage(url, cookies=False).data + videourl = scrapertools.find_single_match(data, '<video_file>.*?(http.*?)\]') + if videourl: + itemlist.append(['.mp4 [directo]', videourl]) + videourl = scrapertools.find_single_match(data, '<hq_video_file>.*?(http.*?)\]') + if videourl: + itemlist.append(['.mp4 HD [directo]', videourl]) + + return itemlist diff --git a/plugin.video.alfa/channels/pasateatorrent.json b/plugin.video.alfa/channels/pasateatorrent.json new file mode 100755 index 00000000..4b4554fa --- /dev/null +++ b/plugin.video.alfa/channels/pasateatorrent.json @@ -0,0 +1,43 @@ +{ + "id": "pasateatorrent", + "name": "PasateaTorrent", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://imgur.com/iLeISt0.png", + "banner": "pasateatorrent.png", + "version": 1, + "changes": 
[ + { + "date": "06/12/2016", + "description": "Release" + }, + { + "date": "13/01/2017", + "description": "Arreglo sagas en peliculas que no se mostraban.Mejoras en series/info-capitulos" + }, + { + "date": "04/04/2017", + "description": "Migración httptools.Adaptación proxy según Kodi sea igual o menor v.17.Pequeñas mejoras código" + }, + { + "date": "28/06/2017", + "description": "Correciones código y algunas mejoras" + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pasateatorrent.py b/plugin.video.alfa/channels/pasateatorrent.py new file mode 100755 index 00000000..52304d35 --- /dev/null +++ b/plugin.video.alfa/channels/pasateatorrent.py @@ -0,0 +1,1821 @@ +# -*- coding: utf-8 -*- + +import os +import re +import unicodedata +import urllib + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe +from platformcode import platformtools + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +krypton = False + + +# Proxy para acceder a datos(Este canal usa cloudflare con https) +def get_page(url): + logger.info() + global krypton + xbmc_version = xbmc.getInfoLabel("System.BuildVersion") + check_xbmc_version = scrapertools.get_match(xbmc_version, '(\d+).') + + if check_xbmc_version >= 17: + krypton = True + data = httptools.downloadpage(url).data + else: + data = httptools.downloadpage("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url).data + + return data + + +# Para la busqueda en bing evitando baneos + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) 
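+ # The desktop Safari User-Agent set below makes the mechanize request
+ # look like a regular browser, since Bing throttles obvious bots; if
+ # the response still carries the challenge marker ("img,divreturn"),
+ # the code below retries the same query through the
+ # ssl-proxy.my-addr.org passthrough.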
+ br.addheaders = [('User-agent', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + check_bg = item.action + if str(check_bg) == "": + check_bg = "bglobal" + itemlist = [] + itemlist.append(Item(channel=item.channel, title="[COLOR yellow][B]Peliculas[/B][/COLOR]", action="peliculas", + url="http://pasateatorrent.com/", + thumbnail="https://s6.postimg.org/j9amymu1d/dxtorrentpelo.png", + fanart="http://imgur.com/uexmGEg.png")) + itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Series[/B][/COLOR]", action="peliculas", + url="http://pasateatorrent.com//series/", + thumbnail="https://s6.postimg.org/6vxsrq4cx/dxtorrentselo.png", + fanart="http://imgur.com/vQTyY6r.png")) + + itemlist.append(Item(channel=item.channel, title="[COLOR green][B]Buscar[/B][/COLOR]", action="", url="", + thumbnail="https://s6.postimg.org/hy2vq5yfl/dxtorrentbpelo.png", + fanart="http://imgur.com/P9jol7f.png")) + + itemlist.append( + Item(channel=item.channel, title=" " + "[COLOR yellow]Peliculas[/COLOR]", action="search", url="", + thumbnail="https://s6.postimg.org/79z4rbogh/dxtorrentpbselo.png", fanart="http://imgur.com/W7iwPvD.png", + extra="peliculas" + "|" + check_bg)) + itemlist.append( + Item(channel=item.channel, title=" " + "[COLOR skyblue]Series[/COLOR]", action="search", url="", + thumbnail="https://s6.postimg.org/hy2vq5yfl/dxtorrentbpelo.png", fanart="http://imgur.com/BD86Wdn.png", + extra="series" + "|" + check_bg)) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + check_bg = item.action + if item.extra: + if item.extra.split("|")[0] == "series": + item.url = "http://pasateatorrent.com/series/?s=%s&post_type=Buscar+serie" % (texto) + check_sp = "tvshow" + else: + item.url = "http://pasateatorrent.com/?s=%s&post_type=Buscar+película" % (texto) + check_sp = "peliculas" + item.extra = "search" + "|" + item.extra.split("|")[1] + "|" + texto + "|" + check_sp + + try: + return peliculas(item) + # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + else: + if item.contentType != "movie": + item.url = "http://pasateatorrent.com/series/?s=%s&post_type=Buscar+serie" % (texto) + check_sp = "tvshow" + else: + item.url = "http://pasateatorrent.com/?s=%s&post_type=Buscar+película" % (texto) + check_sp = "peliculas" + item.extra = "search" + "|" + "bglobal" + "|" + texto + "|" + check_sp + + try: + return peliculas(item) + # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + + +def peliculas(item): + logger.info() + itemlist = [] + global krypton + check_url = "" + # Descarga la página + data = get_page(item.url) + 
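+ # get_page() (defined above) branches on the Kodi major version: Kodi 17
+ # "Krypton" and newer fetch this Cloudflare-fronted HTTPS site directly,
+ # while older builds are routed through the ssl-proxy.my-addr.org
+ # passthrough. Caveat: check_xbmc_version is the *string* returned by
+ # scrapertools.get_match, and in Python 2 the mixed-type comparison
+ # check_xbmc_version >= 17 is always True (strings sort after numbers),
+ # so the proxy branch is effectively dead; int(check_xbmc_version) >= 17
+ # is presumably what was intended.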
# data =re.sub("-"," ",data) + if "serie" in item.url: + data = re.sub(r"&#.*?;", "x", data) + if item.extra.split("|")[0] == "search": + check_bg = item.action + bloque_enlaces = scrapertools.find_single_match(data, + '<div class="contenedor_imagenes">(.*?)<center><\/center>') + bloque_enlaces = bloque_enlaces.strip() + if item.extra.split("|")[1] != "bglobal" and check_bg != "info": + if str(bloque_enlaces) == "</div>": + if item.extra.split("|")[3] == "peliculas": + dialog = xbmcgui.Dialog() + if dialog.yesno( + '[COLOR crimson][B]Sin resultados en[/B][/COLOR]' + '[COLOR gold][B] Pasate[/B][/COLOR]' + '[COLOR floralwhite][B]A[/B][/COLOR]' + '[COLOR yellow][B]Torrent[/B][/COLOR]', + '[COLOR cadetblue]¿Quieres hacer una busqueda en Alfa?[/COLOR]', + '', "", '[COLOR crimson][B]No,gracias[/B][/COLOR]', + '[COLOR yellow][B]Si[/B][/COLOR]'): + item.extra = "movie" + "|" + item.extra.split("|")[2] + return busqueda(item) + else: + xbmc.executebuiltin('Action(Back)') + xbmc.sleep(500) + else: + dialog = xbmcgui.Dialog() + if dialog.yesno( + '[COLOR crimson][B]Sin resultados en[/B][/COLOR]' + '[COLOR slateblue][B] Pasate[/B][/COLOR]' + '[COLOR floralwhite][B]A[/B][/COLOR]' + '[COLOR slateblue][B]Torrent[/B][/COLOR]', + '[COLOR cadetblue]¿Quieres hacer una busqueda en Alfa?[/COLOR]', + '', "", '[COLOR crimson][B]No,gracias[/B][/COLOR]', + '[COLOR yellow][B]Si[/B][/COLOR]'): + item.extra = "serie" + "|" + item.extra.split("|")[2] + return busqueda(item) + else: + xbmc.executebuiltin('Action(Back)') + xbmc.sleep(500) + else: + bloque_enlaces = scrapertools.find_single_match(data, + '<div class="contenedor_imagenes">(.*?)<center><div class="navigation">') + if krypton: + patron = '<a href="https://([^"]+)/".*?src="https://([^"]+)".*?class="bloque_inferior">(.*?)<\/div>' + matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) + + else: + + patron = '<a href="\/myaddrproxy.php\/https\/([^"]+)/".*?src="\/myaddrproxy.php\/https\/([^"]+)".*?class="bloque_inferior">(.*?)<\/div>' + matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + if not item.extra == "search": + calidad = scrapertools.find_single_match(data, 'class="bloque_superior">(.*?)<\/div>') + + if "search" in item.extra: + scrapedtitle = re.sub(r'La Saga|Saga|Tetralogía|Tetralogia|Trilogía|Triloga|Pentalogía|Pentalogia', '', + scrapedtitle) + scrapedtitle = re.sub('<br>', '', scrapedtitle) + scrapedurl = "http://" + scrapedurl + if "serie" in item.url: + scrapedurl = re.sub(r' \d+x\d+| \d+', '', scrapedurl).strip() + scrapedurl = re.sub(r' ', '-', scrapedurl).strip() + scrapedthumbnail = "http://" + scrapedthumbnail + scrapedthumbnail = re.sub(r' ', '-', scrapedthumbnail) + title_fan = re.sub( + r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;|Serie Completa|Especial.*", "", + scrapedtitle).strip() + if not "100" in scrapedtitle: + title_fan = re.sub(r"\d+", "", title_fan) + title_serie = re.sub('<br>.*', '', scrapedtitle) + if "serie" in item.url: + try: + check_temp, check_serie = scrapertools.find_single_match(title_serie, '(\d+)x\d+ (\d+)') + if check_serie: + title_serie = title_serie.replace(" " + check_serie, "-" + check_serie) + if check_temp: + scrapedurl = scrapedurl + "-temporada-" + check_temp + except: + try: + check_temp, check_serie = scrapertools.find_single_match(title_serie, '(\d+)x\d+-(\d+)') + except: + try: + check_temp, check_serie = scrapertools.find_single_match(title_serie, '(\d+)-(\d+)') + except: + check_serie 
= "" + + title = scrapedtitle.title() + if "series" in scrapedurl: + # title_fan= re.sub(r'') + trailer = title_fan + " " + "series" + "trailer" + title = "[COLOR skyblue][B]" + title_serie + "[/B][/COLOR]" + + else: + title = "[COLOR yellow][B]" + scrapedtitle + "[/B][/COLOR]" + trailer = title_fan + " " + "trailer" + + trailer = urllib.quote(trailer) + extra = trailer + "|" + title_fan + "|" + "pelicula" + "|" + item.extra + if "Saga" in title or "Serie Completa" in title or "Tetralogía" in title or "Tetralogia" in title or "Trilogía" in title or "Trilogia" in title or "Pentalogía" in title or "Pentalogia" in title or "Pack Peliculas" in title or "Pack Películas" in title or "Duología" in title or "Duologia" in title: + if "serie" in item.url: + if krypton: + l_scrapedurl = re.sub(r"http://", "http/", scrapedurl) + l_scrapedurl = "http://ssl-proxy.my-addr.org/myaddrproxy.php/" + scrapedurl + else: + l_scrapedurl = scrapedurl + url = scrapertools.get_header_from_response(l_scrapedurl, header_to_get="location") + check_url = scrapertools.get_header_from_response(url, header_to_get="location") + if "series/?s" in check_url: + scrapedurl = re.sub(r" ", "-", scrapedurl.strip()) + action = "peliculas" + pepe = "search" + "|" + " " + check_bg = "" + else: + check_url = "capitulos" + action = "fanart" + pepe = extra + else: + scrapedurl = re.sub(r" ", "-", scrapedurl.strip()) + action = "peliculas" + pepe = "search" + "|" + " " + check_bg = "" + else: + action = "fanart" + pepe = extra + itemlist.append( + Item(channel=item.channel, title=title, url=scrapedurl, action=action, thumbnail=scrapedthumbnail, + fanart=item.fanart, extra=pepe, folder=True)) + + if "series" in item.url and not "Completa" in scrapedtitle and check_serie == "" and not "Temporada" in title_serie: + xbmc.log("pocoyoespajo") + xbmc.log(scrapedtitle) + url_1 = re.compile('([^<]+) (\d+).*?(\d+)', re.DOTALL).findall(scrapedtitle) + for title_capitulo, temp, epi in url_1: + xbmc.log("pocoyoespajo") + xbmc.log(scrapedtitle) + if "serie-completa" in scrapedurl: + title = "[COLOR cyan] Ver capitulos de temporada[/COLOR]" + else: + title = "[COLOR cyan]Ver capitulo[/COLOR]" + " " + "[COLOR slateblue]" + temp + "x" + epi + "[/COLOR]" + # url = "http://descargaportorrent.net/series/"+url_2+"-temporada-"+temp + extra = temp + "|" + epi + "|" + scrapedtitle + if "Especial" in scrapedtitle: + title = "[COLOR cyan] Ver capitulo especial[/COLOR]" + extra = "" + "|" + "Especial" + "|" + scrapedtitle + itemlist.append(Item(channel=item.channel, title=" " + title, url=scrapedurl, action="ver_capitulo", + thumbnail=scrapedthumbnail, fanart=item.fanart, extra=extra, folder=True)) + else: + if item.extra != "search" or item.extra == "search" and not "Saga" in title and not "Serie Completa" in title and not "Tetralogia" in title and not "Tetralogia" in title and not "Trilogía" in title and not "Trilogia" in title and not "Pentalogía" in title and not "Pentalogia" in title and not "Pack Peliculas" in title and not "Pack Películas" in title or not "Duología" in title or not "Duologia" in title: + if "series" in scrapedurl and not "Serie Completa" in title: + + if "Temporada" in scrapedtitle: + title = "[COLOR cyan] Ver capitulos de temporada[/COLOR]" + else: + title = "[COLOR cyan] Ver Capitulos[/COLOR]" + + else: + + if not "Completa" in title and not "Tetralogía" in title and not "Tetralogia" in title and not "Saga" in title and not "Trilogía" in title and not "Trilogia" in title and not "Pentalogía" in title and not "Pentalogia" in title and not 
"Pack Películas" in title and not "Pack Peliculas" in title and not "Duología" in title and not "Duologia" in title: + title = "[COLOR khaki] Ver pelicula[/COLOR]" + else: + if "Serie Completa" in title and check_url == "capitulos": + title = "[COLOR cyan] Ver Capitulos[/COLOR]" + else: + continue + itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action="ver_capitulo", + thumbnail=scrapedthumbnail, fanart=item.fanart, extra=extra, folder=True)) + ## Paginación + if krypton: + patronvideos = '<li class="barranav"><a href="([^"]+)" >P' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + + else: + patronvideos = '<li class="barranav">.*?<a href="/myaddrproxy.php/https/([^"]+)" >Página siguiente ' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + if len(matches) > 0: + scrapedurl = matches[0] + if krypton: + url = scrapedurl + else: + url = "http://" + scrapedurl + title = "siguiente>>" + title = "[COLOR slategray]" + title + "[/COLOR]" + itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=url, + thumbnail="http://s6.postimg.org/drfhhwrtd/muarrow.png", fanart=item.fanart, folder=True)) + + return itemlist + + +def fanart(item): + logger.info() + itemlist = [] + url = item.url + data = get_page(url) + title_fan = item.extra.split("|")[1] + title = re.sub(r'Serie Completa|Temporada.*', '', title_fan) + if "series" in item.url and not "temporada" in item.url: + item.title = re.sub(r'\d+x\d+.*?Final|-\d+|-|\d+x\d+|\d+', '', item.title) + item.title = re.sub(r'Especial.*?\[', '[', item.title) + title = title.replace(' ', '%20') + title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if + unicodedata.category(c) != 'Mn')).encode("ascii", "ignore") + + item.plot = item.extra.split("|")[0] + try: + year = scrapertools.get_match(data, '<div class="ano_page_exit">(\d\d\d\d)') + except: + year = "" + try: + sinopsis = scrapertools.get_match(data, 'Sinopsis.*?<p>(.*?)</p>') + except: + sinopsis = "" + if not "series" in item.url: + + # filmafinity + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url).data + + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf).data + else: + + try: + url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year) + data = browser(url_bing) + data = re.sub(r'\n|\r|\t|\s{2}| ', '', data) + + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + except: + pass + if sinopsis == " ": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + 
sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]" + print "ozuu" + print critica + + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = re.sub(r":.*|\(.*?\)", "", title) + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false" + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuacón" + "|" + rating_filma + "|" + critica + show = "http://imgur.com/c3rzL6x.jpg" + "|" + "" + "|" + sinopsis + posterdb = item.thumbnail + fanart_info = "http://imgur.com/c3rzL6x.jpg" + fanart_3 = "" + fanart_2 = "http://imgur.com/c3rzL6x.jpg" + category = item.thumbnail + id_scraper = "" + + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart="http://imgur.com/c3rzL6x.jpg", extra=extra, + show=show, category=category, folder=True)) + + for id, fan in matches: + + fan = re.sub(r'\\|"', '', fan) + + try: + rating = scrapertools.find_single_match(data, '"vote_average":(.*?),') + except: + rating = "Sin puntuación" + + id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica + try: + posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"') + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + except: + posterdb = item.thumbnail + + if "null" in fan: + fanart = "http://imgur.com/c3rzL6x.jpg" + else: + fanart = "https://image.tmdb.org/t/p/original" + fan + item.extra = fanart + + url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=" + api_key + "" + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + 
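+ # Fewer than three backdrops came back from TMDb: fall back to the
+ # single fanart already stored in item.extra for the info and
+ # secondary backgrounds.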
fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + + # clearart, fanart_2 y logo + url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"hdmovielogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if '"moviedisc"' in data: + disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"') + if '"movieposter"' in data: + poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"') + if '"moviethumb"' in data: + thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"') + if '"moviebanner"' in data: + banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"') + + if len(matches) == 0: + extra = posterdb + # "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png" + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + category = posterdb + + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", + thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category, folder=True)) + for logo in matches: + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + if '"moviebackground"' in data: + + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if '"moviebackground"' in data: + + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + else: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = logo + + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not '"hdmovieclearart"' in data and not '"moviebackground"' in data: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = item.extra + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, show=show, + category=category, folder=True)) + + + else: + + # filmafinity + url = 
"http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&ggenre=TV_SE&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url).data + + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf).data + else: + + try: + url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year) + data = browser(url_bing) + data = re.sub(r'\n|\r|\t|\s{2}| ', '', data) + + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + except: + pass + if sinopsis == " ": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta serie no tiene críticas[/B][/COLOR]" + + ###Busqueda en tmdb + + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false&first_air_date_year=" + year + data_tmdb = scrapertools.cachePage(url_tmdb) + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?)".*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + ###Busqueda en bing el id de imdb de la serie + if len(matches) == 0: + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es" + data_tmdb = scrapertools.cachePage(url_tmdb) + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + if len(matches) == 0: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + try: + subdata_imdb = scrapertools.find_single_match(data, + '<li 
class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + except: + pass + + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + imdb_id = "" + ###Busca id de tvdb y tmdb mediante imdb id + + urlremotetbdb = "https://api.themoviedb.org/3/find/" + imdb_id + "?api_key=" + api_key + "&external_source=imdb_id&language=es" + data_tmdb = scrapertools.cachePage(urlremotetbdb) + matches = scrapertools.find_multiple_matches(data_tmdb, + '"tv_results":.*?"id":(.*?),.*?"poster_path":(.*?),') + + if len(matches) == 0: + id_tmdb = "" + fanart_3 = "" + extra = item.thumbnail + "|" + year + "|" + "no data" + "|" + "no data" + "|" + "Sin puntuación" + "|" + "" + "|" + "" + "|" + id_tmdb + show = "http://imgur.com/ldWNcHm.jpg" + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + item.thumbnail + "|" + id_tmdb + fanart_info = "http://imgur.com/ldWNcHm.jpg" + fanart_2 = "http://imgur.com/ldWNcHm.jpg" + id_scraper = "" + category = "" + posterdb = item.thumbnail + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart="http://imgur.com/ldWNcHm.jpg", extra=extra, + category=category, show=show, folder=True)) + + for id_tmdb, fan in matches: + ###Busca id tvdb + urlid_tvdb = "https://api.themoviedb.org/3/tv/" + id_tmdb + "/external_ids?api_key=" + api_key + "&language=es" + data_tvdb = scrapertools.cachePage(urlid_tvdb) + id = scrapertools.find_single_match(data_tvdb, 'tvdb_id":(.*?),"tvrage_id"') + if id == "null": + id = "" + category = id + ###Busqueda nºepisodios y temporadas,status + url_status = "http://api.themoviedb.org/3/tv/" + id_tmdb + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_status = scrapertools.cachePage(url_status) + season_episodes = scrapertools.find_single_match(data_status, + '"(number_of_episodes":\d+,"number_of_seasons":\d+,)"') + season_episodes = re.sub(r'"', '', season_episodes) + season_episodes = re.sub(r'number_of_episodes', 'Episodios ', season_episodes) + season_episodes = re.sub(r'number_of_seasons', 'Temporadas', season_episodes) + season_episodes = re.sub(r'_', ' ', season_episodes) + status = scrapertools.find_single_match(data_status, '"status":"(.*?)"') + if status == "Ended": + status = "Finalizada" + else: + status = "En emisión" + status = status + " (" + season_episodes + ")" + status = re.sub(r',', '.', status) + ####### + + fan = re.sub(r'\\|"', '', fan) + try: + # rating tvdb + url_rating_tvdb = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id + "/es.xml" + print "pepote" + print url_rating_tvdb + data = httptools.downloadpage(url_rating_tvdb).data + rating = scrapertools.find_single_match(data, '<Rating>(.*?)<') + except: + ratintg_tvdb = "" + try: + rating = scrapertools.get_match(data, '"vote_average":(.*?),') + except: + + rating = "Sin puntuación" + + id_scraper = id_tmdb + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + rating + "|" + status # +"|"+emision + posterdb = scrapertools.find_single_match(data_tmdb, '"poster_path":(.*?)",') + + if "null" in posterdb: + posterdb = item.thumbnail + else: + posterdb = re.sub(r'\\|"', '', posterdb) + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + if "null" in fan: + fanart = "http://imgur.com/ldWNcHm.jpg" + else: + fanart = "https://image.tmdb.org/t/p/original" + fan + + item.extra = fanart + + url = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/images?api_key=" + api_key + + data = httptools.downloadpage(url).data + data 
= re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, fanart_2 in matches: + if fanart == "http://imgur.com/ldWNcHm.jpg": + fanart = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + url = "http://webservice.fanart.tv/v3/tv/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"clearlogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"tvbanner"' in data: + tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') + tfv = tvbanner + elif '"tvposter"' in data: + tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + tfv = tvposter + else: + tfv = posterdb + if '"tvthumb"' in data: + tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') + if '"hdtvlogo"' in data: + hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') + if '"hdclearart"' in data: + hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') + if len(matches) == 0: + if '"hdtvlogo"' in data: + if "showbackground" in data: + + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, + category=category, extra=extra, show=show, folder=True)) + + + else: + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = "" + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=posterdb, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + for logo in matches: + if '"hdtvlogo"' in data: + thumbnail = hdtvlogo + elif not '"hdtvlogo"' in data: + if '"clearlogo"' in data: + thumbnail = logo + else: + thumbnail = item.thumbnail + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + if "showbackground" 
in data: + + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if "showbackground" in data: + + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = logo + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not '"clearart"' in data and not '"showbackground"' in data: + if '"hdclearart"' in data: + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not "series" in item.url: + thumbnail = posterdb + title_info = "[COLOR khaki]Info[/COLOR]" + if "series" in item.url: + title_info = "[COLOR skyblue]Info[/COLOR]" + if '"tvposter"' in data: + thumbnail = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + else: + thumbnail = posterdb + + if "tvbanner" in data: + category = tvbanner + else: + category = show + if '"tvthumb"' in data: + plot = item.plot + "|" + tvthumb + else: + plot = item.plot + "|" + item.thumbnail + if '"tvbanner"' in data: + plot = plot + "|" + tvbanner + elif '"tvthumb"' in data: + plot = plot + "|" + tvthumb + else: + plot = plot + "|" + item.thumbnail + else: + if '"moviethumb"' in data: + plot = item.plot + "|" + thumb + else: + plot = item.plot + "|" + posterdb + + if '"moviebanner"' in data: + plot = plot + "|" + banner + else: + if '"hdmovieclearart"' in data: + plot = plot + "|" + clear + + else: + plot = plot + "|" + posterdb + + id = id_scraper + + extra = extra + "|" + id + "|" + title.encode('utf8') + + itemlist.append( + Item(channel=item.channel, action="info", title=title_info, plot=plot, url=item.url, thumbnail=thumbnail, + fanart=fanart_info, extra=extra, category=category, show=show, viewmode="movie_with_plot", folder=False)) + + return itemlist + + +def ver_capitulo(item): + logger.info() + itemlist = [] + data = get_page(item.url) + data = re.sub(r"&#.*?;", "x", data) + patronbloque_enlaces = '<tr class="lol">(.*?)ha sido descargada' + matchesenlaces = re.compile(patronbloque_enlaces, re.DOTALL).findall(data) + for enlaces in matchesenlaces: + enlaces = re.sub(r"alt=.*?<a href=.*?rar.*?>Click", "", enlaces) + 
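+ # The substitution above drops the "Click" download anchors of rows
+ # that point at .rar archives, and the one below replaces their
+ # password note with the "NO REPRODUCIBLE-RAR" label, so
+ # password-protected archives show up flagged as non-playable in the
+ # quality-selection dialog built further down.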
enlaces = re.sub(r"\(Contrase.*?\).*?", "NO REPRODUCIBLE-RAR", enlaces) + if "Serie Completa" in item.extra.split("|")[2] or "pelicula" in item.extra.split("|")[2]: + patron = 'alt="[^<]+".*?".*?Click' + + matches = re.compile(patron, re.DOTALL).findall(enlaces) + + scrapertools.printMatches(matches) + if krypton: + catchurl = re.compile('<a href="([^"]+)"', re.DOTALL).findall(str(matches)) + else: + catchurl = re.compile('<a href="/myaddrproxy.php/http/([^"]+)"', re.DOTALL).findall(str(matches)) + + for datos in matches: + + if "x Temporada" in item.extra.split("|")[2]: + + for (a, b) in enumerate(matches): + + calidad = re.compile('alt="[^<]+" title=.*?;">(.*?)</span>', re.DOTALL).findall(b) + idioma = re.compile('alt="[^<]+" title=.*?;">[^<]+</span>.*?;">(.*?)</span>', + re.DOTALL).findall(b) + + for (c, d) in enumerate(calidad): + xbmc.log("calidaddd") + xbmc.log(str(c)) + xbmc.log(str(d)) + + for (f, g) in enumerate(idioma): + xbmc.log("idiomaaaa") + xbmc.log(str(f)) + + xbmc.log("topotamadre") + xbmc.log(str(g)) + matches[a] = b.replace(b, "[COLOR orange][B]" + d + "[/B][/COLOR]") + "--" + b.replace(b, + "[COLOR palegreen][B]" + g + "[/B][/COLOR]") + else: + for (a, b) in enumerate(matches): + + calidad = re.compile('alt=.*?<td>(.*?)<', re.DOTALL).findall(b) + idioma = re.compile('alt=.*?<td>.*?<.*?<td>(.*?)<\/td>', re.DOTALL).findall(b) + + for (c, d) in enumerate(calidad): + xbmc.log("calidaddd") + xbmc.log(str(c)) + xbmc.log(str(d)) + + for (f, g) in enumerate(idioma): + xbmc.log("idiomaaaa") + xbmc.log(str(f)) + + xbmc.log("topotamadre") + xbmc.log(str(g)) + matches[a] = b.replace(b, "[COLOR orange][B]" + g + "[/B][/COLOR]") + "--" + b.replace(b, + "[COLOR palegreen][B]" + d + "[/B][/COLOR]") + else: + if item.extra.split("|")[1] != "Especial": + check = item.extra.split("|")[0] + "x" + item.extra.split("|")[1] + else: + check = item.extra.split("|")[1] + + patron = 'icono_espaniol\.png" title="[^<]+" alt="[^<]+"><\/td>\\n<td>' + check + '.*?<\/td>\\n<td>[^<]+<\/td>.*?Click' # 'icono_.*?png" alt="([^<]+)" .*?;">([^<]+)</span>.*?;">(.*?)</span>.*?<a href="/myaddrproxy.php/http/([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(enlaces) + '''patron ='alt="[^<]+" title="[^<]+"/></td><td style="text-align:center"><span style="font-family: \'Open Sans\';">[^<]+</span></td><td style="text-align:center"><span style="font-family: \'Open Sans\';">'+check+'.*?Click' #'icono_.*?png" alt="([^<]+)" .*?;">([^<]+)</span>.*?;">(.*?)</span>.*? 
<a href="/myaddrproxy.php/http/([^"]+)"' + matches = re.compile(patron,re.DOTALL).findall(enlaces)''' + + scrapertools.printMatches(matches) + if krypton: + catchurl = re.compile('<a href="([^"]+)"', re.DOTALL).findall(str(matches)) + else: + catchurl = re.compile('<a href="/myaddrproxy.php/http/([^"]+)"', re.DOTALL).findall(str(matches)) + + for datos in matches: + + for (a, b) in enumerate(matches): + + calidad = scrapertools.find_multiple_matches(b, 'alt=".*?<td>.*?<td>(.*?)<') + idioma = re.compile('alt="([^<]+)">', re.DOTALL).findall(b) + peso = re.compile('alt="[^<]+"><\/td>\\n<td>(.*?)<\/td>\\n<td>.*?<\/td>', re.DOTALL).findall(b) + + for (c, d) in enumerate(calidad): + xbmc.log("calidaddd") + xbmc.log(str(c)) + xbmc.log(str(d)) + + for (f, g) in enumerate(idioma): + xbmc.log("idiomaaaa") + xbmc.log(str(f)) + + xbmc.log("topotamadre") + xbmc.log(str(g)) + + for (h, i) in enumerate(peso): + if "RAR" in i: + i = " (No reproducible--RAR)" + else: + i = "" + if not "x" in g: + xbmc.log("digiiiit") + g = check + matches[a] = "[COLOR crimson][B]Capitulo[/B][/COLOR]" + "--" + b.replace(b, + "[COLOR orange][B]" + g + "[/B][/COLOR]") + "--" + b.replace( + b, "[COLOR palegreen][B]" + d + i + "[/B][/COLOR]") + + get_url = [(z, x) for z, x in enumerate(catchurl)] + get_url = repr(get_url) + + index = xbmcgui.Dialog().select("[COLOR orange][B]Seleciona calidad...[/B][/COLOR]", matches) + + if index != -1: + index = str(index) + url = scrapertools.get_match(get_url, '\(' + index + ', \'(.*?)\'') + if krypton: + item.url = url + else: + item.url = "http://" + url + item.server = "torrent" + platformtools.play_video(item) + xbmc.executebuiltin('Action(Back)') + xbmc.sleep(100) + else: + xbmc.executebuiltin('Action(Back)') + xbmc.sleep(100) + + return itemlist + + +def findvideos(item): + logger.info() + check_iepi2 = " " + itemlist = [] + data = get_page(item.url) + check_calidad = "" + check_epi = "" + check_especial = "" + if not "series" in item.url: + thumbnail = item.category + if "series" in item.url: + try: + if krypton: + url = scrapertools.get_match(data, '<a style="float:left;" href="([^"]+)">Temporada Anterior') + else: + url = scrapertools.get_match(data, + '<a style="float:left;" href="/myaddrproxy.php/https/([^"]+)">Temporada Anterior') + url_a = "http://" + url + temp_a = scrapertools.get_match(url, 'temporada-(\d+)') + year = item.extra.split("|")[1] + + title_info = item.show.split("|")[3].replace(' ', '%20') + + try: + backdrop = scrapertools.get_match(data2, 'page":1.*?,"id".*?"backdrop_path":"\\\(.*?)"') + fanart = "https://image.tmdb.org/t/p/original" + backdrop + + except: + fanart = item.show.split("|")[0] + url_temp = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/season/" + temp_a + "/images?api_key=" + api_key + "" + data2 = scrapertools.cachePage(url_temp) + data2 = re.sub(r"\n|\r|\t|\s{2}| ", "", data2) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data2) + if len(matches) == 0: + thumbnail = item.thumbnail + for thumtemp in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + thumtemp + if "Temporada" in item.title: + new_temp = "Temporada " + temp_a + title = re.sub(r"Temporada.*?(\d+)", new_temp, item.title) + title = re.sub(r"skyblue", "tomato", title) + else: + title = "[COLOR darkturquoise][B]TEMPORADA ANTERIOR[/B][/COLOR]" + + itemlist.append( + Item(channel=item.channel, title=title, url=url_a, action="findvideos", thumbnail=thumbnail, + extra=item.extra, show=item.show, 
fanart=fanart, folder=True)) + + except: + pass + patronbloque_enlaces = '<tr class="lol">(.*?)ha sido descargada' + matchesenlaces = re.compile(patronbloque_enlaces, re.DOTALL).findall(data) + + for enlaces in matchesenlaces: + if "serie" in item.url: + try: + temp_check = scrapertools.find_single_match(enlaces, + 'icono_.*?png".*?alt=".*?".*?<td>(\d+&#\d+;\d+)<\/td>.*?<td>.*?<\/td>') + if temp_check == "": + temp_check = scrapertools.find_single_match(enlaces, + 'icono_.*?png".*?alt=".*?".*?<td>(\d+&#\d+;\d+-\d+)<\/td>.*?<td>.*?<\/td>') + if temp_check == "": + check = "" + else: + check = "yes" + else: + check = "yes" + except: + check = "" + + else: + check = "pelicula" + + if "Completa" in item.title and check == "" or not "Completa" in item.title and check == "": + if krypton: + patron = 'icono_.*?png" title="(.*?)".*?<td>.*?<.*?<td>(.*?)<.*?<a href="([^"]+)"' + else: + patron = 'icono_.*?png" title="(.*?)".*?<td>.*?<.*?<td>(.*?)<.*?<a href="/myaddrproxy.php/http/([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(enlaces) + scrapertools.printMatches(matches) + + for calidad, idioma, url in matches: + + year = item.extra.split("|")[1] + try: + temp = scrapertools.get_match(item.url, 'temporada-(\d+)') + except: + temp = "0" + url_tmdb2 = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/season/" + temp + "/images?api_key=" + api_key + "" + data = httptools.downloadpage(url_tmdb2).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + if "TEMPORADA ANTERIOR" in item.title: + fanart = item.fanart + thumbnail = item.thumbnail + title = "[COLOR steelblue][B]" + idioma + "[/B][/COLOR]" + "-" + "[COLOR lightskyblue][B]" + calidad + "[/B][/COLOR]" + title = re.sub(r"tomato", "skyblue", title) + itemlist.append( + Item(channel=item.channel, title=title, action="play", url="http://" + url, server="torrent", + thumbnail=thumbnail, extra=item.extra, show=item.show, fanart=item.show.split("|")[0], + folder=False)) + for thumtemp in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + thumtemp + if "TEMPORADA ANTERIOR" in item.title: + fanart = item.fanart + + title = "[COLOR steelblue][B]" + idioma + "[/B][/COLOR]" + "-" + "[COLOR lightskyblue][B]" + calidad + "[/B][/COLOR]" + title = re.sub(r"tomato", "skyblue", title) + if not "http://" in url: + url = "http://" + url + itemlist.append(Item(channel=item.channel, title=title, action="play", url=url, server="torrent", + thumbnail=thumbnail, extra=item.extra, show=item.show, + fanart=item.show.split("|")[0], folder=False)) + else: + if krypton: + patron = 'icono_.*?png".*?alt="(.*?)".*?<td>(.*?)<\/td>.*?<td>(.*?)<\/td>.*?href="([^"]+)"' + else: + patron = 'icono_.*?png".*?alt="(.*?)".*?<td>(.*?)<\/td>.*?<td>(.*?)<\/td>.*?href="\/myaddrproxy.php\/http\/([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(enlaces) + scrapertools.printMatches(matches) + + for calidad, idioma, peso, url in matches: + if not "Especial:" in idioma: + check_especial = "" + if "Temporada" in item.title: + try: + temp_check = scrapertools.find_single_match(enlaces, + 'icono_.*?png".*?alt=".*?".*?<td>(\d+&#\d+;\d+)<\/td>.*?<td>.*?<\/td>') + if temp_check == "": + check = "" + else: + check = "yes" + except: + check = "" + + idioma = re.sub(r'\(Contra.*?\)', '', idioma) + if "Completa" in peso and check == "": + continue + if krypton: + url = url + else: + url = "http://" + url + torrents_path = 
config.get_videolibrary_path() + '/torrents' + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) + try: + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url, + torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + torrent = decode(pepe) + try: + name = torrent["info"]["name"] + sizet = torrent["info"]['length'] + sizet = convert_size(sizet) + except: + name = "no disponible" + try: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}") + size = max([int(i) for i in check_video]) + for file in torrent["info"]["files"]: + manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) + if str(size) in manolo: + video = manolo + size = convert_size(size) + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + try: + size = sizet + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", + name) + except: + size = "NO REPRODUCIBLE" + ext_v = "" + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + size = "" + + title = "[COLOR gold][B]" + idioma + "[/B][/COLOR]" + "-" + "[COLOR lemonchiffon][B]" + calidad + "[/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki]" + size + " )" + "[/COLOR]" + + if "series" in item.url and not "Completa" in item.title or check != "" and check != "pelicula": + year = item.extra.split("|")[1] + # idioma= re.sub(r"-.*","",idioma) + check = calidad + "|" + peso + "|" + idioma + temp_epi = re.compile('(\d)&#.*?;(\d+)', re.DOTALL).findall(check) + + for temp, epi in temp_epi: + url_tmdb2 = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/season/" + temp + "/images?api_key=" + api_key + "" + data = httptools.downloadpage(url_tmdb2).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + for thumtemp in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + thumtemp + + if check_epi == epi and check_calidad != peso and not "Especial:" in idioma or "Especial:" in idioma and check_especial == "yes": + check_info = "no" + title = " [COLOR mediumslateblue][B]Versión[/B][/COLOR]" + " " + "[COLOR royalblue][B]" + peso + "[/B][/COLOR]" + "[COLOR turquoise] ( Video" + "[/COLOR]" + " " + "[COLOR turquoise]" + ext_v + "[/COLOR]" + " " + "[COLOR turquoise]" + size + " )" + "[/COLOR]" + else: + check_info = "yes" + if "Especial:" in idioma: + check_especial = "yes" + title = "[COLOR steelblue][B]" + idioma + "[/B][/COLOR]" + "-" + "[COLOR lightskyblue][B]" + calidad + "[/B][/COLOR]" + "-" + "[COLOR royalblue][B]" + peso + "[/B][/COLOR]" + "[COLOR turquoise] ( Video" + "[/COLOR]" + " " + "[COLOR turquoise]" + ext_v + "[/COLOR]" + " " + "[COLOR turquoise]" + size + " )" + "[/COLOR]" + + check_epi = epi + check_calidad = peso + + itemlist.append(Item(channel=item.channel, title=title, action="play", url=url, server="torrent", + thumbnail=thumbnail, extra=item.extra, show=item.show, + fanart=item.show.split("|")[0], folder=False)) + + if "series" in item.url: + if check_info == "yes": + extra = item.extra + "|" + temp + "|" + epi + if "-" in idioma: + temp_epi2 = 
re.compile('\d&#.*?;\d+-(\d+)', re.DOTALL).findall(check) + for epi2 in temp_epi2: + len_epis = int(epi2) - int(epi) + if len_epis == 1: + check_iepi2 = "ok" + title_info = " Info Cap." + epi + title_info = "[COLOR skyblue]" + title_info + "[/COLOR]" + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title=title_info, + url=item.url, thumbnail=thumbnail, fanart=item.show.split("|")[0], + extra=extra, show=item.show, category=item.category, folder=False)) + else: + epis_len = range(int(epi), int(epi2) + 1) + extra = item.extra + "|" + temp + "|" + str(epis_len) + title_info = " Info Capítulos" + title_info = "[COLOR skyblue]" + title_info + "[/COLOR]" + itemlist.append( + Item(channel=item.channel, action="capitulos", title=title_info, url=item.url, + thumbnail=thumbnail, fanart=item.show.split("|")[0], extra=extra, + show=item.show, category=item.category, folder=True)) + check_iepi2 = "" + else: + title_info = " Info" + title_info = "[COLOR skyblue]" + title_info + "[/COLOR]" + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url, + thumbnail=thumbnail, fanart=item.show.split("|")[0], extra=extra, show=item.show, + category=item.category, folder=False)) + + if check_iepi2 == "ok": + extra = item.extra + "|" + temp + "|" + epi2 + title_info = " Info Cap." + epi2 + title_info = "[COLOR skyblue]" + title_info + "[/COLOR]" + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url, + thumbnail=thumbnail, fanart=item.show.split("|")[0], extra=extra, show=item.show, + category=item.category, folder=False)) + + return itemlist + + +def capitulos(item): + logger.info() + itemlist = [] + url = item.url + capis = item.extra.split("|")[3] + capis = re.sub(r'\[|\]', '', capis) + capis = [int(k) for k in capis.split(',')] + for i in capis: + extra = item.extra.split("|")[0] + "|" + item.extra.split("|")[1] + "|" + item.extra.split("|")[2] + "|" + str( + i) + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title="[COLOR skyblue]Info Cap." + str(i) + "[/COLOR]", + url=item.url, thumbnail=item.thumbnail, fanart=item.show.split("|")[0], extra=extra, show=item.show, + category=item.category, folder=False)) + return itemlist + + +def info(item): + logger.info() + itemlist = [] + url = item.url + id = item.extra + + if "serie" in item.url: + try: + rating_tmdba_tvdb = item.extra.split("|")[6] + if item.extra.split("|")[6] == "": + rating_tmdba_tvdb = "Sin puntuación" + except: + rating_tmdba_tvdb = "Sin puntuación" + else: + rating_tmdba_tvdb = item.extra.split("|")[3] + rating_filma = item.extra.split("|")[4] + print "eztoquee" + print rating_filma + print rating_tmdba_tvdb + + filma = "http://s6.postimg.org/6yhe5fgy9/filma.png" + + try: + if "serie" in item.url: + title = item.extra.split("|")[8] + + else: + title = item.extra.split("|")[6] + title = title.replace("%20", " ") + title = "[COLOR yellow][B]" + title + "[/B][/COLOR]" + except: + title = item.title + + try: + if "." 
in rating_tmdba_tvdb: + check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).') + else: + check_rat_tmdba = rating_tmdba_tvdb + if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8: + rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10: + rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + else: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + print "lolaymaue" + except: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + try: + check_rat_filma = scrapertools.get_match(rating_filma, '(\d)') + print "paco" + print check_rat_filma + if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8: + print "dios" + print check_rat_filma + rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]" + elif int(check_rat_filma) >= 8: + + print check_rat_filma + rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]" + else: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + print "rojo??" + print check_rat_filma + except: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + + try: + if not "serie" in item.url: + url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_plot = scrapertools.cache_page(url_plot) + plot, tagline = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",.*?"tagline":(".*?")') + if plot == "": + plot = item.show.split("|")[2] + + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\", "", plot) + + else: + plot = item.show.split("|")[2] + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\", "", plot) + + if item.extra.split("|")[7] != "": + tagline = item.extra.split("|")[7] + # tagline= re.sub(r',','.',tagline) + else: + tagline = "" + except: + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Esta pelicula no tiene informacion..." + plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") + photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + info = "" + + if "serie" in item.url: + check2 = "serie" + thumb_busqueda = "http://imgur.com/84pleyQ.png" + icon = "http://s6.postimg.org/hzcjag975/tvdb.png" + foto = item.show.split("|")[1] + if item.extra.split("|")[5] != "": + critica = item.extra.split("|")[5] + else: + critica = "Esta serie no tiene críticas..." 
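+ # Reminder: item.extra is a "|"-separated bundle assembled earlier in this channel; info() reads from it [0] artwork URL, [1] TMDb id (movies), [3] (movies) / [6] (series) rating, [4] FilmAffinity rating, [5] critic text, [6] (movies) / [8] (series) title and [7] tagline.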
+ + photo = item.extra.split("|")[0].replace(" ", "%20") + try: + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + except: + tagline = "" + + else: + thumb_busqueda = "http://imgur.com/Slbtn28.png" + critica = item.extra.split("|")[5] + if "%20" in critica: + critica = "No hay críticas" + icon = "http://imgur.com/SenkyxF.png" + + photo = item.extra.split("|")[0].replace(" ", "%20") + foto = item.show.split("|")[1] + if foto == item.thumbnail: + foto = "http://imgur.com/5jEL62c.jpg" + + try: + if tagline == "\"\"": + tagline = " " + except: + tagline = " " + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + check2 = "pelicula" + # Tambien te puede interesar + peliculas = [] + if "serie" in item.url: + + url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = scrapertools.cachePage(url_tpi) + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),') + + else: + url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = scrapertools.cachePage(url_tpi) + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),') + + for idp, peli, thumb in tpi: + + thumb = re.sub(r'"|}', '', thumb) + if "null" in thumb: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + else: + thumb = "https://image.tmdb.org/t/p/original" + thumb + peliculas.append([idp, peli, thumb]) + + check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow") + infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline, + 'rating': rating} + item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma, + critica=critica, contentType=check2, thumb_busqueda=thumb_busqueda) + from channels import infoplus + infoplus.start(item_info, peliculas) + + +def info_capitulos(item): + logger.info() + url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" + item.extra.split("|")[ + 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es" + + if "/0" in url: + url = url.replace("/0", "/") + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[ + 2] + "/" + item.extra.split("|")[3] + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?<Overview>(.*?)</Overview>.*?<Rating>(.*?)</Rating>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." 
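+ # Neither TMDb nor TheTVDB returned data for this episode, so the generic placeholder above is shown (highlighted in yellow just below).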
+ plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + + else: + + for name_epi, info, rating in matches: + if "<filename>episodes" in data: + foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') + fanart = "http://thetvdb.com/banners/" + foto + else: + fanart = "http://imgur.com/ZiEAVOD.png" + plot = info + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + foto = item.extra.split("|")[0] + if not ".png" in foto: + if "serie" in item.url: + foto = "http://imgur.com/6uXGkrz.png" + else: + foto = "http://i.imgur.com/5jEL62c.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + else: + for name_epi, info, fanart, rating in matches: + if info == "" or info == "\\": + info = "Sin informacion del capítulo aún..." + plot = info + plot = re.sub(r'/n', '', plot) + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + image = re.sub(r'"|}', '', image) + if "null" in image: + image = "http://imgur.com/ZiEAVOD.png" + else: + image = "https://image.tmdb.org/t/p/original" + image + foto = item.extra.split("|")[0] + if not ".png" in foto: + if "serie" in item.url: + foto = "http://imgur.com/6uXGkrz.png" + else: + foto = "http://i.imgur.com/5jEL62c.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." 
in rating: + rating = re.sub(r'10\.\d+', '10', rating) + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/gh1GShA.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def translate(to_translate, to_langage="auto", langage="auto"): + '''Return the translation using google translate + you must shortcut the langage you define (French = fr, English = en, Spanish = es, etc...) 
+ if you don't define anything it will detect it or use english by default + Example: + print(translate("salut tu vas bien?", "en")) + hello you alright?''' + agents = { + 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} + before_trans = 'class="t0">' + link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_langage, langage, to_translate.replace(" ", "+")) + request = urllib2.Request(link, headers=agents) + page = urllib2.urlopen(request).read() + result = page[page.find(before_trans) + len(before_trans):] + result = result.split("<")[0] + return result + + +if __name__ == '__main__': + to_translate = 'Hola como estas?' + print("%s >> %s" % (to_translate, translate(to_translate))) + print("%s >> %s" % (to_translate, translate(to_translate, 'fr'))) + + +# should print Hola como estas >> Hello how are you +# and Hola como estas? >> Bonjour comment allez-vous? + + + +def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): + i = 0 + while i < len(text): + m = match(text, i) + s = m.group(m.lastindex) + i = m.end() + if m.lastindex == 2: + yield "s" + yield text[i:i + int(s)] + i = i + int(s) + else: + yield s + + +def decode_item(next, token): + if token == "i": + # integer: "i" value "e" + data = int(next()) + if next() != "e": + raise ValueError + elif token == "s": + # string: "s" value (virtual tokens) + data = next() + elif token == "l" or token == "d": + # container: "l" (or "d") values "e" + data = [] + tok = next() + while tok != "e": + data.append(decode_item(next, tok)) + tok = next() + if token == "d": + data = dict(zip(data[0::2], data[1::2])) + else: + raise ValueError + return data + + +def decode(text): + try: + src = tokenize(text) + data = decode_item(src.next, src.next()) + for token in src: # look for more tokens + data = data + except (AttributeError, ValueError, StopIteration): + try: + data = data + except: + data = src + + return data + + +def convert_size(size): + import math + if (size == 0): + return '0B' + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return '%s %s' % (s, size_name[i]) + + +def busqueda(item): + logger.info() + cat = [item.extra.split("|")[0].replace("tv", "serie"), 'torrent'] + new_item = Item() + new_item.extra = item.extra.split("|")[1].replace("+", " ") + new_item.category = item.extra.split("|")[0] + + from channels import search + return search.do_search(new_item, cat) diff --git a/plugin.video.alfa/channels/peliculasaudiolatino.json b/plugin.video.alfa/channels/peliculasaudiolatino.json new file mode 100755 index 00000000..d4f6e3ea --- /dev/null +++ b/plugin.video.alfa/channels/peliculasaudiolatino.json @@ -0,0 +1,38 @@ +{ + "id": "peliculasaudiolatino", + "name": "Peliculasaudiolatino", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "peliculasaudiolatino.png", + "banner": "peliculasaudiolatino.png", + "version": 1, + "changes": [ + { + "date": "01/06/2017", + "description": "Reparado patron en play() para algunos servers" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliculasaudiolatino.py b/plugin.video.alfa/channels/peliculasaudiolatino.py new file mode 100755 index 00000000..2819a2a6 --- /dev/null +++ b/plugin.video.alfa/channels/peliculasaudiolatino.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +HOST = 'http://peliculasaudiolatino.com' + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, title="Recién agregadas", action="peliculas", url=HOST + "/ultimas-agregadas.html", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Recién actualizadas", action="peliculas", + url=HOST + "/recien-actualizadas.html", viewmode="movie")) + itemlist.append( + Item(channel=item.channel, title="Las más vistas", action="peliculas", url=HOST + "/las-mas-vistas.html", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Listado por géneros", action="generos", url=HOST)) + itemlist.append(Item(channel=item.channel, title="Listado por años", action="anyos", url=HOST)) + itemlist.append(Item(channel=item.channel, title="Buscar...", action="search")) + + return itemlist + + +def peliculas(item): + logger.info() + + # Descarga la página + data = httptools.downloadpage(item.url).data + + # Extrae las entradas de la pagina seleccionada + patron = '<td><a href="([^"]+)"><img src="([^"]+)" class="[^"]+" alt="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + title = scrapedtitle.strip() + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = "" + + # Añade al listado + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail, + plot=plot, folder=True)) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span class="icon-chevron-right">') + if next_page != "": + itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Página siguiente", + url=urlparse.urljoin(item.url, next_page).replace("/../../", "/"), viewmode="movie", + folder=True)) + + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + # Limita el bloque donde buscar + data = scrapertools.find_single_match(data, '<table class="generos"(.*?)</table>') + # Extrae las entradas + matches = re.compile('<a href="([^"]+)">([^<]+)<', re.DOTALL).findall(data) + for match in matches: + scrapedurl = urlparse.urljoin(item.url, match[0]) + scrapedtitle = match[1].strip() + scrapedthumbnail = "" + scrapedplot = "" + # logger.info(scrapedtitle) + + itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True, viewmode="movie")) + + itemlist = sorted(itemlist, key=lambda Item: Item.title) + return itemlist + + +def anyos(item): + logger.info() + itemlist = [] + + # Descarga la página + 
data = httptools.downloadpage(item.url).data + # Limita el bloque donde buscar + data = scrapertools.find_single_match(data, '<table class="years"(.*?)</table>') + # Extrae las entradas + matches = re.compile('<a href="([^"]+)">([^<]+)<', re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + title = scrapedtitle + thumbnail = "" + plot = "" + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + folder=True, viewmode="movie")) + + return itemlist + + +def search(item, texto): + logger.info() + itemlist = [] + + texto = texto.replace(" ", "+") + try: + # Series + item.url = HOST + "/busqueda.php?q=%s" + item.url = item.url % texto + item.extra = "" + itemlist.extend(peliculas(item)) + itemlist = sorted(itemlist, key=lambda Item: Item.title) + + return itemlist + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def findvideos(item): + logger.info() + # Descarga la página + + data = httptools.downloadpage(item.url).data + data = scrapertools.find_single_match(data, '<div class="opciones">(.*?)<div id="sidebar"') + + title = item.title + scrapedthumbnail = item.thumbnail + itemlist = [] + + patron = '<span class="infotx">([^<]+)</span></th[^<]+' + patron += '<th align="left"><img src="[^"]+" width="\d+" alt="([^"]+)"[^<]+</th[^<]+' + patron += '<th align="left"><img[^>]+>([^<]+)</th[^<]+' + patron += '<th class="slink" align="left"><div id="btnp"><a href="[^"]+" onClick="[^h]+([^\']+)\'' + + matches = re.compile(patron, re.DOTALL).findall(data) + for servidor, idioma, calidad, scrapedurl in matches: + url = scrapedurl + title = "Ver en " + servidor + " [" + idioma + "][" + calidad + "]" + itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url, + thumbnail=scrapedthumbnail, folder=False)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = scrapertools.find_single_match(data, 'src="(' + HOST + '/show/[^"]+)"') + data = httptools.downloadpage(data, headers=[['User-Agent', 'Mozilla/5.0'], ['Accept-Encoding', 'gzip, deflate'], + ['Referer', HOST], ['Connection', 'keep-alive']]).data + videoUrl = scrapertools.find_single_match(data, '(?i)<IFRAME.*?SRC="([^"]+)"') + goo = scrapertools.find_single_match(videoUrl, '://([^/]+)/') + if (goo == 'goo.gl'): + videoUrl = httptools.downloadpage(videoUrl, follow_redirects=False, only_headers=True).headers["location"] + server = scrapertools.find_single_match(videoUrl, '://([^/]+)/') + # logger.info("videoUrl = "+videoUrl) + enlaces = servertools.findvideos(videoUrl) + if enlaces: + thumbnail = servertools.guess_server_thumbnail(videoUrl) + # Añade al listado de XBMC + itemlist.append( + Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=enlaces[0][1], + server=enlaces[0][2], thumbnail=thumbnail, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/peliculasdk.json b/plugin.video.alfa/channels/peliculasdk.json new file mode 100755 index 00000000..fa9fc63b --- /dev/null +++ b/plugin.video.alfa/channels/peliculasdk.json @@ -0,0 +1,38 @@ +{ + "id": "peliculasdk", + "name": "PeliculasDK", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://s29.postimg.org/wzw749oon/pldklog.jpg", + 
"banner": "peliculasdk.png", + "version": 1, + "changes": [ + { + "date": "06/12/2016", + "description": "Corrección código. Adaptación Infoplus" + }, + { + "date": "26/04/2017", + "description": "Adaptación videoteca" + }, + { + "date": "28/06/2017", + "description": "Corrección código y algunas mejoras" + } + ], + "categories": [ + "torrent", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliculasdk.py b/plugin.video.alfa/channels/peliculasdk.py new file mode 100755 index 00000000..a77a6382 --- /dev/null +++ b/plugin.video.alfa/channels/peliculasdk.py @@ -0,0 +1,776 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +try: + import xbmc + import xbmcgui +except: + pass +import unicodedata + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +host = "http://www.peliculasdk.com/" + + +def bbcode_kodi2html(text): + if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"): + import re + text = re.sub(r'\[COLOR\s([^\]]+)\]', + r'<span style="color: \1">', + text) + text = text.replace('[/COLOR]', '</span>') + text = text.replace('[CR]', '<br>') + text = text.replace('[B]', '<b>') + text = text.replace('[/B]', '</b>') + text = text.replace('"color: yellow"', '"color: gold"') + text = text.replace('"color: white"', '"color: auto"') + + return text + + +def mainlist(item): + logger.info() + itemlist = [] + title = "Estrenos" + title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]")) + itemlist.append( + Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/ver/estrenos", + fanart="http://s24.postimg.org/z6ulldcph/pdkesfan.jpg", + thumbnail="http://s16.postimg.org/st4x601d1/pdkesth.jpg")) + title = "PelisHd" + title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]")) + itemlist.append( + Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-720/", + fanart="http://s18.postimg.org/wzqonq3w9/pdkhdfan.jpg", + thumbnail="http://s8.postimg.org/nn5669ln9/pdkhdthu.jpg")) + title = "Pelis HD-Rip" + title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]")) + itemlist.append( + Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-320", + fanart="http://s7.postimg.org/3pmnrnu7f/pdkripfan.jpg", + thumbnail="http://s12.postimg.org/r7re8fie5/pdkhdripthub.jpg")) + title = "Pelis Audio español" + title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]")) + itemlist.append( + Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/idioma/Espanol/", + fanart="http://s11.postimg.org/65t7bxlzn/pdkespfan.jpg", + thumbnail="http://s13.postimg.org/sh1034ign/pdkhsphtub.jpg")) + title = "Buscar..." 
+ title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]")) + itemlist.append( + Item(channel=item.channel, title=title, action="search", url="http://www.peliculasdk.com/calidad/HD-720/", + fanart="http://s14.postimg.org/ceqajaw2p/pdkbusfan.jpg", + thumbnail="http://s13.postimg.org/o85gsftyv/pdkbusthub.jpg")) + + return itemlist + + + def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + + item.url = "http://www.peliculasdk.com/index.php?s=%s&x=0&y=0" % (texto) + + try: + return buscador(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + + def buscador(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<div class="karatula".*?' + patron += 'src="([^"]+)".*?' + patron += '<div class="tisearch"><a href="([^"]+)">' + patron += '([^<]+)<.*?' + patron += 'Audio:(.*?)</a>.*?' + patron += 'Género:(.*?)</a>.*?' + patron += 'Calidad:(.*?),' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedlenguaje, scrapedgenero, scrapedcalidad in matches: + try: + year = scrapertools.get_match(scrapedtitle, '\((\d+)\)') + except: + year = "" + title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", + scrapedtitle).strip() + scrapedcalidad = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedcalidad).strip() + scrapedlenguaje = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedlenguaje).strip() + + if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad: + scrapedcalidad = scrapedcalidad.replace(scrapedcalidad, + bbcode_kodi2html("[COLOR orange]" + scrapedcalidad + "[/COLOR]")) + scrapedlenguaje = scrapedlenguaje.replace(scrapedlenguaje, + bbcode_kodi2html("[COLOR orange]" + scrapedlenguaje + "[/COLOR]")) + + scrapedtitle = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")" + scrapedtitle = scrapedtitle.replace(scrapedtitle, + bbcode_kodi2html("[COLOR white]" + scrapedtitle + "[/COLOR]")) + extra = year + "|" + title_fan + itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart", + thumbnail=scrapedthumbnail, extra=extra, + fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True)) + + try: + next_page = scrapertools.get_match(data, + '<span class="current">.*?<a href="(.*?)".*?>Siguiente »</a></div>') + + title = "siguiente>>" + title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]")) + itemlist.append(Item(channel=item.channel, action="buscador", title=title, url=next_page, + thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png", + fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True)) + except: + pass + + return itemlist + + + def peliculas(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| |&#.*?;", "", data) + + patron = 'style="position:relative;"> ' + patron += '<a href="([^"]+)" ' + patron += 'title="([^<]+)">' + patron += '<img src="([^"]+)".*?' + patron += 'Audio:(.*?)</br>.*?' + patron += 'Calidad:(.*?)</br>.*?'
+ patron += 'Género:.*?tag">(.*?)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlenguaje, scrapedcalidad, scrapedgenero in matches: + + try: + year = scrapertools.get_match(scrapedtitle, '\((\d+)\)') + except: + year = "" + title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", scrapedtitle) + scrapedtitle = re.sub(r"\(\d+\)", "", scrapedtitle).strip() + scrapedcalidad = re.sub(r"<a href.*?>|</a>", "", scrapedcalidad).strip() + scrapedlenguaje = re.sub(r"<a href.*?>|</a>", "", scrapedlenguaje).strip() + scrapedcalidad = scrapedcalidad.replace(scrapedcalidad, + bbcode_kodi2html("[COLOR orange]" + scrapedcalidad + "[/COLOR]")) + + if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad: + scrapedlenguaje = scrapedlenguaje.replace(scrapedlenguaje, + bbcode_kodi2html("[COLOR orange]" + scrapedlenguaje + "[/COLOR]")) + + scrapedtitle = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")" + scrapedtitle = scrapedtitle.replace(scrapedtitle, + bbcode_kodi2html("[COLOR white]" + scrapedtitle + "[/COLOR]")) + extra = year + "|" + title_fan + itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart", + thumbnail=scrapedthumbnail, extra=extra, + fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True)) + ## Paginación + + next_page = scrapertools.get_match(data, '<span class="current">.*?<a href="(.*?)".*?>Siguiente »</a></div>') + + title = "siguiente>>" + title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]")) + itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=next_page, + thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png", + fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True)) + + return itemlist + + +def fanart(item): + logger.info() + itemlist = [] + url = item.url + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + title_fan = item.extra.split("|")[1] + title = re.sub(r'Serie Completa|Temporada.*?Completa', '', title_fan) + fulltitle = title + title = title.replace(' ', '%20') + title = ''.join( + (c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn')) + try: + sinopsis = scrapertools.find_single_match(data, '<span class="clms">Sinopsis: <\/span>(.*?)<\/div>') + except: + sinopsis = "" + year = item.extra.split("|")[0] + + if not "series" in item.url: + + # filmafinity + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = scrapertools.downloadpage(url) + + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = scrapertools.downloadpage(url_filmaf) + else: + + try: + url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year) + data = browser(url_bing) + data = re.sub(r'\n|\r|\t|\s{2}| ', '', data) + + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', 
'', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + + if not "http" in url_filma: + data = scrapertools.cachePage("http://" + url_filma) + else: + data = scrapertools.cachePage(url_filma) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + except: + pass + + if sinopsis == " ": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]" + print "ozuu" + print critica + + url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = re.sub(r":.*|\(.*?\)", "", title) + url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&language=es&include_adult=false" + + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica + show = item.fanart + "|" + "" + "|" + sinopsis + posterdb = item.thumbnail + fanart_info = item.fanart + fanart_3 = "" + fanart_2 = item.fanart + category = item.thumbnail + id_scraper = "" + + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show, + category=category, library=item.library, fulltitle=fulltitle, folder=True)) + + for id, fan in matches: + + fan = re.sub(r'\\|"', '', fan) + + try: + rating = scrapertools.find_single_match(data, '"vote_average":(.*?),') + except: + rating = "Sin puntuación" + + id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica + try: + posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"') + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + except: + posterdb = item.thumbnail + + if "null" in fan: + fanart = item.fanart + else: + fanart = "https://image.tmdb.org/t/p/original" + fan + item.extra = fanart + + url = 
"http://api.themoviedb.org/3/movie/" + id + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + if fanart == item.fanart: + fanart = fanart_info + # clearart, fanart_2 y logo + url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=dffe90fba4d02c199ae7a9e71330c987" + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"hdmovielogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if '"moviedisc"' in data: + disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"') + if '"movieposter"' in data: + poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"') + if '"moviethumb"' in data: + thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"') + if '"moviebanner"' in data: + banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"') + + if len(matches) == 0: + extra = posterdb + # "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png" + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + category = posterdb + + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", + thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category, + library=item.library, fulltitle=fulltitle, folder=True)) + for logo in matches: + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + if '"moviebackground"' in data: + + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, library=item.library, fulltitle=fulltitle, + folder=True)) + else: + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, library=item.library, fulltitle=fulltitle, + folder=True)) + + if '"moviebackground"' in data: + + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + else: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = logo + + 
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, library=item.library, fulltitle=fulltitle, + folder=True)) + + if not '"hdmovieclearart"' in data and not '"moviebackground"' in data: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = item.extra + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, show=show, + category=category, library=item.library, fulltitle=fulltitle, folder=True)) + + title_info = "Info" + + if posterdb == item.thumbnail: + if '"movieposter"' in data: + thumbnail = poster + else: + thumbnail = item.thumbnail + else: + thumbnail = posterdb + + id = id_scraper + + extra = extra + "|" + id + "|" + title.encode('utf8') + + title_info = title_info.replace(title_info, bbcode_kodi2html("[COLOR skyblue]" + title_info + "[/COLOR]")) + itemlist.append(Item(channel=item.channel, action="info", title=title_info, url=item.url, thumbnail=thumbnail, + fanart=fanart_info, extra=extra, category=category, show=show, folder=False)) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + data = scrapertools.cache_page(item.url) + data = re.sub(r"<!--.*?-->", "", data) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + bloque_tab = scrapertools.find_single_match(data, '<div id="verpelicula">(.*?)<div class="tab_container">') + patron = '<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>' + check = re.compile(patron, re.DOTALL).findall(bloque_tab) + + servers_data_list = [] + + patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for check_tab, server, id in matches: + scrapedplot = scrapertools.get_match(data, '<span class="clms">(.*?)</div></div>') + plotformat = re.compile('(.*?:) </span>', re.DOTALL).findall(scrapedplot) + scrapedplot = scrapedplot.replace(scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]")) + + for plot in plotformat: + scrapedplot = scrapedplot.replace(plot, bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]")) + scrapedplot = scrapedplot.replace("</span>", "[CR]") + scrapedplot = scrapedplot.replace(":", "") + if check_tab in str(check): + idioma, calidad = scrapertools.find_single_match(str(check), "" + check_tab + "', '(.*?)', '(.*?)'") + + servers_data_list.append([server, id, idioma, calidad]) + + url = "http://www.peliculasdk.com/Js/videod.js" + data = scrapertools.cachePage(url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + data = data.replace('<iframe width="100%" height="400" scrolling="no" frameborder="0"', '') + + patron = 'function (\w+)\(id\).*?' 
+ patron += 'data-src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for server, url in matches: + + for enlace, id, idioma, calidad in servers_data_list: + + if server == enlace: + + video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "", str(url)) + video_url = re.sub(r"'\+codigo\+'", "", video_url) + video_url = video_url.replace('embed//', 'embed/') + video_url = video_url + id + if "goo.gl" in video_url: + try: + from unshortenit import unshorten + url = unshorten(video_url) + video_url = scrapertools.get_match(str(url), "u'([^']+)'") + except: + continue + + servertitle = scrapertools.get_match(video_url, 'http.*?://(.*?)/') + servertitle = servertitle.replace(servertitle, + bbcode_kodi2html("[COLOR red]" + servertitle + "[/COLOR]")) + servertitle = servertitle.replace("embed.", "") + servertitle = servertitle.replace("player.", "") + servertitle = servertitle.replace("api.video.", "") + servertitle = re.sub(r"hqq.tv|hqq.watch", "netu.tv", servertitle) + servertitle = servertitle.replace("anonymouse.org", "netu.tv") + title = bbcode_kodi2html("[COLOR orange]Ver en --[/COLOR]") + servertitle + " " + idioma + " " + calidad + itemlist.append( + Item(channel=item.channel, title=title, url=video_url, action="play", thumbnail=item.category, + plot=scrapedplot, fanart=item.show)) + if item.library and config.get_videolibrary_support() and len(itemlist) > 0: + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], + 'title': item.fulltitle} + itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", + action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, + text_color="0xFFff6666", + thumbnail='http://imgur.com/0gyYvuC.png')) + + return itemlist + + +def play(item): + logger.info() + + itemlist = servertools.find_video_items(data=item.url) + data = scrapertools.cache_page(item.url) + + listavideos = servertools.findvideos(data) + + for video in listavideos: + videotitle = scrapertools.unescape(video[0]) + url = item.url + server = video[2] + + # xbmctools.addnewvideo( item.channel , "play" , category , server , , url , thumbnail , plot ) + itemlist.append( + Item(channel=item.channel, action="play", server=server, title="Trailer - " + videotitle, url=url, + thumbnail=item.thumbnail, plot=item.plot, fulltitle=item.title, + fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False)) + + return itemlist + + +def info(item): + logger.info() + itemlist = [] + url = item.url + id = item.extra + if "serie" in item.url: + try: + rating_tmdba_tvdb = item.extra.split("|")[6] + if item.extra.split("|")[6] == "": + rating_tmdba_tvdb = "Sin puntuación" + except: + rating_tmdba_tvdb = "Sin puntuación" + else: + rating_tmdba_tvdb = item.extra.split("|")[3] + rating_filma = item.extra.split("|")[4] + print "eztoquee" + print rating_filma + print rating_tmdba_tvdb + + filma = "http://s6.postimg.org/6yhe5fgy9/filma.png" + + try: + if "serie" in item.url: + title = item.extra.split("|")[8] + + else: + title = item.extra.split("|")[6] + title = title.replace("%20", " ") + title = "[COLOR yellow][B]" + title + "[/B][/COLOR]" + except: + title = item.title + + try: + if "." 
in rating_tmdba_tvdb:
+            check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).')
+        else:
+            check_rat_tmdba = rating_tmdba_tvdb
+        if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8:
+            rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+        elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == "10":
+            rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+        else:
+            rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+    except:
+        rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+    if "10." in rating:
+        rating = re.sub(r'10\.\d+', '10', rating)
+    try:
+        check_rat_filma = scrapertools.get_match(rating_filma, '(\d)')
+        if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8:
+            rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]"
+        elif int(check_rat_filma) >= 8:
+            rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]"
+        else:
+            rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
+    except:
+        rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
+
+    if not "serie" in item.url:
+        url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[1] + "?api_key=2e2160006592024ba87ccdf78c28f49f&append_to_response=credits&language=es"
+        data_plot = scrapertools.cache_page(url_plot)
+        plot = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",')
+        tagline = scrapertools.find_single_match(data_plot, '"tagline":(".*?")')
+        if plot == "":
+            plot = item.show.split("|")[2]
+
+        plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
+        plot = re.sub(r"\\", "", plot)
+
+    else:
+        plot = item.show.split("|")[2]
+        plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
+        plot = re.sub(r"\\", "", plot)
+
+    if item.extra.split("|")[7] != "":
+        tagline = item.extra.split("|")[7]
+        # tagline= re.sub(r',','.',tagline)
+    else:
+        tagline = ""
+
+    if "serie" in item.url:
+        check2 = "serie"
+        icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
+        foto = item.show.split("|")[1]
+        if item.extra.split("|")[5] != "":
+            critica = item.extra.split("|")[5]
+        else:
+            critica = "Esta serie no tiene críticas..."
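Note: both try/except ladders above encode the same rule (crimson below 5, springgreen from 5 to 7, yellow from 8 up), once for the TMDB score and once for the FilmAffinity score. A single hypothetical helper could replace them:

    def color_for_rating(value):
        # Colour a 0-10 score: crimson < 5, springgreen 5-7, yellow >= 8;
        # anything unparseable falls back to crimson, as in the original
        try:
            score = float(str(value).replace(",", "."))
        except ValueError:
            return "[COLOR crimson][B]%s[/B][/COLOR]" % value
        if score >= 8:
            color = "yellow"
        elif score >= 5:
            color = "springgreen"
        else:
            color = "crimson"
        return "[COLOR %s][B]%s[/B][/COLOR]" % (color, value)

so that `rating = color_for_rating(rating_tmdba_tvdb)` and `rating_filma = color_for_rating(rating_filma)` each become one line.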
+ if not ".png" in item.extra.split("|")[0]: + photo = "http://imgur.com/6uXGkrz.png" + else: + photo = item.extra.split("|")[0].replace(" ", "%20") + try: + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + except: + tagline = "" + + else: + critica = item.extra.split("|")[5] + if "%20" in critica: + critica = "No hay críticas" + icon = "http://imgur.com/SenkyxF.png" + photo = item.extra.split("|")[0].replace(" ", "%20") + foto = item.show.split("|")[1] + try: + if tagline == "\"\"": + tagline = " " + except: + tagline = " " + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + check2 = "pelicula" + + # Tambien te puede interesar + peliculas = [] + if "serie" in item.url: + + url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es" + data_tpi = scrapertools.cachePage(url_tpi) + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),') + + else: + url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es" + data_tpi = scrapertools.cachePage(url_tpi) + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),') + + for idp, peli, thumb in tpi: + + thumb = re.sub(r'"|}', '', thumb) + if "null" in thumb: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + else: + thumb = "https://image.tmdb.org/t/p/original" + thumb + peliculas.append([idp, peli, thumb]) + + check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow") + infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline, + 'rating': rating} + item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma, + critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/kdfWEJ6.png") + from channels import infoplus + infoplus.start(item_info, peliculas) + + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) 
+    br.addheaders = [('User-agent',
+                      'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
+    # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
+    # Abre la página; si bing sirve su interstitial, reintenta a través del proxy SSL
+    r = br.open(url)
+    response = r.read()
+    if "img,divreturn" in response:
+        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
+        response = r.read()
+
+    return response
diff --git a/plugin.video.alfa/channels/peliculaseroticas.json b/plugin.video.alfa/channels/peliculaseroticas.json
new file mode 100755
index 00000000..8bbca10b
--- /dev/null
+++ b/plugin.video.alfa/channels/peliculaseroticas.json
@@ -0,0 +1,23 @@
+{
+  "id": "peliculaseroticas",
+  "name": "PeliculasEroticas",
+  "active": true,
+  "adult": true,
+  "language": "es",
+  "thumbnail": "peliculaseroticas.png",
+  "banner": "peliculaseroticas.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "05/08/2016",
+      "description": "Eliminado de sección peliculas."
+    }
+  ],
+  "categories": [
+    "adult"
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/peliculaseroticas.py b/plugin.video.alfa/channels/peliculaseroticas.py
new file mode 100755
index 00000000..20b99140
--- /dev/null
+++ b/plugin.video.alfa/channels/peliculaseroticas.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urlparse
+
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+
+def mainlist(item):
+    logger.info()
+
+    if item.url == "":
+        item.url = "http://www.peliculaseroticas.net/"
+
+    # Descarga la página
+    data = scrapertools.cachePage(item.url)
+
+    # Extrae las entradas de la pagina seleccionada
+    patron = '<div class="post"[^<]+'
+    patron += '<a href="([^"]+)">([^<]+)</a[^<]+'
+    patron += '<hr[^<]+'
+    patron += '<a[^<]+<img src="([^"]+)"'
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    itemlist = []
+
+    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
+        url = urlparse.urljoin(item.url, scrapedurl)
+        title = scrapedtitle.strip()
+        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
+        plot = ""
+
+        # Añade al listado
+        itemlist.append(
+            Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
+                 thumbnail=thumbnail, plot=plot, viewmode="movie", folder=True))
+
+    # Extrae la marca de siguiente página
+    if item.url == "http://www.peliculaseroticas.net/":
+        next_page_url = "http://www.peliculaseroticas.net/cine-erotico/2.html"
+    else:
+        current_page = scrapertools.find_single_match(item.url, "(\d+)")
+        next_page = int(current_page) + 1
+        next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html"
+
+    # La página siguiente la sirve esta misma función; la acción "peliculas" no existe en este canal
+    itemlist.append(
+        Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url, folder=True))
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/peliculasgratis.json b/plugin.video.alfa/channels/peliculasgratis.json
new file mode 100755
index 00000000..74811410
--- /dev/null
+++ b/plugin.video.alfa/channels/peliculasgratis.json
@@ -0,0 +1,35 @@
+{
+  "id": "peliculasgratis",
+  "name": "PeliculasGratis",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "http://imgur.com/ThH8Zmk.png",
+  "banner": "peliculasgratis.png",
"peliculasgratis.png", + "version": 1, + "changes": [ + { + "date": "13/01/2017", + "description": "Release" + }, + { + "date": "04/04/2017", + "description": "Reparación cambios web" + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliculasgratis.py b/plugin.video.alfa/channels/peliculasgratis.py new file mode 100755 index 00000000..dbbf8a25 --- /dev/null +++ b/plugin.video.alfa/channels/peliculasgratis.py @@ -0,0 +1,1136 @@ +# -*- coding: utf-8 -*- + +import os +import re +import urllib +import urlparse + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +host = "http://peliculasgratis.biz" + +CALIDADES = {"micro1080p": "[COLOR plum]Micro1080p[/COLOR]", "dvds": "[COLOR lime]Dvds[/COLOR]", + "hdrip": "[COLOR dodgerblue]Hdrip[/COLOR]", "dvdrip": "[COLOR crimson]Dvdrip[/COLOR]", + "hdts": "[COLOR aqua]Hdts[/COLOR]", "bluray-line": "[COLOR lightslategray]Bluray-line[/COLOR]", + "hdtv-rip": "[COLOR black]Hdtv-rip[/COLOR]", "micro720p": "[COLOR yellow]Micro720p[/COLOR]", + "ts-hq": "[COLOR mediumspringgreen]Ts-Hq[/COLOR]", "camrip": "[COLOR royalblue]Camp-Rip[/COLOR]", + "webs": "[COLOR lightsalmon]Webs[/COLOR]", "hd": "[COLOR mediumseagreen]HD[/COLOR]"} +IDIOMAS = {"castellano": "[COLOR yellow]Castelllano[/COLOR]", "latino": "[COLOR orange]Latino[/COLOR]", + "vose": "[COLOR lightsalmon]Subtitulada[/COLOR]", "vo": "[COLOR crimson]Ingles[/COLOR]", + "en": "[COLOR crimson]Ingles[/COLOR]"} +IDIOMASP = {"es": "[COLOR yellow]CAST[/COLOR]", "la": "[COLOR orange]LAT[/COLOR]", + "vs": "[COLOR lightsalmon]SUB[/COLOR]", "vo": "[COLOR crimson]Ingles[/COLOR]", + "en": "[COLOR crimson]INGL[/COLOR]"} + + +# Para la busqueda en bing evitando baneos + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) 
+ # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(item.clone(title="[COLOR lightskyblue][B]Películas[/B][/COLOR]", action="scraper", url=host, + thumbnail="http://imgur.com/fN2p6qH.png", fanart="http://imgur.com/b8OuBR2.jpg", + contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Más vistas[/B][/COLOR]", action="scraper", + url="http://peliculasgratis.biz/catalogue?order=most_viewed", + thumbnail="http://imgur.com/fN2p6qH.png", fanart="http://imgur.com/b8OuBR2.jpg", + contentType="movie")) + itemlist.append(itemlist[-1].clone(title=" [COLOR lightskyblue][B]Recomendadas[/B][/COLOR]", action="scraper", + url="http://peliculasgratis.biz/catalogue?order=most_rated", + thumbnail="http://imgur.com/fN2p6qH.png.png", + fanart="http://imgur.com/b8OuBR2.jpg", contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Actualizadas[/B][/COLOR]", action="scraper", + url="http://peliculasgratis.biz/catalogue?", + thumbnail="http://imgur.com/fN2p6qH.png", fanart="http://imgur.com/b8OuBR2.jpg", + contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Series[/B][/COLOR]", action="scraper", + url="http://peliculasgratis.biz/lista-de-series", + thumbnail="http://imgur.com/Jia27Uc.png", fanart="http://imgur.com/b8OuBR2.jpg", + contentType="tvshow")) + itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Buscar[/B][/COLOR]", action="", url="", + thumbnail="http://imgur.com/mwTwfN7.png", fanart="http://imgur.com/b8OuBR2.jpg")) + itemlist.append( + itemlist[-1].clone(title="[COLOR lightskyblue][B] Buscar Película[/B][/COLOR]", action="search", url="", + thumbnail="http://imgur.com/mwTwfN7.png", fanart="http://imgur.com/b8OuBR2.jpg", + contentType="movie")) + itemlist.append( + itemlist[-1].clone(title="[COLOR lightskyblue][B] Buscar Serie[/B][/COLOR]", action="search", url="", + thumbnail="http://imgur.com/mwTwfN7.png", fanart="http://imgur.com/b8OuBR2.jpg", + contentType="tvshow")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://peliculasgratis.biz/search/%s" % texto + + try: + return scraper(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def scraper(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + bloque_enlaces = scrapertools.find_single_match(data, '<h1 class="titulo-h1">(.*?)<\/i>Anuncios') + if item.contentType != "movie": + matches = scrapertools.find_multiple_matches(bloque_enlaces, + '<a 
class="i" href="([^"]+)".*?src="([^"]+)".*?<div class="l">(.*?)<\/a><h3>.*?(completa)">([^"]+)<\/a><\/h3> <span>(.*?)<\/span>') + else: + matches = scrapertools.find_multiple_matches(bloque_enlaces, + '<a class="i" href="([^"]+)".*?src="([^"]+)".*?">([^<]+)<.*?<div class="l">(.*?)<\/a><h3>.*?title[^<]+>([^<]+)<\/a><\/h3> <span>(.*?)<') + for url, thumb, quality, check_idioma, title, check_year in matches: + title_fan = title + title_item = "[COLOR cornflowerblue][B]" + title + "[/B][/COLOR]" + if item.contentType != "movie": + title = "[COLOR cornflowerblue][B]" + title + "[/B][/COLOR]" + else: + if quality == "ts": + quality = re.sub(r'ts', 'ts-hq', quality) + if CALIDADES.get(quality): + quality = CALIDADES.get(quality) + else: + quality = quality + idiomas = scrapertools.find_multiple_matches(check_idioma, '<div class="id (.*?)">') + if len(idiomas) == 1: + idioma1 = idiomas[0].strip() + if IDIOMASP.get(idioma1): + idiomas = "-" + IDIOMASP.get(idioma1) + elif len(idiomas) == 2: + idioma1, idioma2 = idiomas[0], idiomas[1] + if IDIOMASP.get(idioma1, idioma2): + idioma1 = IDIOMASP.get(idioma1) + idioma2 = IDIOMASP.get(idioma2) + idiomas = "-" + idioma1 + "|" + idioma2 + elif len(idiomas) == 3: + idioma1, idioma2, idioma3 = idiomas[0], idiomas[1], idiomas[2] + idioma1 = IDIOMASP.get(idioma1) + idioma2 = IDIOMASP.get(idioma2) + idioma3 = IDIOMASP.get(idioma3) + idiomas = "-" + idioma1 + "|" + idioma2 + "|" + idioma3 + elif len(idiomas) >= 4: + idioma1, idioma2, idioma3, idioma4 = idiomas[0], idiomas[1], idiomas[2], idiomas[3] + idioma1 = IDIOMASP.get(idioma1) + idioma2 = IDIOMASP.get(idioma2) + idioma3 = IDIOMASP.get(idioma3) + idioma4 = IDIOMASP.get(idioma4) + idiomas = "-" + idioma1 + "|" + idioma2 + "|" + idioma3 + "|" + idioma4 + + title = "[COLOR cornflowerblue][B]" + title + "[/B][/COLOR]" + " " + quality + " " + idiomas + + itemlist.append( + Item(channel=item.channel, title=title, url=urlparse.urljoin(host, url), action="fanart", thumbnail=thumb, + fanart="http://imgur.com/nqmJozd.jpg", extra=title_fan + "|" + title_item + "|" + check_year.strip(), + contentType=item.contentType, folder=True)) + ## Paginación + if check_year: + next = scrapertools.find_single_match(data, 'href="([^"]+)" title="Siguiente página">') + if len(next) > 0: + url = next + if not "http" in url: + url = urlparse.urljoin(host, url) + itemlist.append( + Item(channel=item.channel, action="scraper", title="[COLOR floralwhite][B]Siguiente[/B][/COLOR]", + url=url, thumbnail="http://imgur.com/jhRFAmk.png", fanart="http://imgur.com/nqmJozd.jpg", + extra=item.extra, contentType=item.contentType, folder=True)) + + return itemlist + + +def fanart(item): + logger.info() + itemlist = [] + url = item.url + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + year = item.extra.split("|")[2] + if not year.isdigit(): + try: + year = scrapertools.find_single_match(data, '<span class="izq">[^<]+<\/span><span>(\d+)<') + except: + year = "" + if item.contentType != "movie": + tipo_ps = "tv" + else: + tipo_ps = "movie" + title = item.extra.split("|")[0] + fulltitle = title + if "El infiltrado" in title: + title = "The Night Manager" + title_o = scrapertools.find_single_match(data, '<meta name="description"[^<]+original(.*?)&') + item.title = item.extra.split("|")[1] + title_imdb = re.sub(r'\[.*?\]', '', item.extra.split("|")[1]) + title = re.sub( + r"\(.*?\)|-Remastered|Black And Chrome Edition|V.extendida|Version 
Extendida|V.Extendida|HEVC|X\d+|x\d+|LINE|HD|1080p|Screeener|V.O|Hdrip|.*?--|3D|SBS|HOU", + "", title) + + sinopsis = scrapertools.find_single_match(data, '<div class="sinopsis".*?<\/h2>(.*?)<\/div>') + if sinopsis == "": + try: + sinopsis = scrapertools.find_single_match(data, 'sinopsis\'>(.*?)<\/div>') + except: + sinopsis = "" + if "Miniserie" in sinopsis: + tipo_ps = "tv" + year = scrapertools.find_single_match(sinopsis, 'de TV \((\d+)\)') + if year == "": + if item.contentType != "movie": + try: + year = scrapertools.find_single_match(data, '<strong>Estreno:<\/strong>(\d+)<\/span>') + except: + year = "" + else: + year = scrapertools.find_single_match(data, '<br \/>A.*?(\d+)<br \/>') + if year == "": + try: + year = scrapertools.find_single_match(data, 'Estreno.*?\d+/\d+/(\d+)') + except: + try: + year = scrapertools.find_single_match(data, + '<div class=\'descripcion_top\'>.*?Año<br />.*?(\d\d\d\d)') + except: + try: + year = scrapertools.find_single_match(data, + '<meta name="description"[^<]+Año[^<]+\d\d\d\d') + except: + year = "" + infoLabels = {'title': title, 'sinopsis': sinopsis, 'year': year} + critica, rating_filma, year_f, sinopsis_f = filmaffinity(item, infoLabels) + if sinopsis == "": + sinopsis = sinopsis_f + if year == "": + year = year_f + otmdb = tmdb.Tmdb(texto_buscado=title, year=year, tipo=tipo_ps) + id = otmdb.result.get("id") + posterdb = otmdb.result.get("poster_path") + if posterdb == None: + otmdb = tmdb.Tmdb(texto_buscado=title, tipo=tipo_ps) + id = otmdb.result.get("id") + posterdb = otmdb.result.get("poster_path") + if posterdb == None: + if item.contentType != "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title_imdb.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (title_imdb.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es") + id = otmdb.result.get("id") + posterdb = otmdb.result.get("poster_path") + if not posterdb: + if "(" in title_imdb: + title = scrapertools.find_single_match(title_imdb, '\(.*?\)') + if item.contentType != "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title_imdb.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + title_imdb.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, '<li 
class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, + idioma_busqueda="es") + id = otmdb.result.get("id") + posterdb = otmdb.result.get("poster_path") + if not posterdb: + id = tiw = rating = tagline = id_tvdb = "" + fanart_4 = fanart_2 = fanart_3 = item.fanart + rating = "Sin Puntuación" + posterdb = tvf = item.thumbnail + fanart_info = item.fanart + thumbnail_art = item.thumbnail + extra = str(fanart_2) + "|" + str(fanart_3) + "|" + str(fanart_4) + "|" + str(id) + "|" + str( + tvf) + "|" + str(id_tvdb) + "|" + str(tiw) + "|" + str(rating) + if tipo_ps != "movie": + action = "findvideos_series" + else: + action = "findvideos" + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action=action, + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, folder=True)) + else: + if tipo_ps != "movie": + action = "findvideos_series" + else: + action = "findvideos" + id = tiw = rating = tagline = id_tvdb = "" + fanart_4 = fanart_2 = fanart_3 = item.fanart + rating = "Sin Puntuación" + posterdb = tvf = item.thumbnail + fanart_info = item.fanart + thumbnail_art = item.thumbnail + extra = str(fanart_2) + "|" + str(fanart_3) + "|" + str(fanart_4) + "|" + str(id) + "|" + str( + tvf) + "|" + str(id_tvdb) + "|" + str(tiw) + "|" + str(rating) + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action=action, + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, + contentType=item.contentType, folder=True)) + if posterdb != item.thumbnail: + if not "null" in posterdb: + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + else: + posterdb = item.thumbnail + + if otmdb.result.get("backdrop_path"): + fanart = "https://image.tmdb.org/t/p/original" + otmdb.result.get("backdrop_path") + else: + fanart = item.fanart + if sinopsis == "": + if otmdb.result.get("'overview'"): + sinopsis = otmdb.result.get("'overview'") + else: + sinopsis = "" + if otmdb.result.get("vote_average"): + rating = otmdb.result.get("vote_average") + else: + rating = "Sin puntuacíon" + imagenes = [] + itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps) + images = itmdb.result.get("images") + for key, value in images.iteritems(): + for detail in value: + imagenes.append('https://image.tmdb.org/t/p/original' + detail["file_path"]) + if item.contentType != "movie": + if itmdb.result.get("number_of_seasons"): + season_number = itmdb.result.get("number_of_seasons") + else: + season_episode = "" + if itmdb.result.get("number_of_episodes"): + season_episode = itmdb.result.get("number_of_episodes") + else: + season_episode = "" + if itmdb.result.get("status"): + status = itmdb.result.get("status") + else: + status = "" + if status == "Ended": + status = "Finalizada" + else: + status = "En emisión" + tagline = str(status) + " (Temporadas:" + str(season_number) + ",Episodios:" + str(season_episode) + ")" + if itmdb.result.get("external_ids").get("tvdb_id"): + id_tvdb = itmdb.result.get("external_ids").get("tvdb_id") + else: + id_tvdb = "" + else: + id_tvdb = "" + if itmdb.result.get("tagline"): + tagline = itmdb.result.get("tagline") + else: + tagline = "" + if len(imagenes) >= 5: + fanart_info = imagenes[1] + fanart_2 = imagenes[2] + fanart_3 = imagenes[3] + fanart_4 = 
imagenes[4] + if fanart == item.fanart: + fanart = fanart_info + elif len(imagenes) == 4: + fanart_info = imagenes[1] + fanart_2 = imagenes[2] + fanart_3 = imagenes[3] + fanart_4 = imagenes[1] + if fanart == item.fanart: + fanart = fanart_info + elif len(imagenes) == 3: + fanart_info = imagenes[1] + fanart_2 = imagenes[2] + fanart_3 = imagenes[1] + fanart_4 = imagenes[0] + if fanart == item.fanart: + fanart = fanart_info + elif len(imagenes) == 2: + fanart_info = imagenes[1] + fanart_2 = imagenes[0] + fanart_3 = imagenes[1] + fanart_4 = imagenes[1] + if fanart == item.fanart: + fanart = fanart_info + else: + fanart_info = fanart + fanart_2 = fanart + fanart_3 = fanart + fanart_4 = fanart + images_fanarttv = fanartv(item, id_tvdb, id) + if item.contentType != "movie": + url = item.url + "/episodios" + action = "findvideos_series" + if images_fanarttv: + try: + thumbnail_art = images_fanarttv.get("hdtvlogo")[0].get("url") + except: + try: + thumbnail_art = images_fanarttv.get("clearlogo")[0].get("url") + except: + thumbnail_art = posterdb + if images_fanarttv.get("tvbanner"): + tvf = images_fanarttv.get("tvbanner")[0].get("url") + elif images_fanarttv.get("tvthumb"): + tvf = images_fanarttv.get("tvthumb")[0].get("url") + elif images_fanarttv.get("tvposter"): + tvf = images_fanarttv.get("tvposter")[0].get("url") + else: + tvf = posterdb + if images_fanarttv.get("tvthumb"): + thumb_info = images_fanarttv.get("tvthumb")[0].get("url") + else: + thumb_info = thumbnail_art + + if images_fanarttv.get("hdclearart"): + tiw = images_fanarttv.get("hdclearart")[0].get("url") + elif images_fanarttv.get("characterart"): + tiw = images_fanarttv.get("characterart")[0].get("url") + elif images_fanarttv.get("hdtvlogo"): + tiw = images_fanarttv.get("hdtvlogo")[0].get("url") + else: + tiw = "" + else: + tiw = "" + tvf = thumbnail_info = thumbnail_art = posterdb + else: + url = item.url + action = "findvideos" + if images_fanarttv: + if images_fanarttv.get("hdmovielogo"): + thumbnail_art = images_fanarttv.get("hdmovielogo")[0].get("url") + elif images_fanarttv.get("moviethumb"): + thumbnail_art = images_fanarttv.get("moviethumb")[0].get("url") + elif images_fanarttv.get("moviebanner"): + thumbnail_art = images_fanarttv.get("moviebanner")[0].get("url") + else: + thumbnail_art = posterdb + if images_fanarttv.get("moviedisc"): + tvf = images_fanarttv.get("moviedisc")[0].get("url") + elif images_fanarttv.get("hdmovielogo"): + tvf = images_fanarttv.get("hdmovielogo")[0].get("url") + else: + tvf = posterdb + if images_fanarttv.get("hdmovieclearart"): + tiw = images_fanarttv.get("hdmovieclearart")[0].get("url") + elif images_fanarttv.get("hdmovielogo"): + tiw = images_fanarttv.get("hdmovielogo")[0].get("url") + else: + tiw = "" + else: + tiw = "" + tvf = thumbnail_art = posterdb + extra = str(fanart_2) + "|" + str(fanart_3) + "|" + str(fanart_4) + "|" + str(id) + "|" + str(tvf) + "|" + str( + id_tvdb) + "|" + str(tiw) + "|" + str(rating) + "|" + tipo_ps + itemlist.append( + Item(channel=item.channel, title=item.title, url=url, action=action, thumbnail=thumbnail_art, fanart=fanart, + extra=extra, contentType=item.contentType, fulltitle=fulltitle, folder=True)) + title_info = "[COLOR powderblue][B]Info[/B][/COLOR]" + extra = str(rating) + "|" + str(rating_filma) + "|" + str(id) + "|" + str(item.title) + "|" + str( + id_tvdb) + "|" + str(tagline) + "|" + str(sinopsis) + "|" + str(critica) + "|" + str(thumbnail_art) + "|" + str( + fanart_4) + itemlist.append(Item(channel=item.channel, action="info", title=title_info, 
url=item.url, thumbnail=posterdb, + fanart=fanart_info, extra=extra, contentType=item.contentType, folder=False)) + return itemlist + + +def findvideos_series(item): + logger.info() + itemlist = [] + fanart = "" + check_temp = [] + data = httptools.downloadpage(item.url).data + if item.contentType != "movie": + itmdb = tmdb.Tmdb(id_Tmdb=item.extra.split("|")[3], tipo=item.extra.split("|")[8]) + season = itmdb.result.get("seasons") + check = "no" + try: + temp, bloque_enlaces = scrapertools.find_single_match(data, 'Temporada (\d+)(.*?)Temporada') + except: + if "no se agregaron" in data: + temp = bloque_enlaces = "" + else: + temp, bloque_enlaces = scrapertools.find_single_match(data, 'Temporada (\d+)(.*?)<div class="enlaces">') + if temp != "": + thumbnail = "" + if season: + for detail in season: + if str(detail["season_number"]) == temp: + if detail["poster_path"]: + thumbnail = "https://image.tmdb.org/t/p/original" + detail["poster_path"] + images_fanarttv = fanartv(item, item.extra.split("|")[5], item.extra.split("|")[3]) + if images_fanarttv: + season_f = images_fanarttv.get("showbackground") + if season_f: + for detail in season_f: + if str(detail["season"]) == temp: + if detail["url"]: + fanart = detail["url"] + if fanart == "": + fanart = item.extra.split("|")[0] + if thumbnail == "": + thumbnail = item.thumbnail + itemlist.append(Item(channel=item.channel, + title="[COLOR darkturquoise]Temporada[/COLOR] " + "[COLOR beige]" + temp + "[/COLOR]", + url="", action="", thumbnail=thumbnail, fanart=fanart, extra="", + contentType=item.contentType, folder=False)) + capitulos = scrapertools.find_multiple_matches(bloque_enlaces, 'href="([^"]+)".*?Episodio (\d+) - ([^<]+)') + for url, epi, title in capitulos: + if epi == "1": + if epi in str(check_temp): + temp = int(temp) + 1 + thumbnail = "" + if season: + for detail in season: + if detail["season_number"] == temp: + if detail["poster_path"]: + thumbnail = "https://image.tmdb.org/t/p/original" + detail["poster_path"] + images_fanarttv = fanartv(item, item.extra.split("|")[5], item.extra.split("|")[3]) + if images_fanarttv: + season_f = images_fanarttv.get("showbackground") + if season_f: + for detail in season_f: + if detail["season"] == temp: + if detail["url"]: + fanart = detail["url"] + if fanart == "": + fanart = item.extra.split("|")[0] + if thumbnail == "": + thumbnail = item.thumbnail + itemlist.append(Item(channel=item.channel, + title="[COLOR darkturquoise]Temporada[/COLOR] " + "[COLOR beige]" + str( + temp) + "[/COLOR]", url="", action="", thumbnail=thumbnail, fanart=fanart, + extra="", contentType=item.contentType, folder=False)) + check_temp.append([epi]) + itemlist.append(Item(channel=item.channel, + title=" [COLOR cyan]Episodio[/COLOR] " + "[COLOR darkcyan]" + epi + "[/COLOR]" + " - " + "[COLOR cadetblue]" + title + "[/COLOR]", + url=url, action="findvideos", thumbnail=item.extra.split("|")[4], + fanart=item.extra.split("|")[0], extra="", contentType=item.contentType, folder=True)) + title_info = " Info" + title_info = "[COLOR steelblue]" + title_info + "[/COLOR]" + itemlist.append(Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url, + thumbnail=item.extra.split("|")[6], fanart=item.extra.split("|")[1], + extra=item.extra + "|" + str(temp) + "|" + epi, folder=False)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + + if item.extra != "dd" and item.extra != "descarga": + + if item.contentType != "movie": + bloque_links 
= scrapertools.find_single_match(data, '<div class="links">(.*?)<\/i>Selecciona un') + if bloque_links == "": + bloque_links = scrapertools.find_single_match(data, '<div class="links">(.*?)<div class="enlaces">') + else: + bloque_links = scrapertools.find_single_match(data, '<div class="links">(.*?)<\/i>Descargar') + if bloque_links == "": + bloque_links = scrapertools.find_single_match(data, '<div class="links">(.*?)<div class="enlaces">') + links = scrapertools.find_multiple_matches(bloque_links, + '<a class="goto" rel="nofollow".*?data-id="([^<]+)".*?src="([^"]+)">([^<]+)<.*?src="([^"]+)">([^<]+).*?<span>([^<]+)') + for id, thumb, server, idiomapng, idioma, calidad in links: + idioma = idioma.strip() + calidad = calidad.lower() + calidad = re.sub(r' ', '-', calidad) + if calidad == "ts": + calidad = re.sub(r'ts', 'ts-hq', calidad) + url = "http://peliculasgratis.biz/goto/" + url_post = urllib.urlencode({'id': id}) + server_name = scrapertools.get_match(server, '(.*?)\.').strip() + icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", + "server_" + server_name + ".png") + icon_server = icon_server.replace('streamin', 'streaminto') + if not os.path.exists(icon_server): + icon_server = thumb + if CALIDADES.get(calidad): + calidad = CALIDADES.get(calidad) + else: + calidad = "[COLOR brown]" + calidad + "[/COLOR]" + if IDIOMAS.get(idioma): + idioma = IDIOMAS.get(idioma) + else: + idioma = "[COLOR brown]" + idioma + "[/COLOR]" + + extra = "online" + itemlist.append(Item(channel=item.channel, + title="[COLOR lightcyan][B]" + server + "[/B][/COLOR] " + calidad + " " + idioma, + url=url, action="play", thumbnail=icon_server, fanart="", contentType=item.contentType, + folder=True, id=url_post)) + else: + bloque_dd = scrapertools.find_single_match(data, '<\/i>Descargar(.*?)<div class="enlaces">') + links_dd = scrapertools.find_multiple_matches(bloque_dd, + '<a class="low".*?data-id="(.*?)".*?src="([^"]+)">([^<]+)<.*?src[^<]+>([^<]+).*?<span>([^<]+)') + for id, thumb, server, idioma, calidad in links_dd: + idioma = idioma.strip() + calidad = calidad.lower() + calidad = re.sub(r' ', '-', calidad) + if calidad == "ts": + calidad = re.sub(r'ts', 'ts-hq', calidad) + if CALIDADES.get(calidad): + calidad = CALIDADES.get(calidad) + else: + calidad = "[COLOR brown]" + calidad + "[/COLOR]" + if IDIOMAS.get(idioma): + idioma = IDIOMAS.get(idioma) + else: + idioma = "[COLOR brown]" + idioma + "[/COLOR]" + url = "http://peliculasgratis.biz/goto/" + data_post = urllib.urlencode({'id': id}) + server_name = scrapertools.get_match(server, '(.*?)\.').strip() + icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", + "server_" + server_name + ".png") + icon_server = icon_server.replace('streamin', 'streaminto') + icon_server = icon_server.replace('ul', 'uploadedto') + if not os.path.exists(icon_server): + icon_server = thumb + extra = "descarga" + itemlist.append( + item.clone(title="[COLOR floralwhite][B]" + server + "[/B][/COLOR] " + calidad + " " + idioma, url=url, + action="play", thumbnail=icon_server, fanart="", contentType=item.contentType, id=data_post)) + + if item.contentType == "movie" and item.extra != "descarga" and item.extra != "online": + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca", + action="add_pelicula_to_library", url=item.url, text_color="green", + infoLabels={'title': item.fulltitle}, thumbnail="http://imgur.com/xjrGmVM.png", + 
fulltitle=item.fulltitle,
+                                 extra=extra))
+    if item.extra != "dd" and item.extra != "descarga" and item.extra != "online":
+        bloque_dd = scrapertools.find_single_match(data, '<\/i>Descargar(.*?)<div class="enlaces">')
+        if bloque_dd:
+            itemlist.append(item.clone(title="[COLOR aqua][B]Ver enlaces Descarga[/B][/COLOR] ", action="findvideos",
+                                       thumbnail=thumb, fanart="", contentType=item.contentType, bloque_dd=bloque_dd,
+                                       extra="dd"))
+
+    return itemlist
+
+
+def play(item):
+    itemlist = []
+    data = httptools.downloadpage(item.url, post=item.id).data
+    enlaces = scrapertools.find_single_match(data, '<a rel="nofollow" href="([^"]+)"')
+    itemlist = servertools.find_video_items(data=enlaces)
+
+    for videoitem in itemlist:
+        videoitem.title = item.title
+        videoitem.thumbnail = item.extra
+        videoitem.extra = item.extra
+        videoitem.channel = item.channel
+
+    return itemlist
+
+
+def info(item):
+    logger.info()
+    itemlist = []
+    url = item.url
+    # El rating viaja en la posición 0 del extra; antes se comprobaba la 6 (la sinopsis)
+    rating_tmdba_tvdb = item.extra.split("|")[0]
+    if item.extra.split("|")[0] == "":
+        rating_tmdba_tvdb = "Sin puntuación"
+    rating_filma = item.extra.split("|")[1]
+    filma = "http://s6.postimg.org/6yhe5fgy9/filma.png"
+    title = item.extra.split("|")[3]
+    title = title.replace("%20", " ")
+    try:
+        if "." in rating_tmdba_tvdb:
+            check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).')
+        else:
+            check_rat_tmdba = rating_tmdba_tvdb
+        if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8:
+            rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+        elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == "10":
+            rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+        else:
+            rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+    except:
+        rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+    if "10." in rating:
+        rating = re.sub(r'10\.\d+', '10', rating)
+    try:
+        check_rat_filma = scrapertools.get_match(rating_filma, '(\d)')
+        if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8:
+            rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]"
+        elif int(check_rat_filma) >= 8:
+            rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]"
+        else:
+            rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
+    except:
+        rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
+    plot = item.extra.split("|")[6]
+    plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
+    plot = re.sub(r"\\|<br />", "", plot)
+    if item.extra.split("|")[5] != "":
+        tagline = item.extra.split("|")[5]
+        if tagline == "\"\"":
+            tagline = " "
+        tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
+    else:
+        tagline = ""
+    if item.contentType != "movie":
+        icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
+    else:
+        icon = "http://imgur.com/SenkyxF.png"
+
+    foto = item.extra.split("|")[9]
+    if not "tmdb" in foto:
+        foto = ""
+    if item.extra.split("|")[7] != "":
+        critica = item.extra.split("|")[7]
+    else:
+        critica = "No hay críticas disponibles..."
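Note: the "Tambien te puede interesar" lookups below pull id/title/poster triples out of the recommendations response with one regex that is tied to TMDB's field order. A json-based sketch (hypothetical helper; `api_key` is the module-level key this channel already defines):

    import json
    import urllib2

    def tmdb_recommendations(media_type, media_id, api_key):
        # media_type is "movie" or "tv"; returns [id, title, poster] triples
        url = ("http://api.themoviedb.org/3/%s/%s/recommendations"
               "?api_key=%s&language=es" % (media_type, media_id, api_key))
        data = json.load(urllib2.urlopen(url))
        peliculas = []
        for res in data.get("results", []):
            title = res.get("original_title") or res.get("original_name") or ""
            poster = res.get("poster_path")
            if poster:
                poster = "https://image.tmdb.org/t/p/original" + poster
            else:
                poster = "http://s6.postimg.org/tw1vhymj5/noposter.png"
            peliculas.append([res["id"], title, poster])
        return peliculas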
+ + photo = item.extra.split("|")[8].replace(" ", "%20") + if ".jpg" in photo: + photo = "" + # Tambien te puede interesar + peliculas = [] + if item.contentType != "movie": + url_tpi = "http://api.themoviedb.org/3/tv/" + item.extra.split("|")[ + 2] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = httptools.downloadpage(url_tpi).data + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),"popularity"') + else: + url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 2] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = httptools.downloadpage(url_tpi).data + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),"popularity"') + + for idp, peli, thumb in tpi: + + thumb = re.sub(r'"|}', '', thumb) + if "null" in thumb: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + else: + thumb = "https://image.tmdb.org/t/p/original" + thumb + peliculas.append([idp, peli, thumb]) + + extra = "" + "|" + item.extra.split("|")[2] + "|" + item.extra.split("|")[2] + "|" + item.extra.split("|")[ + 6] + "|" + "" + infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline, + 'rating': rating} + item_info = item.clone(info=infoLabels, icon=icon, extra=extra, rating=rating, rating_filma=rating_filma, + critica=critica, contentType=item.contentType, thumb_busqueda="http://imgur.com/Q0BTHyO.png") + from channels import infoplus + infoplus.start(item_info, peliculas) + + +def info_capitulos(item, images={}): + logger.info() + url = "https://api.themoviedb.org/3/tv/" + item.extra.split("|")[3] + "/season/" + item.extra.split("|")[ + 9] + "/episode/" + item.extra.split("|")[10] + "?api_key=" + api_key + "&language=es" + if "/0" in url: + url = url.replace("/0", "/") + from core import jsontools + data = jsontools.load(scrapertools.downloadpage(url)) + foto = item.extra.split("|")[6] + if not ".png" in foto: + foto = "http://imgur.com/Q0BTHyO.png" + if data: + if data.get("name"): + title = data.get("name") + else: + title = "" + title = "[COLOR aqua][B]" + title + "[/B][/COLOR]" + if data.get("still_path"): + image = "https://image.tmdb.org/t/p/original" + data.get("still_path") + else: + image = "http://imgur.com/ZiEAVOD.png" + if data.get("overview"): + plot = data.get("overview") + else: + plot = "Sin informacion del capítulo aún..." + plot = "[COLOR honeydew][B]" + plot + "[/B][/COLOR]" + if data.get("vote_average"): + rating = data.get("vote_average") + else: + rating = 0 + try: + + if rating >= 5 and rating < 8: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]" + elif rating >= 8 and rating < 10: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]" + elif rating == 10: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]" + else: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" + except: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + + + else: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." 
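Note: the `url.replace("/0", "/")` in info_capitulos above strips a single leading zero from the season/episode segments, but it misses double zeros and collapses a literal season or episode 0 into an empty path segment. Converting the fields through int() is a safer sketch (same extra layout the function already reads):

    # int() normalises "05" -> 5 and fails loudly on garbage
    season = int(item.extra.split("|")[9])
    episode = int(item.extra.split("|")[10])
    url = ("https://api.themoviedb.org/3/tv/%s/season/%d/episode/%d"
           "?api_key=%s&language=es"
           % (item.extra.split("|")[3], season, episode, api_key))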
+ plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/oWUdNFg.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): + i = 0 + while i < len(text): + m = match(text, i) + s = 
m.group(m.lastindex) + i = m.end() + if m.lastindex == 2: + yield "s" + yield text[i:i + int(s)] + i = i + int(s) + else: + yield s + + +def decode_item(next, token): + if token == "i": + # integer: "i" value "e" + data = int(next()) + if next() != "e": + raise ValueError + elif token == "s": + # string: "s" value (virtual tokens) + data = next() + elif token == "l" or token == "d": + # container: "l" (or "d") values "e" + data = [] + tok = next() + while tok != "e": + data.append(decode_item(next, tok)) + tok = next() + if token == "d": + data = dict(zip(data[0::2], data[1::2])) + else: + raise ValueError + return data + + +def decode(text): + try: + src = tokenize(text) + data = decode_item(src.next, src.next()) + for token in src: # look for more tokens + raise SyntaxError("trailing junk") + except (AttributeError, ValueError, StopIteration): + try: + data = data + except: + data = src + + return data + + +def convert_size(size): + import math + if (size == 0): + return '0B' + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return '%s %s' % (s, size_name[i]) + + +def fanartv(item, id_tvdb, id, images={}): + headers = [['Content-Type', 'application/json']] + from core import jsontools + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % id + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb + try: + data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) + if data and not "error message" in data: + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + images[key] = value + else: + images = [] + + except: + images = [] + return images + + +def filmaffinity(item, infoLabels): + title = infoLabels["title"].replace(" ", "+") + try: + year = infoLabels["year"] + except: + year = "" + sinopsis = infoLabels["sinopsis"] + + if year == "": + if item.contentType != "movie": + tipo = "serie" + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title + else: + tipo = "película" + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title + try: + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + try: + data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data + except: + data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data + else: + try: + data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data + except: + data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + except: + pass + else: + tipo = "Pelicula" + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url, cookies=False).data + 
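For reference, `tokenize`, `decode_item` and `decode` above form a minimal bencode reader (ints as `i42e`, strings as `4:spam`, lists as `l...e`, dicts as `d...e`), and `convert_size` pretty-prints byte counts. A quick usage sketch with made-up torrent-style metadata:

    # decode() maps bencoded text onto Python values
    meta = decode("d4:name8:demo.mkv6:lengthi734003200ee")
    print meta["name"]                  # demo.mkv
    print convert_size(meta["length"])  # 700.0 MB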
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf, cookies=False).data + else: + if item.contentType != "movie": + tipo = "serie" + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title + else: + tipo = "película" + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title + try: + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma, cookies=False).data + else: + data = httptools.downloadpage(url_filma, cookies=False).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + except: + pass + sinopsis_f = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis_f = sinopsis_f.replace("<br><br />", "\n") + sinopsis_f = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis_f) + try: + year_f = scrapertools.get_match(data, '<dt>Año</dt>.*?>(\d+)</dd>') + except: + year_f = "" + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta %s no tiene críticas todavía...[/B][/COLOR]" % tipo + + return critica, rating_filma, year_f, sinopsis_f diff --git a/plugin.video.alfa/channels/peliculashindu.json b/plugin.video.alfa/channels/peliculashindu.json new file mode 100755 index 00000000..776c6b04 --- /dev/null +++ b/plugin.video.alfa/channels/peliculashindu.json @@ -0,0 +1,19 @@ +{ + "id": "peliculashindu", + "name": "PeliculasHindu", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "peliculashindu.png", + "banner": "peliculashindu.png", + "version": 1, + "changes": [ + { + "date": "25/05/2017", + "description": "Primera version" + } + ], + "categories": [ + "movie" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliculashindu.py b/plugin.video.alfa/channels/peliculashindu.py new file mode 100755 index 00000000..7d4090d0 --- /dev/null +++ b/plugin.video.alfa/channels/peliculashindu.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from 
core.item import Item + +host = "http://www.peliculashindu.com/" + + +def mainlist(item): + logger.info() + + itemlist = list() + + itemlist.append( + Item(channel=item.channel, action="lista", title="Top Películas", url=urlparse.urljoin(host, "top"))) + itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host)) + itemlist.append(Item(channel=item.channel, action="explorar", title="Género", url=urlparse.urljoin(host, "genero"))) + itemlist.append(Item(channel=item.channel, action="explorar", title="Listado Alfabético", + url=urlparse.urljoin(host, "alfabetico"))) + # itemlist.append(Item(channel=item.channel, action="explorar", title="Listado por año", url=urlparse.urljoin(host, "año"))) + itemlist.append(Item(channel=item.channel, action="lista", title="Otras Películas (No Bollywood)", + url=urlparse.urljoin(host, "estrenos"))) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "buscar-"))) + return itemlist + + +def explorar(item): + logger.info() + itemlist = list() + url1 = str(item.url) + data = httptools.downloadpage(host).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + # logger.info("loca :"+url1+" aaa"+data) + if 'genero' in url1: + patron = '<div class="d"><h3>Pel.+?neros<\/h3>(.+?)<\/h3>' + if 'alfabetico' in url1: + patron = '<\/li><\/ul><h3>Pel.+?tico<\/h3>(.+?)<\/h3>' + if 'año' in url1: + patron = '<ul class="anio"><li>(.+?)<\/ul>' + data_explorar = scrapertools.find_single_match(data, patron) + patron_explorar = '<a href="([^"]+)">([^"]+)<\/a>' + matches = scrapertools.find_multiple_matches(data_explorar, patron_explorar) + for scrapedurl, scrapedtitle in matches: + if 'Acci' in scrapedtitle: + scrapedtitle = 'Acción' + if 'Anima' in scrapedtitle: + scrapedtitle = 'Animación' + if 'Fanta' in scrapedtitle: + scrapedtitle = 'Fantasía' + if 'Hist' in scrapedtitle: + scrapedtitle = 'Histórico' + if 'lico Guerra' in scrapedtitle: + scrapedtitle = 'Bélico Guerra' + if 'Ciencia' in scrapedtitle: + scrapedtitle = 'Ciencia Ficción' + itemlist.append(item.clone(action='lista', title=scrapedtitle, url=scrapedurl)) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "-") + item.url = item.url + texto + # logger.info("item="+item.url) + if texto != '': + return lista(item) + + +def lista(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc... 
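Note: the pagination in `lista()` below computes `next_page_url = item.url + paginasig` before checking whether the marker matched, so an absolute href (or an empty match) yields a malformed link. A defensive sketch with `urlparse`, which this channel already imports:

    # urljoin keeps absolute hrefs intact and resolves relative ones;
    # only emit the "next page" item when the marker actually matched
    paginasig = scrapertools.find_single_match(data, '<a href="([^"]+)" title="Siguiente .+?">')
    if paginasig:
        next_page_url = urlparse.urljoin(item.url, paginasig)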
+    url1 = str(item.url)
+    if 'http://www.peliculashindu.com/' in url1:
+        url1 = url1.replace("http://www.peliculashindu.com/", "")
+    if url1 != 'estrenos':
+        data = scrapertools.find_single_match(data, '<div id="cuerpo"><div class="iz">.+>Otras')
+    patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"'  # scrapedurl, scrapedthumbnail, scrapedtitle
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="findvideos",
+                                   show=scrapedtitle))
+    # Pagination
+    patron_pag = '<a href="([^"]+)" title="Siguiente .+?">'
+    paginasig = scrapertools.find_single_match(data, patron_pag)
+    if paginasig != "":
+        next_page_url = item.url + paginasig
+        item.url = next_page_url
+        itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
+                             thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+    itemlist.extend(servertools.find_video_items(data=data))
+    patron_show = '<strong>Ver Pel.+?a([^<]+) online<\/strong>'
+    show = scrapertools.find_single_match(data, patron_show)
+    for videoitem in itemlist:
+        videoitem.channel = item.channel
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(
+            Item(channel=item.channel, title='[COLOR yellow]Añadir esta película a la videoteca[/COLOR]', url=item.url,
+                 action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/peliculasmx.json b/plugin.video.alfa/channels/peliculasmx.json
new file mode 100755
index 00000000..b3434ac4
--- /dev/null
+++ b/plugin.video.alfa/channels/peliculasmx.json
@@ -0,0 +1,34 @@
+{
+  "id": "peliculasmx",
+  "name": "PeliculasMX",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "peliculasmx.png",
+  "banner": "peliculasmx.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "01/11/2016",
+      "description": "Adaptado por cambios en la web."
+ } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliculasmx.py b/plugin.video.alfa/channels/peliculasmx.py new file mode 100755 index 00000000..4b2a29fd --- /dev/null +++ b/plugin.video.alfa/channels/peliculasmx.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, title="Últimas añadidas", action="peliculas", url="http://www.peliculasmx.net/")) + itemlist.append( + Item(channel=item.channel, title="Últimas por género", action="generos", url="http://www.peliculasmx.net/")) + itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url="http://www.peliculasmx.net/")) + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cachePage(item.url) + + # <li class="cat-item cat-item-3"><a href="http://peliculasmx.net/category/accion/" >Accion</a> <span>246</span> + patron = '<li class="cat-item cat-item-.*?' + patron += '<a href="([^"]+)".*?' + patron += '>([^<]+).*?' + patron += '<span>([^<]+)' + + matches = re.compile(patron, re.DOTALL).findall(data) + logger.debug(matches) + for match in matches: + scrapedurl = urlparse.urljoin("", match[0]) + scrapedtitle = match[1].strip() + ' [' + match[2] + ']' + + itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, folder=True)) + + itemlist = sorted(itemlist, key=lambda Item: Item.title) + return itemlist + + +def peliculas(item): + logger.info() + extra = item.extra + itemlist = [] + + # Descarga la página + data = scrapertools.cachePage(item.url) + + patron = '<div id="mt-.*?' + patron += '<a href="([^"]+)".*?' + patron += '<img src="([^"]+)".*?' + patron += '<span class="tt">([^<]+).*?' 
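# The pattern being assembled here is easier to audit against a sample card;
# the markup below is reconstructed from the regex itself, not captured from
# the live site:
#
#     <div id="mt-1234">... <a href="URL">... <img src="THUMB">...
#     <span class="tt">TITLE</span>... <span class="calidad2">QUALITY</span>
#
# so each match unpacks as (url, thumbnail, title, quality), which is how
# match[0]..match[3] are read below.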
+ patron += '<span class="calidad2">([^<]+)' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for match in matches: + scrapedurl = match[0] # urlparse.urljoin("",match[0]) + scrapedtitle = match[2] + ' [' + match[3] + ']' + scrapedthumbnail = match[1] + itemlist.append( + Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, folder=True)) + + # Extrae la marca de siguiente página + paginador = scrapertools.find_single_match(data, "<div class='paginado'>.*?lateral") + + patron = "<li.*?<a class='current'>.*?href='([^']+)" + scrapedurl = scrapertools.find_single_match(paginador, patron) + + if scrapedurl: + scrapedtitle = "!Pagina Siguiente ->" + itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, folder=True)) + + return itemlist + + +def search(item, texto): + logger.info() + itemlist = [] + + texto = texto.replace(" ", "+") + try: + # Series + item.url = "http://www.peliculasmx.net/?s=%s" % texto + itemlist.extend(peliculas(item)) + itemlist = sorted(itemlist, key=lambda Item: Item.title) + + return itemlist + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] diff --git a/plugin.video.alfa/channels/peliculasnu.json b/plugin.video.alfa/channels/peliculasnu.json new file mode 100755 index 00000000..431906ca --- /dev/null +++ b/plugin.video.alfa/channels/peliculasnu.json @@ -0,0 +1,72 @@ +{ + "id": "peliculasnu", + "name": "Peliculas.Nu", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "12/05/2017", + "description": "Corregidos enlaces directos" + }, + { + "date": "07/04/2017", + "description": "Subtitulos en videos directos" + }, + { + "date": "12/03/2017", + "description": "Añadida info de idiomas" + }, + { + "date": "16/02/2017", + "description": "Versión inicial" + } + ], + "thumbnail": "http://i.imgur.com/2iupwXE.png", + "banner": "peliculasnu.png", + "categories": [ + "movie", + "vos", + "latino" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "llvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliculasnu.py b/plugin.video.alfa/channels/peliculasnu.py new file mode 100755 index 00000000..91717d1d --- /dev/null +++ b/plugin.video.alfa/channels/peliculasnu.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- + +import urllib + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +__modo_grafico__ = config.get_setting("modo_grafico", "peliculasnu") +__perfil__ = config.get_setting("perfil", "peliculasnu") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', 
'0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] +host = "http://peliculas.nu/" + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + itemlist.append(item.clone(title="Novedades", action="entradas", url=host, fanart="http://i.imgur.com/c3HS8kj.png")) + itemlist.append(item.clone(title="Más Vistas", action="entradas", url=host + "mas-vistas", + fanart="http://i.imgur.com/c3HS8kj.png")) + itemlist.append(item.clone(title="Mejor Valoradas", action="entradas", url=host + "mejor-valoradas", + fanart="http://i.imgur.com/c3HS8kj.png")) + item.text_color = color2 + itemlist.append(item.clone(title="En Español", action="entradas", url=host + "?s=Español", + fanart="http://i.imgur.com/c3HS8kj.png")) + itemlist.append(item.clone(title="En Latino", action="entradas", url=host + "?s=Latino", + fanart="http://i.imgur.com/c3HS8kj.png")) + itemlist.append( + item.clone(title="En VOSE", action="entradas", url=host + "?s=VOSE", fanart="http://i.imgur.com/c3HS8kj.png")) + item.text_color = color3 + itemlist.append(item.clone(title="Por género", action="indices", fanart="http://i.imgur.com/c3HS8kj.png")) + itemlist.append(item.clone(title="Por letra", action="indices", fanart="http://i.imgur.com/c3HS8kj.png")) + + itemlist.append(item.clone(title="", action="")) + itemlist.append(item.clone(title="Buscar...", action="search")) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + try: + item.url = "%s?s=%s" % (host, texto) + item.action = "entradas" + return entradas(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == "peliculas": + item.url = host + item.from_newest = True + item.action = "entradas" + itemlist = entradas(item) + + if itemlist[-1].action == "entradas": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def entradas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + patron = '<li class="TPostMv">.*?href="([^"]+)".*?src="([^"]+)".*?class="Title">([^<]+)<.*?' 
\ + '.*?"Date AAIco-date_range">(\d+).*?class="Qlty">([^<]+)<.*?<p class="Idioma(.*?)</p>' + matches = scrapertools.find_multiple_matches(data, patron) + if item.extra == "next": + matches_ = matches[15:] + else: + matches_ = matches[:15] + for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad, data_idioma in matches_: + idiomas = [] + if "/espm" in data_idioma: + idiomas.append("CAST") + if "/latinom" in data_idioma: + idiomas.append("LAT") + if "/vosemi" in data_idioma: + idiomas.append("VOSE") + + titulo = "%s [%s]" % (scrapedtitle, calidad) + if idiomas: + titulo += " [%s]" % "/".join(idiomas) + + scrapedthumbnail = scrapedthumbnail.replace("-160x242", "") + infolabels = {'year': year} + itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo, + contentTitle=scrapedtitle, infoLabels=infolabels, text_color=color2, + thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle)) + + if not item.from_newest: + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + if not item.extra and len(matches) > 15: + itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3)) + elif item.extra == "next": + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"') + if next_page: + itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color=color3, extra="")) + + return itemlist + + +def listado(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + patron = '<td class="MvTbImg">.*?href="([^"]+)".*?src="([^"]+)".*?<strong>([^<]+)<.*?' \ + '.*?<td>(\d+).*?class="Qlty">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + if item.extra == "next": + matches_ = matches[15:] + else: + matches_ = matches[:15] + for scrapedurl, scrapedthumbnail, scrapedtitle, year, calidad in matches_: + titulo = "%s [%s]" % (scrapedtitle, calidad) + scrapedthumbnail = scrapedthumbnail.replace("-55x85", "") + infolabels = {'year': year} + itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo, + contentTitle=scrapedtitle, infoLabels=infolabels, text_color=color2, + thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + if not item.extra and len(matches) > 15: + itemlist.append(item.clone(title=">> Página Siguiente", extra="next", text_color=color3)) + elif item.extra == "next": + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"') + if next_page: + itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color=color3, extra="")) + + return itemlist + + +def indices(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(host).data + if "letra" in item.title: + action = "listado" + bloque = scrapertools.find_single_match(data, '<ul class="AZList">(.*?)</ul>') + else: + action = "entradas" + bloque = scrapertools.find_single_match(data, 'Géneros</a>(.*?)</ul>') + matches = scrapertools.find_multiple_matches(bloque, '<li.*?<a href="([^"]+)">([^<]+)</a>') + for scrapedurl, scrapedtitle in matches: + itemlist.append(item.clone(action=action, url=scrapedurl, title=scrapedtitle)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + tmdb.set_infoLabels_item(item, __modo_grafico__) + data = httptools.downloadpage(item.url).data + + if not item.infoLabels["plot"]: + item.infoLabels["plot"] = 
scrapertools.find_single_match(data, '<div class="Description">.*?<p>(.*?)</p>') + fanart = scrapertools.find_single_match(data, '<img class="TPostBg" src="([^"]+)"') + if not item.fanart and fanart: + item.fanart = fanart + + patron = '<li class="Button STPb.*?data-tipo="([^"]+)" data-playersource="([^"]+)".*?><span>.*?<span>(.*?)</span>' + matches = scrapertools.find_multiple_matches(data, patron) + for tipo, source, title in matches: + if tipo == "trailer": + continue + post = "source=%s&action=obtenerurl" % urllib.quote(source) + headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': item.url} + data_url = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post, headers=headers).data + url = jsontools.load(data_url).get("url") + + if 'openload' in url: + url = url + '|' + item.url + + title = "%s - %s" % ('%s', title) + itemlist.append(item.clone(action="play", url=url, title=title, text_color=color3)) + + itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + + if item.extra != "findvideos" and config.get_videolibrary_support(): + itemlist.append(item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library", + extra="findvideos", text_color="green")) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + if "drive.php?v=" in item.url: + if not item.url.startswith("http:") and not item.url.startswith("https:"): + item.url = "http:" + item.url + data = httptools.downloadpage(item.url, add_referer=True).data.replace("\\", "") + + subtitulo = scrapertools.find_single_match(data, "var subtitulo='([^']+)'") + patron = '"label":\s*"([^"]+)","type":\s*"video/([^"]+)","(?:src|file)":\s*"([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for calidad, extension, url in matches: + url = url.replace(",", "%2C") + title = ".%s %s [directo]" % (extension, calidad) + itemlist.append([title, url, 0, subtitulo]) + try: + itemlist.sort(key=lambda it: int(it[0].split(" ")[1].split("p")[0])) + except: + pass + elif "metiscs" in item.url: + import base64 + from lib import jsunpack + + item.url = item.url.replace("https:", "http:") + if not item.url.startswith("http:"): + item.url = "http:" + item.url + + data = httptools.downloadpage(item.url, add_referer=True).data + str_encode = scrapertools.find_multiple_matches(data, '(?:\+|\()"([^"]+)"') + data = base64.b64decode("".join(str_encode)) + packed = scrapertools.find_single_match(data, '(eval\(function.*?)(?:</script>|\}\)\))') + if not packed: + packed = data + data_js = jsunpack.unpack(packed) + + subtitle = scrapertools.find_single_match(data_js, 'tracks:\[\{"file":"([^"]+)"') + patron = '{"file":\s*"([^"]+)","label":\s*"([^"]+)","type":\s*"video/([^"]+)"' + matches = scrapertools.find_multiple_matches(data_js, patron) + for url, calidad, extension in matches: + url = url.replace(",", "%2C") + title = ".%s %s [directo]" % (extension, calidad) + itemlist.insert(0, [title, url, 0, subtitle]) + else: + return [item] + + return itemlist diff --git a/plugin.video.alfa/channels/peliculasrey.json b/plugin.video.alfa/channels/peliculasrey.json new file mode 100755 index 00000000..c727b4c6 --- /dev/null +++ b/plugin.video.alfa/channels/peliculasrey.json @@ -0,0 +1,23 @@ +{ + "id": "peliculasrey", + "name": "peliculasrey", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "peliculasrey.png", + "banner": "peliculasrey.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + 
"date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "movie" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliculasrey.py b/plugin.video.alfa/channels/peliculasrey.py new file mode 100755 index 00000000..3837b066 --- /dev/null +++ b/plugin.video.alfa/channels/peliculasrey.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, action="PorFecha", title="Año de Lanzamiento", url="http://www.peliculasrey.com")) + itemlist.append(Item(channel=item.channel, action="Idiomas", title="Idiomas", url="http://www.peliculasrey.com")) + itemlist.append( + Item(channel=item.channel, action="calidades", title="Por calidad", url="http://www.peliculasrey.com")) + itemlist.append(Item(channel=item.channel, action="generos", title="Por género", url="http://www.peliculasrey.com")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://www.peliculasrey.com")) + + return itemlist + + +def PorFecha(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + data = scrapertools.find_single_match(data, '<section class="lanzamiento">(.*?)</section>') + logger.info("data=" + data) + + # Extrae las entradas (carpetas) + patron = '<a href="([^"]+).*?title="([^"]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + thumbnail = "" + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, viewmode="movie")) + + return itemlist + + +def Idiomas(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + data = scrapertools.find_single_match(data, '<section class="idioma">(.*?)</section>') + logger.info("data=" + data) + + # Extrae las entradas (carpetas) + patron = '<a href="([^"]+).*?title="([^"]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + thumbnail = "" + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, viewmode="movie")) + + return itemlist + + +def calidades(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + data = scrapertools.find_single_match(data, '<section class="calidades">(.*?)</section>') + logger.info("data=" + data) + + # Extrae las entradas (carpetas) + patron = '<a href="([^"]+).*?title="([^"]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + thumbnail = "" + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", 
title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, viewmode="movie")) + + return itemlist + + +def generos(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + data = scrapertools.find_single_match(data, '<section class="generos">(.*?)</section>') + logger.info("data=" + data) + + # Extrae las entradas (carpetas) + patron = '<a href="([^"]+).*?title="([^"]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + thumbnail = "" + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, viewmode="movie")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://www.peliculasrey.com/?s=" + texto + + try: + # return buscar(item) + return peliculas(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def peliculas(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + logger.info("data=" + data) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + tabla_pelis = scrapertools.find_single_match(data, + 'class="section col-17 col-main grid-125 overflow clearfix">(.*?)</div></section>') + patron = '<img src="([^"]+)" alt="([^"]+).*?href="([^"]+)' + + matches = re.compile(patron, re.DOTALL).findall(tabla_pelis) + itemlist = [] + + for scrapedthumbnail, scrapedtitle, scrapedurl in matches: + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot="", fulltitle=scrapedtitle)) + + next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)') + if next_page != "": + # itemlist.append( Item(channel=item.channel, action="peliculas" , title=">> Página siguiente" , url=item.url+next_page, folder=True, viewmode="movie")) + itemlist.append( + Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page, folder=True, + viewmode="movie")) + + return itemlist + + +def findvideos(item): + logger.info() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + # logger.info("data="+data) + + # Extrae las entradas (carpetas) + patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<' + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, nombre_servidor, idioma, calidad in matches: + idioma = idioma.strip() + calidad = calidad.strip() + + title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad + ")" + url = scrapedurl + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, + folder=False)) + + return itemlist + + +def play(item): + logger.info("url=" + item.url) + + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = item.title + 
videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/pelis24.json b/plugin.video.alfa/channels/pelis24.json new file mode 100755 index 00000000..70bb73e0 --- /dev/null +++ b/plugin.video.alfa/channels/pelis24.json @@ -0,0 +1,51 @@ +{ + "id": "pelis24", + "name": "Pelis24", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "pelis24.png", + "banner": "pelis24.png", + "version": 1, + "changes": [ + { + "date": "27/04/17", + "description": "Adaptación por cambio de dominio y estructura" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario" + } + ], + "categories": [ + "latino", + "movie", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelis24.py b/plugin.video.alfa/channels/pelis24.py new file mode 100755 index 00000000..fe135e12 --- /dev/null +++ b/plugin.video.alfa/channels/pelis24.py @@ -0,0 +1,154 @@ +# -*- coding: utf-8 -*- + +import re +import sys + +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +thumbnail_host = 'https://github.com/master-1970/resources/raw/master/images/squares/pelis24.PNG' + + +def mainlist(item): + logger.info() + itemlist = [] + item.thumbnail = thumbnail_host + item.action = "peliculas" + itemlist.append(item.clone(title="Novedades", url="http://www.pelis24.tv/ultimas-peliculas/")) + itemlist.append(item.clone(title="Estrenos", url="http://pelis24.tv/estrenos/")) + itemlist.append(item.clone(title="", folder=False)) + itemlist.append(item.clone(title="Castellano", url="http://pelis24.tv/pelicula-ca/")) + itemlist.append(item.clone(title="Latino", url="http://pelis24.tv/pelicula-latino/")) + itemlist.append(item.clone(title="Versión original", url="http://pelis24.tv/peliculasvo/")) + itemlist.append(item.clone(title="Versión original subtitulada", url="http://pelis24.tv/peliculasvose/")) + + itemlist.append(item.clone(title="", folder=False)) + itemlist.append(item.clone(title="Filtrar por género", action="genero", url="http://pelis24.tv/tags/")) + itemlist.append(item.clone(title="Buscar", action="search", url="http://www.pelis24.tv/")) + return itemlist + + +def newest(categoria): + logger.info() + item = Item() + try: + if categoria == 'peliculas': + item.url = "http://www.pelis24.tv/ultimas-peliculas/" + elif categoria == 'infantiles': + item.url = "http://pelis24.tv/tags/Infantil/" + else: + return [] + + itemlist = peliculas(item) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + return itemlist + + +def search(item, texto): + logger.info() + try: + item.extra = texto + return buscar(item) + # Se captura la excepci?n, para no interrumpir al buscador global si un canal falla + except: + for line in 
sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscar(item): + itemlist = [] + if not item.page: + item.page = 1 + + url = "http://pelis24.tv/index.php?do=search&subaction=search&search_start=%s&story=%s" % ( + item.page, item.extra.replace(" ", "+")) + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<a class="sres-wrap clearfix" href="([^"]+).*?' + patron += '<img src="([^"]+)" alt="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for url, thumbnail, title in matches: + if "/series/" in url: + # Descartamos las series + continue + + if not thumbnail.startswith("http"): + thumbnail = "http://www.pelis24.tv" + thumbnail + contentTitle = title.split("/")[0] + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=contentTitle, url=url, thumbnail=thumbnail, + contentTitle=contentTitle)) + + if itemlist: + itemlist.append(item.clone(title=">> Página siguiente", action="buscar", thumbnail=thumbnail_host, + page=item.page + 1)) + + return itemlist + + +def genero(item): + logger.info() + itemlist = [] + generos = ["Animación", "Aventuras", "Bélico", "Ciencia+ficción", "Crimen", "Comedia", + "Deporte", "Drama", "Fantástico", "Infantil", "Musical", "Romance", "Terror", "Thriller"] + + for g in generos: + itemlist.append(Item(channel=item.channel, action="peliculas", title=g.replace('+', ' '), + thumbnail=thumbnail_host, url=item.url + g + "/")) + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + data = scrapertools.find_single_match(data, "dle-content(.*?)not-main clearfix") + + patron = '<div class="movie-img img-box">.*?' + patron += '<img src="([^"]+).*?' + patron += 'href="([^"]+).*?' + patron += '<div class="movie-series">([^<]+)</div>' + patron += '<span><a href=[^>]+>([^<]+)</a>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for thumbnail, url, title, quality in matches: + if "/series/" in url: + # Descartamos las series + continue + + if not thumbnail.startswith("http"): + thumbnail = "http://www.pelis24.tv" + thumbnail + contentTitle = title.split("/")[0] + title = "%s (%s)" % (contentTitle, quality) + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, + contentQuality=quality, contentTitle=contentTitle)) + + # Extrae el paginador + next_page = scrapertools.find_single_match(data, '<span class="pnext"><a href="([^"]+)') + if next_page: + itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Página siguiente", + thumbnail=thumbnail_host, url=next_page)) + + return itemlist diff --git a/plugin.video.alfa/channels/pelisadicto.json b/plugin.video.alfa/channels/pelisadicto.json new file mode 100755 index 00000000..c9e6794b --- /dev/null +++ b/plugin.video.alfa/channels/pelisadicto.json @@ -0,0 +1,34 @@ +{ + "id": "pelisadicto", + "name": "Pelisadicto", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "pelisadicto.png", + "banner": "pelisadicto.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "movie", + "latino" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisadicto.py b/plugin.video.alfa/channels/pelisadicto.py new file mode 100755 index 00000000..a4238985 --- /dev/null +++ b/plugin.video.alfa/channels/pelisadicto.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, title="Últimas agregadas", action="agregadas", url="http://pelisadicto.com", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, title="Listado por género", action="porGenero", url="http://pelisadicto.com")) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="http://pelisadicto.com")) + + return itemlist + + +def porGenero(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Acción", url="http://pelisadicto.com/genero/Acción/1", + viewmode="movie_with_plot")) + if config.get_setting("adult_mode") != 0: + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Adulto", url="http://pelisadicto.com/genero/Adulto/1", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="agregadas", title="Animación", + url="http://pelisadicto.com/genero/Animación/1", viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Aventura", url="http://pelisadicto.com/genero/Aventura/1", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="agregadas", title="Biográfico", + url="http://pelisadicto.com/genero/Biográfico/1", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="agregadas", title="Ciencia Ficción", + url="http://pelisadicto.com/genero/Ciencia Ficción/1", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="agregadas", title="Cine Negro", + url="http://pelisadicto.com/genero/Cine Negro/1", viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Comedia", url="http://pelisadicto.com/genero/Comedia/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Corto", url="http://pelisadicto.com/genero/Corto/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Crimen", url="http://pelisadicto.com/genero/Crimen/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Deporte", url="http://pelisadicto.com/genero/Deporte/1", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="agregadas", title="Documental", + url="http://pelisadicto.com/genero/Documental/1", viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Drama", url="http://pelisadicto.com/genero/Drama/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Familiar", url="http://pelisadicto.com/genero/Familiar/1", + 
viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Fantasía", url="http://pelisadicto.com/genero/Fantasía/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Guerra", url="http://pelisadicto.com/genero/Guerra/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Historia", url="http://pelisadicto.com/genero/Historia/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Misterio", url="http://pelisadicto.com/genero/Misterio/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Música", url="http://pelisadicto.com/genero/Música/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Musical", url="http://pelisadicto.com/genero/Musical/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Romance", url="http://pelisadicto.com/genero/Romance/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Terror", url="http://pelisadicto.com/genero/Terror/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Thriller", url="http://pelisadicto.com/genero/Thriller/1", + viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, action="agregadas", title="Western", url="http://pelisadicto.com/genero/Western/1", + viewmode="movie_with_plot")) + + return itemlist + + +def search(item, texto): + logger.info() + + ''' + texto_get = texto.replace(" ","%20") + texto_post = texto.replace(" ","+") + item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post) + ''' + + texto = texto.replace(" ", "+") + item.url = "http://pelisadicto.com/buscar/%s" % texto + + try: + return agregadas(item) + # Se captura la excepci?n, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def agregadas(item): + logger.info() + itemlist = [] + ''' + # Descarga la pagina + if "?search=" in item.url: + url_search = item.url.split("?search=") + data = scrapertools.cache_page(url_search[0], url_search[1]) + else: + data = scrapertools.cache_page(item.url) + logger.info("data="+data) + ''' + + data = scrapertools.cache_page(item.url) + # logger.info("data="+data) + + # Extrae las entradas + fichas = re.sub(r"\n|\s{2}", "", scrapertools.get_match(data, '<ul class="thumbnails">(.*?)</ul>')) + + # <li class="col-xs-6 col-sm-2 CALDVD"><a href="/pelicula/101-dalmatas" title="Ver 101 dálmatas Online" class="thumbnail thumbnail-artist-grid"><img class="poster" style="width: 180px; height: 210px;" src="/img/peliculas/101-dalmatas.jpg" alt="101 dálmatas"/><div class="calidad">DVD</div><div class="idiomas"><img src="/img/1.png" height="20" width="30" /></div><div class="thumbnail-artist-grid-name-container-1"><div class="thumbnail-artist-grid-name-container-2"><span class="thumbnail-artist-grid-name">101 dálmatas</span></div></div></a></li> + + patron = 'href="([^"]+)".*?' 
# url + patron += 'src="([^"]+)" ' # thumbnail + patron += 'alt="([^"]+)' # title + + matches = re.compile(patron, re.DOTALL).findall(fichas) + for url, thumbnail, title in matches: + url = urlparse.urljoin(item.url, url) + thumbnail = urlparse.urljoin(url, thumbnail) + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url, + thumbnail=thumbnail, show=title)) + + # Paginación + try: + + # <ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li><li><span><a href="3">3</a></span></li><li><span><a href="4">4</a></span></li><li><span><a href="5">5</a></span></li><li><span><a href="6">6</a></span></li></ul> + + current_page_number = int(scrapertools.get_match(item.url, '/(\d+)$')) + item.url = re.sub(r"\d+$", "%s", item.url) + next_page_number = current_page_number + 1 + next_page = item.url % (next_page_number) + itemlist.append(Item(channel=item.channel, action="agregadas", title="Página siguiente >>", url=next_page, + viewmode="movie_with_plot")) + except: + pass + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + plot = "" + + data = re.sub(r"\n|\s{2}", "", scrapertools.cache_page(item.url)) + + # <!-- SINOPSIS --> <h2>Sinopsis de 101 dálmatas</h2> <p>Pongo y Perdita, los dálmatas protagonistas, son una feliz pareja canina que vive rodeada de sus cachorros y con sus amos Roger y Anita. Pero su felicidad está amenazada. Cruella de Ville, una pérfida mujer que vive en una gran mansión y adora los abrigos de pieles, se entera de que los protagonistas tienen quince cachorros dálmatas. Entonces, la idea de secuestrarlos para hacerse un exclusivo abrigo de pieles se convierte en una obsesión enfermiza. Para hacer realidad su sueño contrata a dos ladrones.</p> + + patron = "<!-- SINOPSIS --> " + patron += "<h2>[^<]+</h2> " + patron += "<p>([^<]+)</p>" + matches = re.compile(patron, re.DOTALL).findall(data) + if matches: + plot = matches[0] + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + patron = '<tr>.*?' 
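# Each mirror is one table row: a language flag image, a quality cell, a
# server cell and the link. The row shape implied by the pattern built here
# (an illustrative reconstruction, not a captured sample):
#
#     <tr><td><img src="/img/1.png" ...></td><td>DVD</td><td>streamcloud</td>
#     ... <a href="http://..." ...></tr>
#
# yielding (scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl), with
# /img/1.png -> Castellano, /img/2.png -> Latino, /img/3.png -> Subtitulado.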
+ patron += '<td><img src="(.*?)".*?<td>(.*?)</td>.*?<td>(.*?)</td>.*?<a href="(.*?)".*?</tr>' + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches: + idioma = "" + if "/img/1.png" in scrapedidioma: idioma = "Castellano" + if "/img/2.png" in scrapedidioma: idioma = "Latino" + if "/img/3.png" in scrapedidioma: idioma = "Subtitulado" + title = item.title + " [" + scrapedcalidad + "][" + idioma + "][" + scrapedserver + "]" + + itemlist.append( + Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl, thumbnail="", + plot=plot, show=item.show)) + return itemlist + + +def play(item): + logger.info() + + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/peliscity.json b/plugin.video.alfa/channels/peliscity.json new file mode 100755 index 00000000..d899b48e --- /dev/null +++ b/plugin.video.alfa/channels/peliscity.json @@ -0,0 +1,25 @@ +{ + "id": "peliscity", + "name": "Peliscity", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "peliscity.png", + "banner": "peliscity.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "movie", + "latino", + "VOS" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliscity.py b/plugin.video.alfa/channels/peliscity.py new file mode 100755 index 00000000..b9bb794d --- /dev/null +++ b/plugin.video.alfa/channels/peliscity.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, title="Últimas agregadas", action="agregadas", url="http://peliscity.com", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="agregadas", + url="http://peliscity.com/calidad/hd-real-720", viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, title="Listado por género", action="porGenero", url="http://peliscity.com")) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="http://peliscity.com/?s=")) + itemlist.append(Item(channel=item.channel, title="Idioma", action="porIdioma", url="http://peliscity.com/")) + + return itemlist + + +def porIdioma(item): + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Castellano", action="agregadas", + url="http://www.peliscity.com/idioma/espanol-castellano/", viewmode="movie_with_plot")) + itemlist.append( + Item(channel=item.channel, title="VOS", action="agregadas", url="http://www.peliscity.com/idioma/subtitulada/", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Latino", action="agregadas", + url="http://www.peliscity.com/idioma/espanol-latino/", viewmode="movie_with_plot")) + + return itemlist + + +def porGenero(item): + logger.info() + + itemlist = [] + data = scrapertools.cache_page(item.url) + + logger.info("data=" + data) + patron = 'cat-item.*?href="([^"]+).*?>(.*?)<' + + matches = re.compile(patron, 
re.DOTALL).findall(data) + + for urlgen, genero in matches: + itemlist.append(Item(channel=item.channel, action="agregadas", title=genero, url=urlgen, folder=True, + viewmode="movie_with_plot")) + + return itemlist + + +def search(item, texto): + logger.info() + texto_post = texto.replace(" ", "+") + item.url = "http://www.peliscity.com/?s=" + texto_post + + try: + return listaBuscar(item) + # Se captura la excepci?n, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def agregadas(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + logger.info("data=" + data) + + patron = 'class=\'reflectMe\' src="([^"]+).*?class="infor".*?href="([^"]+).*?<h2>(.*?)<.*?class="sinopsis">(.*?)<' # url + + matches = re.compile(patron, re.DOTALL).findall(data) + + for thumbnail, url, title, sinopsis in matches: + url = urlparse.urljoin(item.url, url) + thumbnail = urlparse.urljoin(url, thumbnail) + itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url, + thumbnail=thumbnail, show=title, plot=sinopsis)) + + # Paginación + try: + patron = 'tima">.*?href="([^"]+)" ><i' + + next_page = re.compile(patron, re.DOTALL).findall(data) + + itemlist.append(Item(channel=item.channel, action="agregadas", title="Página siguiente >>", url=next_page[0], + viewmode="movie_with_plot")) + except: + pass + + return itemlist + + +def listaBuscar(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n", " ", data) + logger.info("data=" + data) + + patron = 'class="row"> <a.*?="([^"]+).*?src="([^"]+).*?title="([^"]+).*?class="text-list">(.*?)</p>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for url, thumbnail, title, sinopsis in matches: + itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url, + thumbnail=thumbnail, show=title, plot=sinopsis)) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + plot = item.plot + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + patron = 'class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span.*?cursor: hand" rel="(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedidioma, scrapedcalidad, scrapedurl in matches: + idioma = "" + scrapedserver = re.findall("http[s*]?://(.*?)/", scrapedurl) + title = item.title + " [" + scrapedcalidad + "][" + scrapedidioma + "][" + scrapedserver[0] + "]" + if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl): + itemlist.append( + Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl, thumbnail="", + plot=plot, show=item.show)) + return itemlist + + +def play(item): + logger.info() + + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/peliscon.json b/plugin.video.alfa/channels/peliscon.json new file mode 100755 index 00000000..d08a4de2 --- /dev/null +++ b/plugin.video.alfa/channels/peliscon.json @@ -0,0 +1,37 @@ +{ + "id": "peliscon", + "name": "Peliscon", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://imgur.com/yTQRPUJ.png", + "version": 1, + "changes": [ + { + "date": 
"28/06/2017", + "description": "Release" + } + ], + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/peliscon.py b/plugin.video.alfa/channels/peliscon.py new file mode 100755 index 00000000..7fc5ee89 --- /dev/null +++ b/plugin.video.alfa/channels/peliscon.py @@ -0,0 +1,918 @@ +# -*- coding: utf-8 -*- + +import re +from threading import Thread + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +__modo_grafico__ = config.get_setting('modo_grafico', "peliscon") + + +# Para la busqueda en bing evitando baneos + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) 
+ # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( + item.clone(title="[COLOR aqua][B]Películas[/B][/COLOR]", action="scraper", url="http://peliscon.com/peliculas/", + thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/MGQyetQ.jpg", + contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR aqua][B]Series[/B][/COLOR]", action="scraper", + url="http://peliscon.com/series/", thumbnail="http://imgur.com/FrcWTS8.png", + fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow")) + itemlist.append(item.clone(title="[COLOR aqua][B] Últimos capitulos[/B][/COLOR]", action="ul_cap", + url="http://peliscon.com/episodios/", thumbnail="http://imgur.com/FrcWTS8.png", + fanart="http://imgur.com/i41eduI.jpg", contentType="tvshow")) + itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Buscar[/B][/COLOR]", action="search", + thumbnail="http://imgur.com/FrcWTS8.png", fanart="http://imgur.com/h1b7tfN.jpg")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "https://peliscon.com/?s=" + texto + item.extra = "search" + try: + return buscador(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscador(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = scrapertools.find_multiple_matches(data, + '<div class="result-item">.*?href="([^"]+)".*?alt="([^"]+)".*?<span class=".*?">([^"]+)</span>.*?<span class="year">([^"]+)</span>') + + for url, title, genere, year in patron: + + if "Serie" in genere: + checkmt = "tvshow" + genere = "[COLOR aqua][B]" + genere + "[/B][/COLOR]" + else: + checkmt = "movie" + genere = "[COLOR cadetblue][B]" + genere + "[/B][/COLOR]" + titulo = "[COLOR crimson]" + title + "[/COLOR]" + " [ " + genere + " ] " + + if checkmt == "movie": + new_item = item.clone(action="findvideos", title=titulo, url=url, fulltitle=title, contentTitle=title, + contentType="movie", library=True) + else: + + new_item = item.clone(action="findtemporadas", title=titulo, url=url, fulltitle=title, contentTitle=title, + show=title, contentType="tvshow", library=True) + + new_item.infoLabels['year'] = year + itemlist.append(new_item) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + if not "Siguiente >>" in item.title: + if "0." 
in str(item.infoLabels['rating']):
+                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]"
+                else:
+                    item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
+                    item.title = item.title + " " + str(item.infoLabels['rating'])
+    except:
+        pass
+    ## Pagination
+    next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
+    if len(next) > 0:
+        url = next
+
+        itemlist.append(item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", action="buscador", url=url))
+    return itemlist
+
+
+def scraper(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    if item.contentType == "movie":
+
+        patron = scrapertools.find_multiple_matches(data,
+                                                    '<div class="poster"><a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)".*?/flags/(.*?).png.*?<span>(.*?)</span>')
+
+        for url, thumb, title, idioma, year in patron:
+            titulo = title
+            title = re.sub(r"!|¡", "", title)
+            title = title.replace("Autosia", "Autopsia")
+            title = re.sub(r"’|PRE-Estreno", "'", title)
+            new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url,
+                                  fulltitle=title, contentTitle=title, contentType="movie", extra=year, library=True)
+            new_item.infoLabels['year'] = year
+            itemlist.append(new_item)
+
+    else:
+
+        patron = scrapertools.find_multiple_matches(data,
+                                                    '<div class="poster"><a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)".*?<span>(.*?)</span>')
+
+        for url, thumb, title, year in patron:
+            titulo = title.strip()
+            title = re.sub(r"\d+x.*", "", title)
+            new_item = item.clone(action="findtemporadas", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url,
+                                  thumbnail=thumb, fulltitle=title, contentTitle=title, show=title,
+                                  contentType="tvshow", library=True)
+            new_item.infoLabels['year'] = year
+            itemlist.append(new_item)
+
+    ## Pagination
+    next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
+    if len(next) > 0:
+        url = next
+
+        itemlist.append(
+            item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
+                       url=url))
+    try:
+        from core import tmdb
+        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+        # Colour the TMDB rating; a "0.x" value means no score is available yet
+        for item in itemlist:
+            if not "Siguiente >>" in item.title:
+                if "0." in str(item.infoLabels['rating']):
+                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]"
+                else:
+                    item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]"
+                    item.title = item.title + " " + str(item.infoLabels['rating'])
+
+    except:
+        pass
+
+    for item_tmdb in itemlist:
+        logger.info(str(item_tmdb.infoLabels['tmdb_id']))
+
+    return itemlist
+
+
+def ul_cap(item):
+    itemlist = []
+    logger.info()
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    patron = scrapertools.find_multiple_matches(data,
+                                                '<div class="poster">.*?<img src="([^"]+)" alt="([^"]+):.*?href="([^"]+)"><span class="b">(\d+x\d+)<\/span>')
+
+    for thumb, title, url, cap in patron:
+        # cap is "<season>x<episode>"; split it into its two halves
+        temp = re.sub(r"x\d+", "", cap)
+        epi = re.sub(r"\d+x", "", cap)
+        titulo = title.strip() + "--" + "[COLOR red][B]" + cap + "[/B][/COLOR]"
+        title = re.sub(r"\d+x.*", "", title)
+        # filtro_thumb = thumb.replace("https://image.tmdb.org/t/p/w300", "")
+        # filtro_list = {"poster_path": filtro_thumb}
+        # filtro_list = filtro_list.items()
+        # url_tv = scrapertools.find_single_match(url,'episodios/(.*?)/')
+        new_item = item.clone(action="findvideos", title="[COLOR aqua]" + titulo + "[/COLOR]", url=url, thumbnail=thumb,
+                              fulltitle=title, contentTitle=title, show=title, contentType="tvshow", temp=temp, epi=epi,
+                              library=True)
+
+        itemlist.append(new_item)
+
+    ## Pagination
+    next = scrapertools.find_single_match(data, '<div class=\'resppages\'><a href="([^"]+)"')
+    if len(next) > 0:
+        url = next
+
+        itemlist.append(
+            item.clone(title="[COLOR springgreen][B]Siguiente >>[/B][/COLOR]", thumbnail="http://imgur.com/a7lQAld.png",
+                       url=url))
+    try:
+        from core import tmdb
+        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+
+        for item in itemlist:
+
+            if not "Siguiente >>" in item.title:
+
+                if "0." in str(item.infoLabels['rating']):
+                    item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]"
+                else:
+                    item.infoLabels['rating'] = "[COLOR springgreen] (" + str(item.infoLabels['rating']) + ")[/COLOR]"
+                    item.title = item.title + " " + str(item.infoLabels['rating'])
+
+    except:
+        pass
+
+    for item_tmdb in itemlist:
+        logger.info(str(item_tmdb.infoLabels['tmdb_id']))
+
+    return itemlist
+
+
+def findtemporadas(item):
+    logger.info()
+    itemlist = []
+
+    if not item.temp:
+        th = Thread(target=get_art(item))
+        th.setDaemon(True)
+        th.start()
+        check_temp = None
+    else:
+        check_temp = "yes"
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    # item.extra carries up to six fanart URLs separated by "|"
+    if len(item.extra.split("|")):
+        if len(item.extra.split("|")) >= 4:
+            fanart = item.extra.split("|")[2]
+            extra = item.extra.split("|")[3]
+            try:
+                fanart_extra = item.extra.split("|")[4]
+            except:
+                fanart_extra = item.extra.split("|")[3]
+            try:
+                fanart_info = item.extra.split("|")[5]
+            except:
+                fanart_info = item.extra.split("|")[3]
+        elif len(item.extra.split("|")) == 3:
+            fanart = item.extra.split("|")[2]
+            extra = item.extra.split("|")[0]
+            fanart_extra = item.extra.split("|")[0]
+            fanart_info = item.extra.split("|")[1]
+        elif len(item.extra.split("|")) == 2:
+            fanart = item.extra.split("|")[1]
+            extra = item.extra.split("|")[0]
+            fanart_extra = item.extra.split("|")[0]
+            fanart_info = item.extra.split("|")[1]
+        else:
+            extra = item.extra
+            fanart_extra = item.extra
+            fanart_info = item.extra
+        try:
+            logger.info(fanart_extra)
+            logger.info(fanart_info)
+        except:
+            fanart_extra = item.fanart
+            fanart_info = item.fanart
+
+    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) <i>(.*?)</div></li></ul></div></div>')
+    for temporada, bloque_epis in bloque_episodios:
+        item.infoLabels = item.InfoLabels
+        item.infoLabels['season'] = temporada
+
+        itemlist.append(item.clone(action="epis",
+                                   title="[COLOR cornflowerblue][B]Temporada [/B][/COLOR]" + "[COLOR darkturquoise][B]" + temporada + "[/B][/COLOR]",
+                                   url=bloque_epis, contentType=item.contentType, contentTitle=item.contentTitle,
+                                   show=item.show, extra=item.extra, fanart_extra=fanart_extra, fanart_info=fanart_info,
+                                   datalibrary=data, check_temp=check_temp, folder=True))
+    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+    for item in itemlist:
+        item.fanart = fanart
+        item.extra = extra
+        if item.temp:
+            item.thumbnail = item.infoLabels['temporada_poster']
+
+    if config.get_videolibrary_support() and itemlist:
+        if len(bloque_episodios) == 1:
+            extra = "epis"
+        else:
+            extra = "epis###serie_add"
+
+        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'],
+                      'imdb_id': item.infoLabels['imdb_id']}
+        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc",
+                             action="add_serie_to_library", extra=extra, url=item.url,
+                             contentSerieName=item.fulltitle, infoLabels=infoLabels,
+                             thumbnail='http://imgur.com/3ik73p8.png', datalibrary=data))
+    return itemlist
+
+
+def epis(item):
+    logger.info()
+    itemlist = []
+
+    if item.extra == "serie_add":
+        item.url = item.datalibrary
+
+    patron = scrapertools.find_multiple_matches(item.url, '<div class="imagen"><a href="([^"]+)".*?"numerando">(.*?)<')
+    for url, epi in patron:
+        episodio = scrapertools.find_single_match(epi, '\d+ - (\d+)')
+        item.infoLabels['episode'] = episodio
+        epi = re.sub(r" - ", "X", epi)
+
+        itemlist.append(
+            item.clone(title="[COLOR deepskyblue]Episodio " + "[COLOR
red]" + epi, url=url, action="findvideos", + show=item.show, fanart=item.extra, extra=item.extra, fanart_extra=item.fanart_extra, + fanart_info=item.fanart_info, check_temp=item.check_temp, folder=True)) + if item.extra != "serie_add": + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + item.fanart = item.extra + if item.infoLabels['title']: title = "[COLOR royalblue]" + item.infoLabels['title'] + "[/COLOR]" + item.title = item.title + " -- \"" + title + "\"" + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + if item.temp: + url_epis = item.url + + data = httptools.downloadpage(item.url).data + + if not item.infoLabels['episode'] or item.temp: + th = Thread(target=get_art(item)) + th.setDaemon(True) + th.start() + + if item.contentType != "movie": + + if not item.infoLabels['episode']: + capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)') + url_capitulo = scrapertools.find_single_match(data, + '<a href="(http://www.divxtotal.com/wp-content/uploads/.*?' + capitulo + '.*?.torrent)') + + if len(item.extra.split("|")) >= 2: + extra = item.extra + else: + extra = item.fanart + else: + capitulo = item.title + url_capitulo = item.url + + try: + fanart = item.fanart_extra + except: + fanart = item.extra.split("|")[0] + + url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"') + for option, url in url_data: + server, idioma = scrapertools.find_single_match(data, + 'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png') + + if not item.temp: + item.infoLabels['year'] = None + if item.temp: + capitulo = re.sub(r".*--.*", "", capitulo) + title = "[COLOR darkcyan][B]Ver capítulo [/B][/COLOR]" + "[COLOR red][B]" + capitulo + "[/B][/COLOR]" + new_item = item.clone(title=title, url=url, action="play", fanart=fanart, thumbnail=item.thumbnail, + server_v=server, idioma=idioma, extra=item.extra, fulltitle=item.fulltitle, + folder=False) + new_item.infoLabels['episode'] = item.epi + new_item.infoLabels['season'] = item.temp + itemlist.append(new_item) + else: + title = "[COLOR darkcyan][B]Ver capítulo [/B][/COLOR]" + "[COLOR red][B]" + capitulo + "[/B][/COLOR]" + " " + "[COLOR darkred]" + server + " ( " + idioma + " )" + "[/COLOR]" + itemlist.append(Item(channel=item.channel, title=title, url=url, action="play", fanart=fanart, + thumbnail=item.thumbnail, extra=item.extra, fulltitle=item.fulltitle, + folder=False)) + + if item.temp: + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + if item.infoLabels['title']: title_inf = "[COLOR royalblue]" + item.infoLabels['title'] + "[/COLOR]" + item.title = item.title + " -- \"" + title_inf + "\"" + " " + "[COLOR darkred]" + item.server_v + " ( " + item.idioma + " )" + "[/COLOR]" + if item.infoLabels['episode'] and item.library or item.temp and item.library: + thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg') + if thumbnail == "": + thumbnail = item.thumbnail + if not "assets.fanart" in item.fanart_info: + fanart = item.fanart_info + else: + fanart = item.fanart + if item.temp: + item.infoLabels['tvdb_id'] = item.tvdb + + itemlist.append( + Item(channel=item.channel, title="[COLOR steelblue][B] info[/B][/COLOR]", action="info_capitulos", + fanart=fanart, thumbnail=item.thumb_art, thumb_info=item.thumb_info, extra=item.extra, + show=item.show, InfoLabels=item.infoLabels, folder=False)) + if item.temp and not item.check_temp: + url_epis = re.sub(r"-\dx.*", "", 
url_epis) + url_epis = url_epis.replace("episodios", "series") + itemlist.append( + Item(channel=item.channel, title="[COLOR salmon][B]Todos los episodios[/B][/COLOR]", url=url_epis, + action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1], + thumbnail=item.infoLabels['thumbnail'], extra=item.extra + "|" + item.thumbnail, + contentType=item.contentType, contentTitle=item.contentTitle, InfoLabels=item.infoLabels, + thumb_art=item.thumb_art, thumb_info=item.thumbnail, fulltitle=item.fulltitle, + library=item.library, temp=item.temp, folder=True)) + + + + else: + + url_data = scrapertools.find_multiple_matches(data, '<div id="option-(.*?)".*?src="([^"]+)"') + for option, url in url_data: + server, idioma = scrapertools.find_single_match(data, + 'href="#option-' + option + '">.*?</b>(.*?)<span class="dt_flag">.*?flags/(.*?).png') + title = server + " ( " + idioma + " )" + item.infoLabels['year'] = None + + itemlist.append(Item(channel=item.channel, title="[COLOR dodgerblue][B]" + title + " [/B][/COLOR]", url=url, + action="play", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra, + InfoLabels=item.infoLabels, folder=True)) + + if item.library and config.get_videolibrary_support() and len(itemlist) > 0: + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], + 'title': item.infoLabels['title']} + itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", + action="add_pelicula_to_library", url=item.url, fanart=item.extra.split("|")[0], + infoLabels=infoLabels, text_color="0xFFe5ffcc", + thumbnail='http://imgur.com/3ik73p8.png')) + + return itemlist + + +def play(item): + itemlist = [] + videolist = servertools.find_video_items(data=item.url) + for video in videolist: + itemlist.append( + Item(channel=item.channel, title="[COLOR saddlebrown][B]" + video.server + "[/B][/COLOR]", url=video.url, + server=video.server, action="play", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra, + InfoLabels=item.infoLabels, folder=False)) + return itemlist + + +def info_capitulos(item, images={}): + logger.info() + itemlist = [] + + try: + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + str(item.InfoLabels['tvdb_id']) + "/default/" + str( + item.InfoLabels['season']) + "/" + str(item.InfoLabels['episode']) + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + from core import jsontools + data = httptools.downloadpage(url).data + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + if "<filename>episodes" in data: + image = scrapertools.find_single_match(data, '<Data>.*?<filename>(.*?)</filename>') + image = "http://thetvdb.com/banners/" + image + else: + try: + image = item.InfoLabels['episodio_imagen'] + except: + image = "http://imgur.com/ZiEAVOD.png" + + foto = item.thumb_info + if not ".png" in foto: + foto = "http://imgur.com/PRiEW1D.png" + try: + title = item.InfoLabels['episodio_titulo'] + except: + title = "" + title = "[COLOR red][B]" + title + "[/B][/COLOR]" + + try: + plot = "[COLOR peachpuff]" + str(item.InfoLabels['episodio_sinopsis']) + "[/COLOR]" + except: + plot = scrapertools.find_single_match(data, '<Overview>(.*?)</Overview>') + if plot == "": + plot = "Sin información todavia" + try: + rating = item.InfoLabels['episodio_vote_average'] + except: + rating = 0 + try: + + if rating >= 5 and rating < 8: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]" + elif rating >= 8 and rating < 10: + rating = "[COLOR yellow]Puntuación[/COLOR] " + 
"[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]" + elif rating == 10: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]" + else: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" + except: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + + + except: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." + plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/aj4qzTr.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última 
versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def fanartv(item, id_tvdb, id, images={}): + headers = [['Content-Type', 'application/json']] + from core import jsontools + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % id + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb + try: + data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) + if data and not "error message" in data: + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + images[key] = value + else: + images = [] + + except: + images = [] + return images + + +def get_art(item): + logger.info() + id = item.infoLabels['tmdb_id'] + check_fanart = item.infoLabels['fanart'] + if item.contentType != "movie": + tipo_ps = "tv" + else: + tipo_ps = "movie" + if not id: + year = item.extra + otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps) + id = otmdb.result.get("id") + + if id == None: + otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps) + id = otmdb.result.get("id") + if id == None: + if item.contentType == "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + item.fulltitle.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + item.fulltitle.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es") + id = otmdb.result.get("id") + + if id == None: + if "(" in item.fulltitle: + title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)') + if item.contentType != "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", + data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", + data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = 
scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, + idioma_busqueda="es") + id = otmdb.result.get("id") + + if not id: + fanart = item.fanart + + imagenes = [] + itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps) + images = itmdb.result.get("images") + if images: + for key, value in images.iteritems(): + for detail in value: + imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"]) + + if item.contentType == "movie": + if len(imagenes) >= 4: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + + else: + + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + else: + item.extra = imagenes[3] + "|" + imagenes[3] + elif len(imagenes) == 3: + + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + elif imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + item.extra = imagenes[1] + "|" + imagenes[1] + elif len(imagenes) == 2: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + else: + item.extra = imagenes[1] + "|" + imagenes[0] + elif len(imagenes) == 1: + item.extra = imagenes[0] + "|" + imagenes[0] + else: + item.extra = item.fanart + "|" + item.fanart + id_tvdb = "" + else: + + if itmdb.result.get("external_ids").get("tvdb_id"): + id_tvdb = itmdb.result.get("external_ids").get("tvdb_id") + if item.temp: + item.tvdb = id_tvdb + + + else: + id_tvdb = "" + + if len(imagenes) >= 6: + + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + \ + imagenes[5] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \ + imagenes[2] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \ + imagenes[1] + else: + item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + imagenes[2] + "|" + \ + imagenes[1] + elif len(imagenes) == 5: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + 
item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[1] + else: + item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + "|" + imagenes[1] + elif len(imagenes) == 4: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[2] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[1] + else: + item.extra = imagenes[3] + "|" + imagenes[2] + "|" + imagenes[1] + + elif len(imagenes) == 3: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + elif imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + item.extra = imagenes[1] + "|" + imagenes[1] + elif len(imagenes) == 2: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + else: + item.extra = imagenes[1] + "|" + imagenes[0] + elif len(imagenes) == 1: + item.extra = imagenes[0] + "|" + imagenes[0] + else: + item.extra = item.fanart + "|" + item.fanart + item.extra = item.extra + images_fanarttv = fanartv(item, id_tvdb, id) + if images_fanarttv: + if item.contentType == "movie": + if images_fanarttv.get("moviedisc"): + item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url") + elif images_fanarttv.get("hdmovielogo"): + item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url") + elif images_fanarttv.get("moviethumb"): + item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url") + elif images_fanarttv.get("moviebanner"): + item.thumbnail_ = images_fanarttv.get("moviebanner")[0].get("url") + else: + item.thumbnail = item.thumbnail + else: + + if images_fanarttv.get("hdtvlogo"): + item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url") + elif images_fanarttv.get("clearlogo"): + item.thumbnail = images_fanarttv.get("clearlogo")[0].get("url") + item.thumb_info = item.thumbnail + if images_fanarttv.get("hdclearart"): + item.thumb_art = images_fanarttv.get("hdclearart")[0].get("url") + elif images_fanarttv.get("tvbanner"): + item.thumb_art = images_fanarttv.get("tvbanner")[0].get("url") + else: + item.thumb_art = item.thumbnail + + else: + item.extra = item.extra + "|" + item.thumbnail + + +def get_year(url): + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + year = scrapertools.find_single_match(data, 'Fecha de lanzamiento.*?, (\d\d\d\d)') + if year == "": + year = "1111" + return year diff --git a/plugin.video.alfa/channels/pelisdanko.json b/plugin.video.alfa/channels/pelisdanko.json new file mode 100755 index 00000000..9da90562 --- /dev/null +++ b/plugin.video.alfa/channels/pelisdanko.json @@ -0,0 +1,78 @@ +{ + "id": "pelisdanko", + 
"name": "PelisDanko", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "08/07/2016", + "description": "Correcciones y adaptacion a la nueva version." + } + ], + "thumbnail": "pelisdanko.png", + "banner": "pelisdanko.png", + "categories": [ + "movie", + "latino", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en búsqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "filterlanguages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "VOSE", + "Latino", + "Español", + "No filtrar" + ] + }, + { + "id": "filterlinks", + "type": "list", + "label": "Mostrar enlaces de tipo...", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Solo Descarga", + "Solo Online", + "No filtrar" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisdanko.py b/plugin.video.alfa/channels/pelisdanko.py new file mode 100755 index 00000000..8b1a6246 --- /dev/null +++ b/plugin.video.alfa/channels/pelisdanko.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import logger +from core import scrapertools +from core.item import Item + +__modo_grafico__ = config.get_setting('modo_grafico', 'pelisdanko') + +host = "http://pelisdanko.com" +art = "http://pelisdanko.com/img/background.jpg" + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(item.clone(action="novedades", title="Novedades", url=host + "/novedades", + fanart=art)) + itemlist.append(item.clone(action="novedades", title="Estrenos", url=host + "/estrenos", + fanart=art)) + itemlist.append(item.clone(action="novedades", title="Populares", url=host + "/populares", + fanart=art)) + itemlist.append(item.clone(action="actualizadas", title="Películas actualizadas", url=host, + fanart=art)) + itemlist.append(item.clone(action="indices", title="Índices", fanart=art)) + itemlist.append(item.clone(title="", action="")) + itemlist.append(item.clone(action="search", title="Buscar...", fanart=art)) + + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", fanart=art, + text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://pelisdanko.com/busqueda?terms=%s" % texto + try: + return novedades(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = "http://pelisdanko.com/novedades" + itemlist = novedades(item) + + if itemlist[-1].action == "novedades": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al 
canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def novedades(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = scrapertools.downloadpage(item.url) + bloque = scrapertools.find_multiple_matches(data, '<div class="col-xs-[\d] col-sm-[\d] col-md-[\d] col-lg-[\d]' + ' text-center"(.*?)</div>') + + for match in bloque: + calidades = scrapertools.find_multiple_matches(match, '<span class="badge badge-critic badge-qualities[^>]+>' + '([^<]+)</span>') + calidad = "[COLOR darkseagreen] " + for quality in calidades: + calidad += "[" + quality + "]" + patron = 'title="([^"]+)".*?href="([^"]+)".*?class="img-responsive img-thumbnail" src="([^"]+)"' + matches = scrapertools.find_multiple_matches(match, patron) + for scrapedtitle, scrapedurl, scrapedthumbnail in matches: + contentTitle = scrapedtitle[:] + scrapedtitle = "[COLOR darkorange][B]" + scrapedtitle + "[/B][/COLOR]" + calidad + "[/COLOR]" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(item.clone(action="enlaces", title=bbcode_kodi2html(scrapedtitle), + url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, + fulltitle=contentTitle, filtro=False, contentTitle=contentTitle, + context=["buscar_trailer"], contentType="movie", trailer=True)) + + # Busca enlaces de paginas siguientes... + next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" rel="next">') + if len(next_page_url) > 0: + itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url)) + + return itemlist + + +def actualizadas(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = scrapertools.downloadpage(item.url) + bloque_big = scrapertools.find_single_match(data, 'Últimas actualizaciones(.*?)<div class="col-xs-10 col-md-8 ' + 'text-left">') + bloque = scrapertools.find_multiple_matches(bloque_big, '<div class="col-xs-[\d] col-sm-[\d] col-md-[\d]' + ' col-lg-[\d] text-center"(.*?)<br><br>') + + for match in bloque: + calidades = scrapertools.find_multiple_matches(match, '<span class="badge badge-critic badge-qualities[^>]+>' + '([^<]+)</span>') + calidad = "[COLOR darkseagreen] " + for quality in calidades: + calidad += "[" + quality + "]" + languages = scrapertools.find_multiple_matches(match, '<img width="28".*?alt="([^"]+)"') + idiomas = " (" + for idioma in languages: + idioma = idioma.replace('ES_', '').replace('ES', 'CAST') + if idioma != "CAST" and idioma != "LAT": + idioma = "VOSE" + idiomas += idioma + "/" + patron = 'title="([^"]+)".*?href="([^"]+)".*?class="img-responsive img-thumbnail" src="([^"]+)"' + matches = scrapertools.find_multiple_matches(match, patron) + for scrapedtitle, scrapedurl, scrapedthumbnail in matches: + contentTitle = scrapedtitle[:] + scrapedtitle = "[COLOR darkorange][B]" + scrapedtitle + "[/B][/COLOR]" + calidad + idiomas[ + :-1] + ")[/COLOR]" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(item.clone(action="enlaces", title=bbcode_kodi2html(scrapedtitle), + url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, + fulltitle=contentTitle, filtro=False, contentTitle=contentTitle, + context=["buscar_trailer"], contentType="movie")) + + return itemlist + + +def indices(item): + logger.info() + itemlist = [] + + item.text_color = "orchid" + 
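+    # Each entry below hands off to indice_list(), which scrapes the site menu with
+    # the pattern '<a href="(http://pelisdanko.com/%s/[^"]+)">([^<]+)</a>' filled in
+    # with the fulltitle set here. Illustrative example (not taken from the live
+    # site): with fulltitle="genero", a menu row such as
+    #   <a href="http://pelisdanko.com/genero/terror">terror</a>
+    # would yield ("http://pelisdanko.com/genero/terror", "Terror") after .capitalize().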
itemlist.append(item.clone(action="indice_list", title="Género", url=host, fulltitle="genero")) + itemlist.append(item.clone(action="indice_list", title="Alfabético", url=host, fulltitle="letra")) + itemlist.append(item.clone(action="indice_list", title="Idioma", url=host, fulltitle="idioma")) + itemlist.append(item.clone(action="indice_list", title="Calidad", url=host, fulltitle="calidad")) + itemlist.append(item.clone(action="indice_list", title="Nacionalidad", url=host, fulltitle="nacionalidad")) + + return itemlist + + +def indice_list(item): + logger.info() + itemlist = [] + # Descarga la pagina + data = scrapertools.downloadpage(item.url) + + patron = '<a href="(http://pelisdanko.com/%s/[^"]+)">([^<]+)</a>' % item.fulltitle + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle in matches: + scrapedtitle = scrapedtitle.capitalize() + itemlist.append(item.clone(action="novedades", title=scrapedtitle, url=scrapedurl)) + + return itemlist + + +def enlaces(item): + logger.info() + item.extra = "" + item.text_color = "" + itemlist = [] + # Descarga la pagina + data = scrapertools.downloadpage(item.url) + data = re.sub(r"\n|\r|\t|\s{2}", '', data) + item.fanart = scrapertools.find_single_match(data, "CUSTOM BACKGROUND.*?url\('([^']+)'") + item.infoLabels["plot"] = scrapertools.find_single_match(data, 'dt>Sinopsis</dt> <dd class=[^>]+>(.*?)</dd>') + year = scrapertools.find_single_match(data, '<dt>Estreno</dt> <dd>(\d+)</dd>') + + try: + from core import tmdb + item.infoLabels['year'] = int(year) + # Obtenemos los datos basicos de todas las peliculas mediante multihilos + tmdb.set_infoLabels_item(item, __modo_grafico__) + except: + pass + + filtro_idioma = config.get_setting("filterlanguages", item.channel) + filtro_enlaces = config.get_setting("filterlinks", item.channel) + + dict_idiomas = {'CAST': 2, 'LAT': 1, 'VOSE': 0} + + if filtro_enlaces != 0: + itemlist.append(item.clone(action="", title="Enlaces Online", text_color="dodgerblue", text_bold=True)) + itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "ss", item) + if filtro_enlaces != 1: + itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color="dodgerblue", text_bold=True)) + itemlist = bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, "dd", item) + + trailer_id = scrapertools.find_single_match(data, 'data:\s*\{\s*id:\s*"([^"]+)"') + data_trailer = scrapertools.downloadpage("http://pelisdanko.com/trailer", post="id=%s" % trailer_id) + url_trailer = scrapertools.find_single_match(data_trailer, 'src="([^"]+)"') + if url_trailer != "": + url_trailer = url_trailer.replace("embed/", "watch?v=") + item.infoLabels['trailer'] = url_trailer + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta")) + + return itemlist + + +def bloque_enlaces(data, filtro_idioma, dict_idiomas, itemlist, type, item): + logger.info() + bloque = scrapertools.find_single_match(data, '<div role="tabpanel" class="tab-pane fade" id="tab-' + + type + '">(.*?)</table>') + patron = '<tr class="rip hover".*?data-slug="([^"]+)".*?src="http://pelisdanko.com/img/flags/(.*?).png"' \ + '.*?<span class="label label-default quality[^>]+>([^<]+)</span>.*?<td class="small">([^<]+)</td>' + matches = scrapertools.find_multiple_matches(bloque, patron) + filtrados = [] + for slug, flag, quality, date in matches: + if flag != "ES" and flag != "ES_LAT": + flag = "VOSE" + flag = flag.replace('ES_LAT', 'LAT').replace('ES', 'CAST') + 
scrapedurl = "%s/%s/%s?#%s" % (item.url, slug, type, type) + scrapedtitle = " [COLOR firebrick]Mostrar enlaces: [/COLOR][COLOR goldenrod][" \ + + flag + "/" + quality + "][/COLOR][COLOR khaki] " + date + "[/COLOR]" + if filtro_idioma == 3 or item.filtro: + itemlist.append(item.clone(title=bbcode_kodi2html(scrapedtitle), action="findvideos", + url=scrapedurl, id_enlaces=slug, calidad=quality)) + else: + idioma = dict_idiomas[flag] + if idioma == filtro_idioma: + itemlist.append(item.clone(title=bbcode_kodi2html(scrapedtitle), + action="findvideos", url=scrapedurl, id_enlaces=slug)) + else: + if flag not in filtrados: + filtrados.append(flag) + + if filtro_idioma != 3: + if len(filtrados) > 0: + title = bbcode_kodi2html("[COLOR orangered] Mostrar enlaces filtrados en %s[/COLOR]") % ", ".join( + filtrados) + itemlist.append(item.clone(title=title, action="enlaces", url=item.url, filtro=True)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + if item.url[-2:] == "ss": + prefix = "strms" + else: + prefix = "lnks" + # Descarga la pagina + data = scrapertools.downloadpage(item.url) + + # Parametros para redireccion donde muestra los enlaces + data_slug = scrapertools.find_single_match(data, '<div id="ad" data-id="[^"]+" data-slug="([^"]+)"') + data_id = scrapertools.find_single_match(data, '<tr class="rip hover" data-id="([^"]+)"') + url = "http://pelisdanko.com/%s/%s/%s/%s" % (prefix, data_id, item.id_enlaces, data_slug) + data = scrapertools.downloadpage(url, post="") + + from core import servertools + video_item_list = servertools.find_video_items(data=data) + for video_item in video_item_list: + title = "[COLOR green]%s[/COLOR] | [COLOR darkorange][%s][/COLOR]" % (video_item.server, item.calidad) + itemlist.append(item.clone(title=bbcode_kodi2html(title), url=video_item.url, action="play", + server=video_item.server, text_color="")) + + # Opción "Añadir esta película a la videoteca de XBMC" + if config.get_videolibrary_support() and len(itemlist) > 0 and item.category != "Cine": + itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca", url=item.url, + infoLabels={'title': item.fulltitle}, action="add_pelicula_to_library", + fulltitle=item.fulltitle, text_color="green", id_enlaces=item.id_enlaces)) + + return itemlist + + +def bbcode_kodi2html(text): + if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"): + import re + text = re.sub(r'\[COLOR\s([^\]]+)\]', + r'<span style="color: \1">', + text) + text = text.replace('[/COLOR]', '</span>') \ + .replace('[CR]', '<br>') \ + .replace('[B]', '<strong>') \ + .replace('[/B]', '</strong>') \ + .replace('"color: white"', '"color: auto"') + + return text diff --git a/plugin.video.alfa/channels/pelisencasa.json b/plugin.video.alfa/channels/pelisencasa.json new file mode 100755 index 00000000..fc6f25fe --- /dev/null +++ b/plugin.video.alfa/channels/pelisencasa.json @@ -0,0 +1,34 @@ +{ + "id": "pelisencasa", + "name": "PelisEnCasa", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s14.postimg.org/iqiq0bxn5/pelisencasa.png", + "banner": "https://s18.postimg.org/j775ehbg9/pelisencasa_banner.png", + "version": 1, + "date": "17/03/2017", + "changes": "First release", + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": 
"include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisencasa.py b/plugin.video.alfa/channels/pelisencasa.py new file mode 100755 index 00000000..eef0db7a --- /dev/null +++ b/plugin.video.alfa/channels/pelisencasa.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item +from lib import jsunpack + +host = 'http://pelisencasa.net' + +tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "Drama": "https://s16.postimg.org/94sia332d/drama.png", + "Acción": "https://s3.postimg.org/y6o9puflv/accion.png", + "Aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "Animación": "https://s13.postimg.org/5on877l87/animacion.png", + "Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png", + "Música": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "Western": "https://s23.postimg.org/lzyfbjzhn/western.png", + "Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "Guerra": "https://s23.postimg.org/71itp9hcr/belica.png", + "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png", + "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png", + "Historia": "https://s15.postimg.org/fmc050h1n/historia.png", + "Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png"} + +tletras = {'#': 'https://s32.postimg.org/drojt686d/image.png', + 'a': 'https://s32.postimg.org/llp5ekfz9/image.png', + 'b': 'https://s32.postimg.org/y1qgm1yp1/image.png', + 'c': 'https://s32.postimg.org/vlon87gmd/image.png', + 'd': 'https://s32.postimg.org/3zlvnix9h/image.png', + 'e': 'https://s32.postimg.org/bgv32qmsl/image.png', + 'f': 'https://s32.postimg.org/y6u7vq605/image.png', + 'g': 'https://s32.postimg.org/9237ib6jp/image.png', + 'h': 'https://s32.postimg.org/812yt6pk5/image.png', + 'i': 'https://s32.postimg.org/6nbbxvqat/image.png', + 'j': 'https://s32.postimg.org/axpztgvdx/image.png', + 'k': 'https://s32.postimg.org/976yrzdut/image.png', + 'l': 'https://s32.postimg.org/fmal2e9yd/image.png', + 'm': 'https://s32.postimg.org/m19lz2go5/image.png', + 'n': 'https://s32.postimg.org/b2ycgvs2t/image.png', + 'o': 'https://s32.postimg.org/c6igsucpx/image.png', + 'p': 'https://s32.postimg.org/jnro82291/image.png', + 'q': 'https://s32.postimg.org/ve5lpfv1h/image.png', + 'r': 'https://s32.postimg.org/nmovqvqw5/image.png', + 's': 'https://s32.postimg.org/zd2t89jol/image.png', + 't': 'https://s32.postimg.org/wk9lo8jc5/image.png', + 'u': 'https://s32.postimg.org/w8s5bh2w5/image.png', + 'v': 'https://s32.postimg.org/e7dlrey91/image.png', + 'w': 'https://s32.postimg.org/fnp49k15x/image.png', + 'x': 'https://s32.postimg.org/dkep1w1d1/image.png', + 'y': 'https://s32.postimg.org/um7j3zg85/image.png', + 'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'} + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Todas", action="lista", thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', url=host)) + + 
itemlist.append( + item.clone(title="Generos", action="seccion", thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', url=host, extra='generos')) + + itemlist.append( + item.clone(title="Alfabetico", action="seccion", thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', + fanart='https://s17.postimg.org/fwi1y99en/a-z.png', url=host, extra='letras')) + + itemlist.append(item.clone(title="Buscar", action="search", url=host + '/?s=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png')) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + if item.extra != 'letras': + patron = '<li class="TPostMv">.*?<a href="(.*?)"><div class="Image">.*?src="(.*?)\?resize=.*?".*?class="Title">(.*?)<\/h2>.*?' + patron += '<span class="Year">(.*?)<\/span>.*?<span class="Qlty">(.*?)<\/span><\/p><div class="Description"><p>(.*?)<\/p>' + else: + patron = '<td class="MvTbImg"> <a href="(.*?)".*?src="(.*?)\?resize=.*?".*?<strong>(.*?)<\/strong> <\/a><\/td><td>(.*?)<\/td><td>.*?' + patron += 'class="Qlty">(.*?)<\/span><\/p><\/td><td>(.*?)<\/td><td>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, calidad, scrapedplot in matches: + url = scrapedurl + thumbnail = scrapedthumbnail + plot = scrapedplot + contentTitle = scrapedtitle + title = contentTitle + ' (' + calidad + ')' + year = scrapedyear + fanart = '' + itemlist.append( + Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=fanart, contentTitle=contentTitle, infoLabels={'year': year})) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Paginacion + + if itemlist != []: + actual_page_url = item.url + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="(.*?)">') + if next_page != '': + itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png')) + return itemlist + + +def seccion(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + if item.extra == 'generos': + patron = 'menu-item-object-category menu-item-.*?"><a href="(.*?)">(.*?)<\/a><\/li>' + else: + patron = '<li><a href="(.*?\/letter\/.*?)">(.*?)<\/a><\/li>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + thumbnail = '' + if item.extra == 'generos' and scrapedtitle in tgenero: + thumbnail = tgenero[scrapedtitle] + elif scrapedtitle.lower() in tletras: + thumbnail = tletras[scrapedtitle.lower()] + fanart = '' + title = scrapedtitle + url = scrapedurl + + itemlist.append( + Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail, + fanart=fanart, extra=item.extra)) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = 'class="Num">.*?<\/span>.*?href="(.*?)" class="Button STPb">.*?<\/a>.*?<span>(.*?)<\/span><\/td><td><span>(.*?)<\/span><\/td><td><span>.*?<\/span>' + matches = re.compile(patron, re.DOTALL).findall(data) + infoLabels = item.infoLabels + for scrapedurl, servidor, idioma in matches: + new_item = (item.clone(url=scrapedurl, servidor=servidor, idioma=idioma, infoLabels=infoLabels)) + itemlist += 
get_video_urls(new_item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, + action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) + + itemlist.insert(len(itemlist) - 1, item.clone(channel='trailertools', action='buscartrailer', + title='[COLOR orange]Trailer en Youtube[/COLOR]')) + + return itemlist + + +def get_video_urls(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = scrapertools.find_single_match(data, '<script type="text\/javascript">(.*?)<\/script>') + data = jsunpack.unpack(data) + patron = '"file":"(.*?)","label":"(.*?)","type":"video.*?"}' + subtitle = scrapertools.find_single_match(data, 'tracks:\[{"file":"(.*?)","label":".*?","kind":"captions"}') + matches = re.compile(patron, re.DOTALL).findall(data) + for url, calidad in matches: + if item.servidor == 'PELISENCASA': + item.servidor = 'Directo' + title = item.contentTitle + ' (' + item.idioma + ')' + ' (' + calidad + ')' + ' (' + item.servidor + ')' + itemlist.append(item.clone(title=title, url=url, calidad=calidad, action='play', subtitle=subtitle)) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + + if texto != '': + return lista(item) + else: + return [] + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = host + elif categoria == 'infantiles': + item.url = host + '/category/animacion/' + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/pelisfox.json b/plugin.video.alfa/channels/pelisfox.json new file mode 100755 index 00000000..210d18e1 --- /dev/null +++ b/plugin.video.alfa/channels/pelisfox.json @@ -0,0 +1,54 @@ +{ + "id": "pelisfox", + "name": "pelisfox", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s14.postimg.org/c43etc1lt/pelisfox.png", + "banner": "https://s30.postimg.org/p6twg905d/pelisfox-banner.png", + "version": 1, + "changes": [ + { + "date": "16/06/2017", + "description": "Fix pagina siguiente" + }, + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "05/05/2017", + "description": "First release" + } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisfox.py b/plugin.video.alfa/channels/pelisfox.py new file mode 100755 index 00000000..854bf5c9 --- /dev/null +++ b/plugin.video.alfa/channels/pelisfox.py @@ -0,0 +1,302 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import jsontools +from core import logger +from 
core import scrapertools +from core import tmdb +from core.item import Item + +tgenero = {"Drama": "https://s16.postimg.org/94sia332d/drama.png", + u"Accción": "https://s3.postimg.org/y6o9puflv/accion.png", + u"Animación": "https://s13.postimg.org/5on877l87/animacion.png", + u"Ciencia Ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + } + +audio = {'LAT': '[COLOR limegreen]LATINO[/COLOR]', 'SUB': '[COLOR red]Subtitulado[/COLOR]'} + +host = 'http://pelisfox.tv' + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Ultimas", + action="lista", + thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + url=host + '/estrenos/' + )) + + itemlist.append(item.clone(title="Generos", + action="seccion", + url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + seccion='generos' + )) + + itemlist.append(item.clone(title="Por Año", + action="seccion", + url=host + '/peliculas/2017/', + thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png', + fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png', + seccion='anios' + )) + + itemlist.append(item.clone(title="Por Actor", + action="seccion", + url=host + '/actores/', + thumbnail='https://s17.postimg.org/w25je5zun/poractor.png', + fanart='https://s17.postimg.org/w25je5zun/poractor.png', + seccion='actor' + )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url=host + '/api/elastic/suggest?query=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + + if item.seccion != 'actor': + patron = '<li class=item-serie.*?><a href=(.*?) title=(.*?)><img src=(.*?) alt=><span ' + patron += 'class=s-title><strong>.*?<\/strong><p>(.*?)<\/p><\/span><\/a><\/li>' + else: + patron = '<li><a href=(\/pelicula\/.*?)><figure><img src=(.*?) 
alt=><\/figure><p class=title>(.*?)<\/p><p ' + patron += 'class=year>(.*?)<\/p>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear in matches: + url = host + scrapedurl + if item.seccion != 'actor': + thumbnail = scrapedthumbnail + contentTitle = scrapedtitle + else: + thumbnail = scrapedtitle + contentTitle = scrapedthumbnail + plot = '' + year = scrapedyear + title = contentTitle + ' (' + year + ')' + itemlist.append( + Item(channel=item.channel, + action='findvideos', + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + contentTitle=contentTitle, + infoLabels={'year': year} + )) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Paginacion + + if itemlist != []: + actual_page = scrapertools.find_single_match(data, '<a class=active item href=.*?>(.*?)<\/a>') + if actual_page: + next_page_num = int(actual_page) + 1 + next_page = scrapertools.find_single_match(data, + '<li><a class= item href=(.*?)\?page=.*?&limit=.*?>Siguiente') + next_page_url = host + next_page + '?page=%s' % next_page_num + if next_page != '': + itemlist.append(Item(channel=item.channel, + action="lista", + title='Siguiente >>>', + url=next_page_url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png' + )) + return itemlist + + +def seccion(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + if item.seccion == 'generos': + patron = '<a href=(\/peliculas\/[\D].*?\/) title=Películas de .*?>(.*?)<\/a>' + elif item.seccion == 'anios': + patron = '<li class=.*?><a href=(.*?)>(\d{4})<\/a> <\/li>' + elif item.seccion == 'actor': + patron = '<li><a href=(.*?)><div.*?<div class=photopurple title=(.*?)><\/div><img src=(.*?)><\/figure>' + matches = re.compile(patron, re.DOTALL).findall(data) + + if item.seccion != 'actor': + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.decode('utf-8') + thumbnail = '' + if item.seccion == 'generos': + thumbnail = tgenero[title] + fanart = '' + url = host + scrapedurl + + itemlist.append( + Item(channel=item.channel, + action="lista", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + fanart=fanart + )) + else: + for scrapedurl, scrapedname, scrapedthumbnail in matches: + thumbnail = scrapedthumbnail + fanart = '' + title = scrapedname + url = host + scrapedurl + + itemlist.append(Item(channel=item.channel, + action="lista", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + fanart=fanart, + seccion=item.seccion + )) + # Paginacion + + if itemlist != []: + next_page = scrapertools.find_single_match(data, '<li><a class= item href=(.*?)&limit=.*?>Siguiente <') + next_page_url = host + next_page + if next_page != '': + itemlist.append(item.clone(action="seccion", + title='Siguiente >>>', + url=next_page_url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png' + )) + + return itemlist + + +def busqueda(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + dict_data = jsontools.load(data) + resultados = dict_data['result'][0]['options'] + + for resultado in resultados: + if 'title' in resultado['_source']: + title = resultado['_source']['title'] + thumbnail = 'http://s3.amazonaws.com/pelisfox' + '/' + resultado['_source']['cover'] + plot = resultado['_source']['sinopsis'] + url = host + resultado['_source']['url'] + '/' + + itemlist.append(item.clone(title=title, + thumbnail=thumbnail, + plot=plot, + 
url=url,
+                                   action='findvideos',
+                                   contentTitle=title
+                                   ))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+
+    if texto != '':
+        return busqueda(item)
+    else:
+        return []
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    templist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
+    # First pass: one entry per quality/language tab on the film page
+    patron = '<li data-quality=(.*?) data-lang=(.*?)><a href=(.*?) title=.*?'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for quality, lang, scrapedurl in matches:
+        url = host + scrapedurl
+        title = item.title + ' (' + lang + ') (' + quality + ')'
+        templist.append(item.clone(title=title,
+                                   language=lang,
+                                   url=url
+                                   ))
+
+    # Second pass: resolve each tab to its final video sources
+    for videoitem in templist:
+
+        data = httptools.downloadpage(videoitem.url).data
+        data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
+        id = scrapertools.find_single_match(data, 'var _SOURCE =.*?source:(.*?),')
+        if videoitem.language == 'SUB':
+            sub = scrapertools.find_single_match(data, 'var _SOURCE =.*?srt:(.*?),')
+            sub = sub.replace('\\', '')
+        else:
+            sub = ''
+        new_url = 'http://iplay.one/api/embed?id=%s&token=8908d9f846&%s' % (id, sub)
+
+        data = httptools.downloadpage(new_url).data
+
+        patron = 'file":"(.*?)","label":"(.*?)","type":".*?"}'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+
+        for scrapedurl, quality in matches:
+            title = videoitem.contentTitle + ' (' + quality + ') (' + audio[videoitem.language] + ')'
+            url = scrapedurl.replace('\\', '')
+            itemlist.append(item.clone(title=title,
+                                       action='play',
+                                       url=url,
+                                       subtitle=sub,
+                                       server='directo',
+                                       quality=quality,
+                                       language=videoitem.language
+                                       ))
+
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(
+            Item(channel=item.channel,
+                 title='[COLOR yellow]Añadir esta película a la videoteca[/COLOR]',
+                 url=item.url,
+                 action="add_pelicula_to_library",
+                 extra="findvideos",
+                 contentTitle=item.contentTitle
+                 ))
+    return itemlist
+
+
+def newest(categoria):
+    logger.info()
+    itemlist = []
+    item = Item()
+    # categoria='peliculas'
+    try:
+        if categoria == 'peliculas':
+            item.url = host + '/estrenos/'
+            item.extra = 'peliculas'
+        elif categoria == 'infantiles':
+            item.url = host + '/peliculas/animacion/'
+            item.extra = 'peliculas'
+        itemlist = lista(item)
+        if itemlist[-1].title == 'Siguiente >>>':
+            itemlist.pop()
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/pelisgratis.json b/plugin.video.alfa/channels/pelisgratis.json
new file mode 100755
index 00000000..63d1efb1
--- /dev/null
+++ b/plugin.video.alfa/channels/pelisgratis.json
@@ -0,0 +1,46 @@
+{
+  "id": "pelisgratis",
+  "name": "PelisGratis",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "https://s30.postimg.org/kilh4lr4x/pelisgratis.png",
+  "banner": "https://s15.postimg.org/nztottswb/pelisgratis-banner.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "26/05/2017",
+      "description": "First release"
+    }
+  ],
+  "categories": [
+    "latino",
+    "movie"
+  ],
+  "settings": [
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en búsqueda global",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    },
+    {
+      "id": "include_in_newest_peliculas",
+      "type": "bool",
+      "label": "Incluir en Novedades - Peliculas",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    },
+ { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisgratis.py b/plugin.video.alfa/channels/pelisgratis.py new file mode 100755 index 00000000..2152b52e --- /dev/null +++ b/plugin.video.alfa/channels/pelisgratis.py @@ -0,0 +1,293 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "Drama": "https://s16.postimg.org/94sia332d/drama.png", + "Acción": "https://s3.postimg.org/y6o9puflv/accion.png", + "Aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "Animación": "https://s13.postimg.org/5on877l87/animacion.png", + "Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png", + "Música": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png", + "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png", + "Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png", + "Guerra": "https://s4.postimg.org/n1h2jp2jh/guerra.png", + "Western": "https://s23.postimg.org/lzyfbjzhn/western.png", + "Historia": "https://s15.postimg.org/fmc050h1n/historia.png" + } + +thumbletras = {'#': 'https://s32.postimg.org/drojt686d/image.png', + 'a': 'https://s32.postimg.org/llp5ekfz9/image.png', + 'b': 'https://s32.postimg.org/y1qgm1yp1/image.png', + 'c': 'https://s32.postimg.org/vlon87gmd/image.png', + 'd': 'https://s32.postimg.org/3zlvnix9h/image.png', + 'e': 'https://s32.postimg.org/bgv32qmsl/image.png', + 'f': 'https://s32.postimg.org/y6u7vq605/image.png', + 'g': 'https://s32.postimg.org/9237ib6jp/image.png', + 'h': 'https://s32.postimg.org/812yt6pk5/image.png', + 'i': 'https://s32.postimg.org/6nbbxvqat/image.png', + 'j': 'https://s32.postimg.org/axpztgvdx/image.png', + 'k': 'https://s32.postimg.org/976yrzdut/image.png', + 'l': 'https://s32.postimg.org/fmal2e9yd/image.png', + 'm': 'https://s32.postimg.org/m19lz2go5/image.png', + 'n': 'https://s32.postimg.org/b2ycgvs2t/image.png', + 'o': 'https://s32.postimg.org/c6igsucpx/image.png', + 'p': 'https://s32.postimg.org/jnro82291/image.png', + 'q': 'https://s32.postimg.org/ve5lpfv1h/image.png', + 'r': 'https://s32.postimg.org/nmovqvqw5/image.png', + 's': 'https://s32.postimg.org/zd2t89jol/image.png', + 't': 'https://s32.postimg.org/wk9lo8jc5/image.png', + 'u': 'https://s32.postimg.org/w8s5bh2w5/image.png', + 'v': 'https://s32.postimg.org/e7dlrey91/image.png', + 'w': 'https://s32.postimg.org/fnp49k15x/image.png', + 'x': 'https://s32.postimg.org/dkep1w1d1/image.png', + 'y': 'https://s32.postimg.org/um7j3zg85/image.png', + 'z': 'https://s32.postimg.org/jb4vfm9d1/image.png' + } + +audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]', + 'Sub Español': '[COLOR red]SUB ESPAÑOL[/COLOR]'} + +host = 'http://pelisgratis.tv/' + + +def mainlist(item): + logger.info() + + itemlist = [] + + 
itemlist.append(item.clone(title="Estrenos", + action="lista", + thumbnail='https://s21.postimg.org/fy69wzm93/estrenos.png', + fanart='https://s21.postimg.org/fy69wzm93/estrenos.png', + url=host + 'estrenos' + )) + + itemlist.append(item.clone(title="Todas", + action="lista", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + url=host + )) + + itemlist.append(item.clone(title="Generos", + action="seccion", + url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + extra='generos' + )) + + itemlist.append(item.clone(title="Alfabetico", + action="seccion", + url=host, + thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', + fanart='https://s17.postimg.org/fwi1y99en/a-z.png', + extra='a-z' + )) + + itemlist.append(item.clone(title="Mas Vistas", + action="lista", + thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png', + fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', + url=host + 'peliculas-mas-vistas' + )) + + itemlist.append(item.clone(title="Mas Votadas", + action="lista", + thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png', + fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', + url=host + 'peliculas-mas-votadas' + )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url=host + '?s=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + return itemlist + + +def get_source(url): + logger.info() + data = httptools.downloadpage(url, add_referer=True).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + return data + + +def lista(item): + logger.info() + itemlist = [] + data = get_source(item.url) + patron = 'class=(?:MvTbImg|TPostMv).*?href=(.*?)\/(?:>| class).*?src=(.*?) class=attachment.*?' + patron += '(?:strong|class=Title)>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?class=Qlty>(.*?)<.*?' 
+    patron += '(?:<td|class=Description)>(.*?)<(?:\/td|\/p)>'
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality, scrapedplot in matches:
+        url = scrapedurl
+        thumbnail = scrapedthumbnail
+        plot = scrapedplot
+        quality = scrapedquality
+        contentTitle = scrapedtitle
+        title = contentTitle + ' (%s)' % quality
+        year = scrapedyear
+
+        itemlist.append(item.clone(action='findvideos',
+                                   title=title,
+                                   url=url,
+                                   thumbnail=thumbnail,
+                                   plot=plot,
+                                   contentTitle=contentTitle,
+                                   quality=quality,
+                                   infoLabels={'year': year}
+                                   ))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    # Paginacion
+
+    if itemlist != []:
+        next_page = scrapertools.find_single_match(data, '<a class=nextpostslink rel=next href=(.*?)>')
+        if next_page != '':
+            itemlist.append(item.clone(action="lista",
+                                       title='Siguiente >>>',
+                                       url=next_page,
+                                       thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'
+                                       ))
+    return itemlist
+
+
+def seccion(item):
+    logger.info()
+    itemlist = []
+    data = get_source(item.url)
+    if item.extra == 'generos':
+        patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)<\/li>'
+    elif item.extra == 'a-z':
+        patron = '<li><a href=(.*?)>(\w|#)<\/a><\/li>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedtitle in matches:
+        url = scrapedurl
+        thumbnail = ''
+        if item.extra == 'generos':
+            title = re.sub(r'<\/a> \(\d+\)', '', scrapedtitle)
+            cantidad = re.findall(r'.*?<\/a> \((\d+)\)', scrapedtitle)
+            th_title = title
+            title = title + ' (' + cantidad[0] + ')'
+            if th_title in tgenero:
+                thumbnail = tgenero[th_title]
+        else:
+            title = scrapedtitle
+            if title.lower() in thumbletras:
+                thumbnail = thumbletras[title.lower()]
+
+        itemlist.append(item.clone(action='lista', title=title, url=url, thumbnail=thumbnail))
+
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+    if texto != '':
+        return lista(item)
+    else:
+        return []
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    duplicados = []
+
+    data = get_source(item.url)
+    data = data.replace('amp;', '')
+    data_page = data
+
+    patron = 'class=TPlayerTb id=(.*?)><iframe width="560" height="315" src="(.*?)"'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for option, video_page in matches:
+        language = scrapertools.find_single_match(data_page, 'TPlayerNv=%s><span>.*?<center>(.*?)<\/center>' % option)
+        if language == 'Castellano':
+            language = 'Español'
+        if language in audio:
+            id_audio = audio[language]
+        else:
+            id_audio = language
+        if 'redirect' in video_page or 'yourplayer' in video_page:
+            data = get_source('http:%s' % video_page)
+
+            patron = 'label:(.*?),.*?file:(.*?)&app.*?}'
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            for video_url in matches:
+
+                url = video_url[1]
+                url = url.replace('\/', '/')
+                title = item.contentTitle + ' [%s][%s]' % (video_url[0], id_audio)
+                server = 'directo'
+                if url not in duplicados:
+                    itemlist.append(item.clone(action='play',
+                                               title=title,
+                                               url=url,
+                                               server=server
+                                               ))
+                    duplicados.append(url)
+        else:
+            if video_page not in duplicados:
+                itemlist.extend(servertools.find_video_items(data=video_page))
+                duplicados.append(video_page)
+
+    for video_item in itemlist:
+        if video_item.server != 'directo':
+            video_item.channel = item.channel
+            video_item.quality = item.quality
+            video_item.title = item.contentTitle + ' [%s][%s]' % (video_item.server, id_audio)
+
+    if
config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append(item.clone(title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle + )) + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + # categoria='peliculas' + try: + if categoria == 'peliculas': + item.url = host + 'estrenos' + elif categoria == 'infantiles': + item.url = host + 'animacion' + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/pelisipad.json b/plugin.video.alfa/channels/pelisipad.json new file mode 100755 index 00000000..7ca7abaf --- /dev/null +++ b/plugin.video.alfa/channels/pelisipad.json @@ -0,0 +1,43 @@ +{ + "id": "pelisipad", + "name": "PelisIpad", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "21/03/17", + "description": "Primera versión" + } + ], + "thumbnail": "http://i.imgur.com/FzLmGKK.png", + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1", + "Ninguno" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisipad.py b/plugin.video.alfa/channels/pelisipad.py new file mode 100755 index 00000000..b0bd6613 --- /dev/null +++ b/plugin.video.alfa/channels/pelisipad.py @@ -0,0 +1,583 @@ +# -*- coding: utf-8 -*- + +import re +import unicodedata + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item + +host = "http://pelisipad.com/black_json/%s" +ext = "/list.js" + +__perfil__ = config.get_setting('perfil', "pelisipad") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700']] +if __perfil__ < 3: + color1, color2, color3, color4, color5, color6 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = color6 = "" + + +def mainlist(item): + logger.info() + item.viewmode = "movie" + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Películas", action="submenu", text_color=color1, + thumbnail=host % "list/peliculas/thumbnail_167x250.jpg", + fanart=host % "list/peliculas/background_1080.jpg", viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Series", action="submenu", text_color=color1, + thumbnail=host % "list/series/thumbnail_167x250.jpg", + fanart=host % "list/series/background_1080.jpg", viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Películas Infantiles", action="entradasconlistas", + url=host % "list/peliculas-infantiles" + ext, text_color=color1, + thumbnail=host % "list/peliculas-infantiles/thumbnail_167x250.jpg", + fanart=host % 
"list/peliculas-infantiles/background_1080.jpg", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="", action="")) + itemlist.append(Item(channel=item.channel, title="Configuración", action="configuracion", text_color=color6)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def submenu(item): + logger.info() + itemlist = [] + + if "Series" in item.title: + itemlist.append(Item(channel=item.channel, title="Nuevos Capítulos", action="nuevos_cap", + url=host % "list/nuevos-capitulos" + ext, text_color=color2, + thumbnail=host % "list/nuevos-capitulos/thumbnail_167x250.jpg", + fanart=host % "list/nuevos-capitulos/background_1080.jpg", viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Nuevas Temporadas", action="nuevos_cap", + url=host % "list/nuevos-capitulos" + ext, text_color=color2, + thumbnail=host % "list/nuevos-capitulos/thumbnail_167x250.jpg", + fanart=host % "list/nuevos-capitulos/background_1080.jpg", viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Series más vistas", action="series", text_color=color2, + url=host % "list/series" + ext, viewmode="movie_with_plot", + thumbnail=item.thumbnail, fanart=item.fanart, contentTitle="Series")) + itemlist.append(Item(channel=item.channel, title="Lista de Series A-Z", action="series", text_color=color2, + url=host % "list/series" + ext, thumbnail=item.thumbnail, + fanart=item.fanart, contentTitle="Series", viewmode="movie_with_plot")) + else: + itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas", + url=host % "list/ultimas-peliculas" + ext, text_color=color2, + thumbnail=host % "list/ultimas-peliculas/thumbnail_167x250.jpg", + fanart=host % "list/ultimas-peliculas/background_1080.jpg", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Destacados", action="entradas", + url=host % "list/000-novedades" + ext, text_color=color2, + thumbnail=host % "list/screener/thumbnail_167x250.jpg", + fanart=host % "list/screener/background_1080.jpg", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas", + url=host % "list/peliculas-mas-vistas" + ext, text_color=color2, + thumbnail=host % "list/peliculas-mas-vistas/thumbnail_167x250.jpg", + fanart=host % "list/peliculas-mas-vistas/background_1080.jpg", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Categorías", action="cat", url=host % "list/peliculas" + ext, + text_color=color2, thumbnail=item.thumbnail, fanart=item.fanart)) + + return itemlist + + +def cat(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + + exception = ["peliculas-mas-vistas", "ultimas-peliculas"] + for child in data["b"]: + if child["id"] in exception: + continue + child['name'] = child['name'].replace("ciencia-ficcion", "Ciencia Ficción").replace("-", " ") + url = host % "list/%s" % child["id"] + ext + # Fanart + fanart = host % "list/%s/background_1080.jpg" % child["id"] + # Thumbnail + thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"] + title = unicode(child['name'], "utf-8").capitalize().encode("utf-8") + itemlist.append( + Item(channel=item.channel, action="entradasconlistas", title=title, url=url, + thumbnail=thumbnail, fanart=fanart, text_color=color2)) + itemlist.sort(key=lambda it: 
it.title) + + return itemlist + + +def entradas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + + if "Destacados" in item.title: + itemlist.append(item.clone(title="Aviso: Si una película no tiene (imagen/carátula) NO va a funcionar", + action="", text_color=color4)) + + for child in data["a"]: + infolabels = {} + + infolabels['originaltitle'] = child['originalTitle'] + infolabels['plot'] = child['description'] + infolabels['year'] = child['year'] + if child.get('tags'): infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rateHuman'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + if child.get('runtime'): + try: + infolabels['duration'] = int(child['runtime'].replace(" min.", "")) * 60 + except: + pass + if child.get('cast'): infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + url = host % "movie/%s/movie.js" % child["id"] + # Fanart + fanart = host % "movie/%s/background_480.jpg" % child["id"] + if child.get("episode"): + thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"] + else: + thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"] + + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] >= 1080: + quality = "[B] [1080p][/B]" + fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \ + .decode("utf-8") + if child['name'] == "": + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + + video_urls = [] + for k, v in child.get("video", {}).items(): + for vid in v: + video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s", + vid["height"]]) + + itemlist.append(Item(channel=item.channel, action="findvideos", server="", title=title, url=url, + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, video_urls=video_urls, text_color=color3)) + + return itemlist + + +def entradasconlistas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + + # Si hay alguna lista + contentSerie = False + contentList = False + if data.get('b'): + for child in data['b']: + infolabels = {} + + infolabels['originaltitle'] = child['originalTitle'] + infolabels['plot'] = child['description'] + infolabels['year'] = data['year'] + if child.get('tags'): infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rateHuman'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + if child.get('runtime'): + try: + infolabels['duration'] = int(child['runtime'].replace(" min.", "")) * 60 + except: + pass + if child.get('cast'): infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + season = child.get('season', '') + if season.isdigit() and not contentList: + contentSerie = True + action = "episodios" + else: + contentSerie = False + contentList = True + action = "entradasconlistas" + + url = host % "list/%s" % child["id"] + ext + title = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['name']) + fulltitle = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['name']) + if not title: + title = re.sub(r"(\w)-(\w)", '\g<1> \g<2>', child['id']) + fulltitle = re.sub(r"(\w)-(\w)", '\g<1> 
\g<2>', child['id']) + title = unicode(title, "utf-8").capitalize().encode("utf-8") + fulltitle = unicode(fulltitle, "utf-8").capitalize().encode("utf-8") + show = "" + if contentSerie: + title += " (Serie TV)" + show = fulltitle + thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"] + fanart = host % "list/%s/background_1080.jpg" % child["id"] + + itemlist.append(Item(channel=item.channel, action=action, title=title, + url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, show=show, + infoLabels=infolabels, contentTitle=fulltitle, viewmode="movie_with_plot", + text_color=color3)) + else: + contentList = True + + if contentSerie and itemlist: + itemlist.sort(key=lambda it: it.infoLabels['season'], reverse=True) + + if itemlist: + itemlist.insert(0, Item(channel=item.channel, title="**LISTAS**", action="", text_color=color4, text_bold=True, + thumbnail=item.thumbnail, fanart=item.fanart)) + + if data.get("a") and itemlist: + itemlist.append(Item(channel=item.channel, title="**VÍDEOS**", action="", text_color=color6, text_bold=True, + thumbnail=item.thumbnail, fanart=item.fanart)) + + for child in data.get("a", []): + infolabels = {} + + infolabels['originaltitle'] = child['originalTitle'] + infolabels['plot'] = child['description'] + infolabels['year'] = data['year'] + if child.get('tags'): infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rateHuman'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + if child.get('runtime'): + try: + infolabels['duration'] = int(child['runtime'].replace(" min.", "")) * 60 + except: + pass + if child.get('cast'): infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + url = host % "movie/%s/movie.js" % child["id"] + # Fanart + fanart = host % "movie/%s/background_1080.jpg" % child["id"] + if child.get("episode"): + thumbnail = host % "movie/%s/thumbnail.jpg" % child["id"] + else: + thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"] + + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] >= 1080: + quality = "[B] [1080p][/B]" + fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \ + .decode("utf-8") + if not child['name']: + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + + video_urls = [] + for k, v in child.get("video", {}).items(): + for vid in v: + video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s", + vid["height"]]) + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, video_urls=video_urls, + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, viewmode="movie_with_plot", text_color=color3)) + + # Se añade item para añadir la lista de vídeos a la videoteca + if data.get('a') and itemlist and contentList and config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, text_color=color5, title="Añadir esta lista a la videoteca", + url=item.url, action="listas")) + elif contentSerie and config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color=color5, + url=item.url, action="add_serie_to_library", show=item.show, + fulltitle=item.fulltitle, extra="episodios")) + + 
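+    # At this point itemlist mixes sub-lists ('b' entries, or seasons when
+    # 'season' is numeric) with playable videos ('a' entries).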
return itemlist + + +def series(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + for child in data.get("b", []): + infolabels = {} + + infolabels['originaltitle'] = child['originalTitle'] + infolabels['plot'] = child['description'] + infolabels['year'] = child['year'] + if child.get('tags'): infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rateHuman'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + if child.get('cast'): infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + if child.get('runtime'): + try: + infolabels['duration'] = int(child['runtime'].replace(" min.", "")) * 60 + except: + pass + infolabels['mediatype'] = "tvshow" + if child['season']: infolabels['season'] = child['season'] + + url = host % "list/%s" % child["id"] + ext + # Fanart + fanart = host % "list/%s/background_1080.jpg" % child["id"] + # Thumbnail + thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"] + fulltitle = child['name'] + title = fulltitle + " [%s]" % child['year'] + if child.get("numberOfSeasons") and "- Temporada" not in title: + title += " (Temps:%s)" % child['numberOfSeasons'] + + itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, text_color=color3, + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, viewmode="movie_with_plot", show=fulltitle)) + + if "A-Z" in item.title: + itemlist.sort(key=lambda it: it.title) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + + capitulos = [] + if data.get("b"): + for child in data["b"]: + for child2 in child["a"]: + capitulos.append([child["season"], child2, child["id"]]) + else: + for child in data.get("a", []): + capitulos.append(['', child, '']) + + for season, child, id_season in capitulos: + infoLabels = item.infoLabels.copy() + + if child.get('runtime'): + try: + infoLabels['duration'] = int(child['runtime'].replace(" min.", "")) * 60 + except: + pass + if not season or not season.isdigit(): + season = scrapertools.find_single_match(child['name'], '(\d+)x\d+') + try: + infoLabels['season'] = int(season) + except: + infoLabels['season'] = 0 + + if not child['episode']: + episode = scrapertools.find_single_match(child['name'], '\d+x(\d+)') + if not episode: + episode = "0" + infoLabels['episode'] = int(episode) + else: + infoLabels['episode'] = int(child['episode']) + infoLabels['mediatype'] = "episode" + + url = host % "movie/%s/movie.js" % child["id"] + thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"] + if id_season: + fanart = host % "list/%s/background_1080.jpg" % id_season + else: + fanart = item.fanart + + video_urls = [] + for k, v in child.get("video", {}).items(): + for vid in v: + video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s", + vid["height"]]) + + try: + title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] + except: + title = fulltitle = child['id'].replace("-", " ") + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, + fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie", + show=item.show, infoLabels=infoLabels, video_urls=video_urls, extra="episodios", + text_color=color3)) + + 
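+    # Newest first: sort by (season, episode) descending, then append the
+    # videolibrary entry so it stays at the bottom of the listing.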
itemlist.sort(key=lambda it: (it.infoLabels["season"], it.infoLabels["episode"]), reverse=True) + if itemlist and config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color=color5, + url=item.url, action="add_serie_to_library", infoLabels=item.infoLabels, + show=item.show, extra="episodios")) + + return itemlist + + +def nuevos_cap(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + capitulos = [] + if "Nuevas" in item.title: + for child in data["b"]: + capitulos.append([child["season"], child]) + else: + for child in data["a"]: + capitulos.append(['', child]) + + for season, child in capitulos: + infoLabels = item.infoLabels + if child.get('runtime'): + try: + infoLabels['duration'] = int(child['runtime'].replace(" min.", "")) * 60 + except: + pass + if not season: + season = scrapertools.find_single_match(child['name'], '(\d+)x\d+') + try: + infoLabels['season'] = int(season) + except: + infoLabels['season'] = 0 + + if "Nuevos" in item.title: + if not child['episode']: + episode = scrapertools.find_single_match(child['name'], '\d+x(\d+)') + if not episode: + episode = "0" + infoLabels['episode'] = int(episode) + else: + infoLabels['episode'] = int(child['episode']) + infoLabels['mediatype'] = "episode" + + if "Nuevos" in item.title: + url = host % "movie/%s/movie.js" % child["id"] + action = "findvideos" + thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"] + fanart = item.fanart + else: + url = host % "list/%s" % child["season"] + ext + action = "episodios" + thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"] + fanart = host % "list/%s/background_1080.jpg" % child["id"] + + video_urls = [] + for k, v in child.get("video", {}).items(): + for vid in v: + video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s", + vid["height"]]) + + if "Nuevos" in item.title: + title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] + else: + title = fulltitle = child['name'] + + itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, + fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, viewmode="movie", + show=item.fulltitle, infoLabels=infoLabels, video_urls=video_urls, extra="nuevos", + text_color=color3)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + if not item.video_urls: + data = httptools.downloadpage(item.url) + if not data.sucess: + itemlist.append(item.clone(title="Película no disponible", action="")) + return itemlist + data = jsontools.load(data.data) + + item.video_urls = [] + for k, v in data.get("video", {}).items(): + for vid in v: + item.video_urls.append(["http://%s.pelisipad.com/s/transcoder/%s" % (vid["server"], vid["url"]) + "?%s", + vid["height"]]) + + if item.video_urls: + import random + import base64 + item.video_urls.sort(key=lambda it: (it[1], random.random()), reverse=True) + i = 0 + calidad_actual = "" + for vid, calidad in item.video_urls: + title = "Ver vídeo en %sp" % calidad + if calidad != calidad_actual: + i = 0 + calidad_actual = calidad + + if i % 2 == 0: + title += " [COLOR purple]Mirror %s[/COLOR] - %s" % (str(i + 1), item.fulltitle) + else: + title += " [COLOR green]Mirror %s[/COLOR] - %s" % (str(i + 1), item.fulltitle) + url = vid % "%s" % base64.b64decode("dHQ9MTQ4MDE5MDQ1MSZtbT1NRzZkclhFand6QmVzbmxSMHNZYXhBJmJiPUUwb1dVVVgx" + 
"WTBCQTdhWENpeU9paUE=") + itemlist.append(item.clone(title=title, action="play", url=url, server="directo", video_urls="")) + i += 1 + + if itemlist and item.extra == "" and config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color=color5, + contentTitle=item.fulltitle, url=item.url, action="add_pelicula_to_library", + infoLabels={'title': item.fulltitle}, extra="findvideos", fulltitle=item.fulltitle)) + + return itemlist + + +def listas(item): + logger.info() + # Para añadir listas a la videoteca en carpeta CINE + itemlist = [] + data = httptools.downloadpage(item.url).data + data = jsontools.load(data) + for child in data.get("a", []): + infolabels = {} + + # Fanart + fanart = host % "movie/%s/background_1080.jpg" % child["id"] + thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"] + + url = host % "movie/%s/movie.js" % child["id"] + if child['name'] == "": + title = scrapertools.slugify(child['id'].rsplit(".", 1)[0]) + else: + title = scrapertools.slugify(child['name']) + title = title.replace('-', ' ').replace('_', ' ') + title = unicode(title, "utf-8").capitalize().encode("utf-8") + infolabels['title'] = title + try: + from core import videolibrarytools + new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos", + thumbnail=thumbnail, infoLabels=infolabels, category="Cine") + videolibrarytools.add_pelicula_to_library(new_item) + error = False + except: + error = True + import traceback + logger.error(traceback.format_exc()) + + if not error: + itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la videoteca', action="")) + else: + itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso', + action="")) + + return itemlist diff --git a/plugin.video.alfa/channels/pelismagnet.json b/plugin.video.alfa/channels/pelismagnet.json new file mode 100755 index 00000000..5418faa0 --- /dev/null +++ b/plugin.video.alfa/channels/pelismagnet.json @@ -0,0 +1,50 @@ +{ + "id": "pelismagnet", + "name": "PelisMagnet", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "pelismagnet.png", + "banner": "pelismagnet.png", + "changes": { + "change": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "21/02/2017", + "description": "Reparado a causa del cambio a https y añadida opcion para buscar info en tmdb" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ] + }, + "version": 1, + "categories": [ + "torrent", + "movie", + "tvshow", + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra en TMDB", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelismagnet.py b/plugin.video.alfa/channels/pelismagnet.py new file mode 100755 index 00000000..53631ca7 --- /dev/null +++ b/plugin.video.alfa/channels/pelismagnet.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- + +import re +import urllib + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item + +host = 'http://pelismag.net' +api = host + '/api' +api_serie = host + "/seapi" +api_temp = host + "/sapi" +__modo_grafico__ = config.get_setting("modo_grafico", "pelismagnet") + + +def mainlist(item): + logger.info() + + itemlist = list() + itemlist.append(Item(channel=item.channel, action="pelis", title="[B]Peliculas[/B]", + url=api + "?sort_by=''&page=0")) + itemlist.append(Item(channel=item.channel, action="pelis", title=" Estrenos", + url=api + "?sort_by=date_added&page=0")) + itemlist.append(Item(channel=item.channel, action="pelis", title=" + Populares", url=api + "?page=0")) + itemlist.append(Item(channel=item.channel, action="pelis", title=" + Valoradas", + url=api + "?sort_by=rating&page=0")) + itemlist.append(Item(channel=item.channel, action="menu_ord", title=" Ordenado por...", + url=api)) + itemlist.append( + Item(channel=item.channel, action="search", title=" Buscar...", url=api + "?keywords=%s&page=0")) + itemlist.append(Item(channel=item.channel, action="series", title="[B]Series[/B]", + url=api_serie + "?sort_by=''&page=0")) + itemlist.append(Item(channel=item.channel, action="series", title=" Recientes", + url=api_serie + "?sort_by=date_added&page=0")) + itemlist.append(Item(channel=item.channel, action="series", title=" + Populares", url=api_serie + "?page=0")) + itemlist.append(Item(channel=item.channel, action="series", title=" + Valoradas", + url=api_serie + "?sort_by=rating&page=0")) + itemlist.append(Item(channel=item.channel, action="menu_ord", title=" Ordenado por...", + url=api_serie)) + itemlist.append(Item(channel=item.channel, action="search", title=" Buscar...", + url=api_serie + "?keywords=%s&page=0")) + itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal")) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def menu_ord(item): + logger.info() + + itemlist = list() + itemlist.append(Item(channel=item.channel, action="menu_alf", title="Alfabético", + url=item.url)) + itemlist.append(Item(channel=item.channel, action="menu_genero", title="Género", + url=item.url)) + + return itemlist + + +def menu_alf(item): + logger.info() + + itemlist = [] + + for letra in ['[0-9]', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']: + itemlist.append(Item(channel=item.channel, action="series", title=letra, + url=item.url + "?keywords=^" + letra + "&page=0")) + + return itemlist + + +def menu_genero(item): + logger.info() 
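+    # The site appears to reject direct requests, so pages are fetched through
+    # kproxy.com: POST the target URL to doproxy.jsp, pick the proxied URL out
+    # of the <meta http-equiv="refresh"> tag, and download that instead.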
+
+    itemlist = []
+
+    response = httptools.downloadpage("https://kproxy.com/")
+    url = "https://kproxy.com/doproxy.jsp"
+    post = "page=%s&x=34&y=14" % urllib.quote(host + "/principal")
+    response = httptools.downloadpage(url, post, follow_redirects=False).data
+    url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
+    data = httptools.downloadpage(url).data
+
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
+
+    data = scrapertools.find_single_match(data, '<ul class="dropdown-menu.*?>(.*?)</ul>')
+    patron = '<li><a href="genero/([^"]+)">(.*?)<'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for genero, nombre in matches:
+        itemlist.append(Item(channel=item.channel, action="series", title=nombre,
+                             url=item.url + "?genre=" + genero + "&page=0"))
+
+    return itemlist
+
+
+def series(item):
+    logger.info()
+    itemlist = []
+
+    response = httptools.downloadpage("https://kproxy.com/")
+    url = "https://kproxy.com/doproxy.jsp"
+    post = "page=%s&x=34&y=14" % urllib.quote(item.url)
+    response = httptools.downloadpage(url, post, follow_redirects=False).data
+    url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
+    data = httptools.downloadpage(url).data
+
+    lista = jsontools.load(data)
+    if item.extra == "next":
+        lista_ = lista[25:]
+    else:
+        lista_ = lista[:25]
+
+    for i in lista_:
+
+        punt = i.get("puntuacio", "")
+        valoracion = ""
+        if punt and punt != 0:
+            valoracion = " (Val: {punt})".format(punt=punt)
+
+        title = "{nombre}{val}".format(nombre=i.get("nom", ""), val=valoracion)
+        url = "{url}?id={id}".format(url=api_temp, id=i.get("id", ""))
+
+        thumbnail = ""
+        fanart = ""
+        if i.get("posterurl", ""):
+            thumbnail = "http://image.tmdb.org/t/p/w342{file}".format(file=i.get("posterurl", ""))
+        if i.get("backurl", ""):
+            fanart = "http://image.tmdb.org/t/p/w1280{file}".format(file=i.get("backurl", ""))
+
+        plot = i.get("info", "")
+        if plot is None:
+            plot = ""
+
+        infoLabels = {'plot': plot, 'year': i.get("year"), 'tmdb_id': i.get("id"), 'mediatype': 'tvshow'}
+
+        itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, server="torrent",
+                             thumbnail=thumbnail, fanart=fanart, infoLabels=infoLabels, contentTitle=i.get("nom"),
+                             show=i.get("nom")))
+
+    from core import tmdb
+    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+
+    if len(lista_) == 25 and item.extra == "next":
+        url = re.sub(r'page=(\d+)', r'page=' + str(int(re.search('\d+', item.url).group()) + 1), item.url)
+        itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente", url=url))
+    elif len(lista_) == 25:
+        itemlist.append(
+            Item(channel=item.channel, action="series", title=">> Página siguiente", url=item.url, extra="next"))
+
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+
+    response = httptools.downloadpage("https://kproxy.com/")
+    url = "https://kproxy.com/doproxy.jsp"
+    post = "page=%s&x=34&y=14" % urllib.quote(item.url)
+    response = httptools.downloadpage(url, post, follow_redirects=False).data
+    url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
+    data = httptools.downloadpage(url).data
+
+    data = jsontools.load(data)
+    for i in data.get("temporadas", []):
+
+        titulo = "{temporada} ({total} Episodios)".format(temporada=i.get("nomtemporada", ""),
+                                                          total=len(i.get("capituls", "0")))
+        itemlist.append(Item(channel=item.channel, action="episodios", title=titulo, url=item.url,
+                             server="torrent", fanart=item.fanart, thumbnail=item.thumbnail,
+                             plot=data.get("info", ""), folder=False))
+
+        for j in i.get("capituls", []):
+
+            numero = j.get("infocapitul", "")
+            if not numero:
+                numero = "{temp}x{cap}".format(temp=i.get("numerotemporada", ""), cap=j.get("numerocapitul", ""))
+
+            titulo = j.get("nomcapitul", "")
+            if not titulo:
+                titulo = "Capítulo {num}".format(num=j.get("numerocapitul", ""))
+
+            calidad = ""
+            if j.get("links", {}).get("calitat", ""):
+                calidad = " [{calidad}]".format(calidad=j.get("links", {}).get("calitat", ""))
+
+            title = " {numero} {titulo}{calidad}".format(numero=numero, titulo=titulo, calidad=calidad)
+
+            if j.get("links", {}).get("magnet", ""):
+                url = j.get("links", {}).get("magnet", "")
+            else:
+                return [Item(channel=item.channel, title='No hay enlace magnet disponible para este capitulo')]
+
+            plot = i.get("overviewcapitul", "")
+            if plot is None:
+                plot = ""
+
+            infoLabels = item.infoLabels
+            if plot:
+                infoLabels["plot"] = plot
+            infoLabels["season"] = i.get("numerotemporada")
+            infoLabels["episode"] = j.get("numerocapitul")
+            itemlist.append(
+                Item(channel=item.channel, action="play", title=title, url=url, server="torrent",
+                     infoLabels=infoLabels, thumbnail=item.thumbnail, fanart=item.fanart, show=item.show,
+                     contentTitle=item.contentTitle, contentSeason=i.get("numerotemporada"),
+                     contentEpisodeNumber=j.get("numerocapitul")))
+
+    return itemlist
+
+
+def pelis(item):
+    logger.info()
+
+    itemlist = []
+
+    response = httptools.downloadpage("https://kproxy.com/")
+    url = "https://kproxy.com/doproxy.jsp"
+    post = "page=%s&x=34&y=14" % urllib.quote(item.url)
+    response = httptools.downloadpage(url, post, follow_redirects=False).data
+    url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
+    data = httptools.downloadpage(url).data
+
+    lista = jsontools.load(data)
+    if item.extra == "next":
+        lista_ = lista[25:]
+    else:
+        lista_ = lista[:25]
+
+    for i in lista_:
+        punt = i.get("puntuacio", "")
+        valoracion = ""
+
+        if punt and punt != 0:
+            valoracion = " (Val: {punt})".format(punt=punt)
+
+        if i.get("magnets", {}).get("M1080", {}).get("magnet", ""):
+            url = i.get("magnets", {}).get("M1080", {}).get("magnet", "")
+            calidad = "[{calidad}]".format(calidad=i.get("magnets", {}).get("M1080", {}).get("quality", ""))
+        else:
+            url = i.get("magnets", {}).get("M720", {}).get("magnet", "")
+            calidad = "[{calidad}]".format(calidad=i.get("magnets", {}).get("M720", {}).get("quality", ""))
+
+        if not url:
+            continue
+
+        title = "{nombre} {calidad}{val}".format(nombre=i.get("nom", ""), val=valoracion, calidad=calidad)
+
+        thumbnail = ""
+        fanart = ""
+        if i.get("posterurl", ""):
+            thumbnail = "http://image.tmdb.org/t/p/w342{file}".format(file=i.get("posterurl", ""))
+        if i.get("backurl", ""):
+            fanart = "http://image.tmdb.org/t/p/w1280{file}".format(file=i.get("backurl", ""))
+
+        plot = i.get("info", "")
+        if plot is None:
+            plot = ""
+        infoLabels = {'plot': plot, 'year': i.get("year"), 'tmdb_id': i.get("id")}
+
+        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, server="torrent",
+                             thumbnail=thumbnail, fanart=fanart, infoLabels=infoLabels, contentTitle=i.get("nom")))
+
+    from core import tmdb
+    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+
+    if len(lista_) == 25 and item.extra == "next":
+        url = re.sub(r'page=(\d+)', r'page=' + str(int(re.search('\d+', item.url).group()) + 1), item.url)
+        itemlist.append(Item(channel=item.channel, action="pelis", title=">> Página siguiente",
url=url)) + elif len(lista_) == 25: + itemlist.append( + Item(channel=item.channel, action="pelis", title=">> Página siguiente", url=item.url, extra="next")) + + return itemlist + + +def search(item, texto): + logger.info() + try: + item.url = item.url % texto.replace(' ', '%20') + if "/seapi" in item.url: + return series(item) + else: + return pelis(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] diff --git a/plugin.video.alfa/channels/pelispedia.json b/plugin.video.alfa/channels/pelispedia.json new file mode 100755 index 00000000..43890255 --- /dev/null +++ b/plugin.video.alfa/channels/pelispedia.json @@ -0,0 +1,86 @@ +{ + "id": "pelispedia", + "name": "PelisPedia", + "active": true, + "adult": false, + "language": "es", + "fanart": "http://i.imgur.com/9QbyJrf.jpg", + "thumbnail": "pelispedia.png", + "banner": "pelispedia.png", + "version": 1, + "changes": [ + { + "date": "07/06/17", + "description": "Reconoce servidor raptu" + }, + { + "date": "21/04/17", + "description": "Corregido método play, fix ordenación de episodios" + }, + { + "date": "06/04/17", + "description": "Se permite configurar sin color, varias correcciones" + }, + { + "date": "01/02/17", + "description": "Corregido método play" + }, + { + "date": "19/07/16", + "description": "Corregido el método play" + }, + { + "date": "01/07/16", + "description": "Eliminado código innecesario." + }, + { + "date": "06/06/16", + "description": "Añadida info extra de tmdb. Añadido el canal en la busqueda de novedades" + } + ], + "categories": [ + "movie", + "tvshow", + "vos" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "orden_episodios", + "type": "bool", + "label": "Mostrar los episodios de las series en orden descendente", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelispedia.py b/plugin.video.alfa/channels/pelispedia.py new file mode 100755 index 00000000..595e52cb --- /dev/null +++ b/plugin.video.alfa/channels/pelispedia.py @@ -0,0 +1,665 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from core import channeltools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from platformcode import platformtools + +__channel__ = "pelispedia" + +CHANNEL_HOST = "http://www.pelispedia.tv/" + +# Configuracion del canal +try: + __modo_grafico__ = config.get_setting('modo_grafico', __channel__) + __perfil__ = config.get_setting('perfil', __channel__) +except: + __modo_grafico__ = True + __perfil__ = 0 + +# Fijar perfil de color +perfil = [['0xFF6E2802', '0xFFFAA171', '0xFFE9D7940'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']] + +if __perfil__ - 1 >= 0: + color1, color2, color3 = perfil[__perfil__ - 1] +else: + 
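+    # Profile 0 ("Sin color") lands here: empty strings leave the skin's
+    # default text colour in place.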
color1 = color2 = color3 = "" + +parameters = channeltools.get_channel_parameters(__channel__) +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] + + +def mainlist(item): + logger.info() + + itemlist = list() + itemlist.append(Item(channel=__channel__, title="Películas", text_color=color1, fanart=fanart_host, folder=False, + thumbnail=thumbnail_host, text_bold=True)) + itemlist.append( + Item(channel=__channel__, action="listado", title=" Novedades", text_color=color2, viewcontent="movies", + url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), fanart=fanart_host, extra="movies", + viewmode="movie_with_plot", + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Directors%20Chair.png")) + itemlist.append( + Item(channel=__channel__, action="listado_alfabetico", title=" Por orden alfabético", text_color=color2, + url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies", fanart=fanart_host, + viewmode="thumbnails", + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/A-Z.png")) + itemlist.append(Item(channel=__channel__, action="listado_genero", title=" Por género", text_color=color2, + url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies", fanart=fanart_host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genre.png")) + itemlist.append(Item(channel=__channel__, action="listado_anio", title=" Por año", text_color=color2, + url=urlparse.urljoin(CHANNEL_HOST, "movies/all/"), extra="movies", fanart=fanart_host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png")) + # itemlist.append(Item(channel=__channel__, action="search", title=" Buscar...", text_color=color2, + # url=urlparse.urljoin(CHANNEL_HOST, "buscar/?s="), extra="movies", fanart=fanart_host)) + + itemlist.append(Item(channel=__channel__, title="Series", text_color=color1, fanart=fanart_host, folder=False, + thumbnail=thumbnail_host, text_bold=True)) + itemlist.append( + Item(channel=__channel__, action="listado", title=" Novedades", text_color=color2, viewcontent="tvshows", + url=urlparse.urljoin(CHANNEL_HOST, "series/all/"), extra="serie", fanart=fanart_host, + viewmode="movie_with_plot", + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/TV%20Series.png")) + itemlist.append(Item(channel=__channel__, action="listado_alfabetico", title=" Por orden alfabético", + text_color=color2, extra="serie", fanart=fanart_host, viewmode="thumbnails", + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/A-Z.png")) + itemlist.append(Item(channel=__channel__, action="listado_genero", title=" Por género", extra="serie", + text_color=color2, fanart=fanart_host, url=urlparse.urljoin(CHANNEL_HOST, "series/all/"), + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genre.png")) + itemlist.append( + Item(channel=__channel__, action="listado_anio", title=" Por año", extra="serie", text_color=color2, + fanart=fanart_host, url=urlparse.urljoin(CHANNEL_HOST, "series/all/"), + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png")) + # itemlist.append(Item(channel=__channel__, action="search", title=" Buscar...", text_color=color2, + # url=urlparse.urljoin(CHANNEL_HOST, "series/buscar/?s="), extra="serie", fanart=fanart_host)) + + itemlist.append(Item(channel=__channel__, title="", 
fanart=fanart_host, folder=False, thumbnail=thumbnail_host)) + + itemlist.append(Item(channel=__channel__, action="settings", title="Configuración", text_color=color1, + fanart=fanart_host, text_bold=True, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + return itemlist + + +def settings(item): + return platformtools.show_channel_settings() + + +def listado_alfabetico(item): + logger.info() + + itemlist = [] + + for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ': + + cadena = "series/letra/" + if item.extra == "movies": + cadena = 'movies/all/?letra=' + viewcontent = "movies" + if letra == '0': + cadena += "Num" + else: + cadena += letra + else: + viewcontent = "tvshows" + if letra == '0': + cadena += "num/" + else: + cadena += letra + "/" + + itemlist.append( + Item(channel=__channel__, action="listado", title=letra, url=urlparse.urljoin(CHANNEL_HOST, cadena), + extra=item.extra, text_color=color2, viewcontent=viewcontent, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/A-Z.png")) + + return itemlist + + +def listado_genero(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data) + + if item.extra == "movies": + cadena = 'movies/all/?gender=' + viewcontent = "movies" + patron = '<select name="gender" id="genres" class="auxBtn1">.*?</select>' + data = scrapertools.find_single_match(data, patron) + patron = '<option value="([^"]+)".+?>(.*?)</option>' + + else: + cadena = "series/genero/" + viewcontent = "tvshows" + patron = '<select id="genres">.*?</select>' + data = scrapertools.find_single_match(data, patron) + patron = '<option name="([^"]+)".+?>(.*?)</option>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for key, value in matches[1:]: + cadena2 = cadena + key + if item.extra != "movies": + cadena2 += "/" + + itemlist.append( + Item(channel=__channel__, action="listado", title=value, url=urlparse.urljoin(CHANNEL_HOST, cadena2), + extra=item.extra, text_color=color2, fanart=fanart_host, viewcontent=viewcontent, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genre.png")) + + return itemlist + + +def listado_anio(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data) + + if item.extra == "movies": + cadena = 'movies/all/?year=' + viewcontent = "movies" + patron = '<select name="year" id="years" class="auxBtn1">.*?</select>' + data = scrapertools.find_single_match(data, patron) + patron = '<option value="([^"]+)"' + titulo = 'Películas del año ' + else: + cadena = "series/anio/" + viewcontent = "tvshows" + patron = '<select id="year">.*?</select>' + data = scrapertools.find_single_match(data, patron) + patron = '<option name="([^"]+)"' + titulo = 'Series del año ' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for value in matches[1:]: + cadena2 = cadena + value + + if item.extra != "movies": + cadena2 += "/" + + itemlist.append(Item(channel=__channel__, action="listado", title=titulo + value, extra=item.extra, + url=urlparse.urljoin(CHANNEL_HOST, cadena2), text_color=color2, fanart=fanart_host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png", + viewcontent=viewcontent)) + + return itemlist + + +def search(item, texto): + # Funcion de busqueda desactivada + logger.info("texto=%s" % texto) + + 
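+    # listado() is reused for searching: the term is wrapped in '%' wildcards
+    # and spaces become '+' before being appended to the search URL.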
item.url = item.url + "%" + texto.replace(' ', '+') + "%" + + try: + return listado(item) + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + item = Item() + try: + if categoria == 'peliculas': + item.url = urlparse.urljoin(CHANNEL_HOST, "movies/all/") + item.extra = "movies" + + else: + return [] + + itemlist = listado(item) + if itemlist[-1].action == "listado": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + return itemlist + + +def listado(item): + logger.info() + itemlist = [] + + action = "findvideos" + content_type = "movie" + + if item.extra == 'serie': + action = "temporadas" + content_type = "tvshow" + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data) + # logger.info("data -- {}".format(data)) + + patron = '<li[^>]+><a href="([^"]+)" alt="([^<|\(]+).*?<img src="([^"]+).*?>.*?<span>\(([^)]+).*?' \ + '<p class="font12">(.*?)</p>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches[:28]: + title = "%s (%s)" % (scrapertools.unescape(scrapedtitle.strip()), scrapedyear) + plot = scrapertools.entityunescape(scrapedplot) + + new_item = Item(channel=__channel__, title=title, url=urlparse.urljoin(CHANNEL_HOST, scrapedurl), action=action, + thumbnail=scrapedthumbnail, plot=plot, context="", extra=item.extra, text_color=color3, + contentType=content_type, fulltitle=title) + + if item.extra == 'serie': + new_item.show = scrapertools.unescape(scrapedtitle.strip()) + # fix en algunos casos la url está mal + new_item.url = new_item.url.replace(CHANNEL_HOST + "pelicula", CHANNEL_HOST + "serie") + else: + new_item.fulltitle = scrapertools.unescape(scrapedtitle.strip()) + new_item.infoLabels = {'year': scrapedyear} + # logger.debug(new_item.tostring()) + + itemlist.append(new_item) + + # Obtenemos los datos basicos de todas las peliculas mediante multihilos + tmdb.set_infoLabels(itemlist, __modo_grafico__) + + # numero de registros que se muestran por página, se fija a 28 por cada paginación + if len(matches) >= 28: + + file_php = "666more" + tipo_serie = "" + + if item.extra == "movies": + anio = scrapertools.find_single_match(item.url, "(?:year=)(\w+)") + letra = scrapertools.find_single_match(item.url, "(?:letra=)(\w+)") + genero = scrapertools.find_single_match(item.url, "(?:gender=|genre=)(\w+)") + params = "letra=%s&year=%s&genre=%s" % (letra, anio, genero) + + else: + tipo2 = scrapertools.find_single_match(item.url, "(?:series/|tipo2=)(\w+)") + tipo_serie = "&tipo=serie" + + if tipo2 != "all": + file_php = "letra" + tipo_serie += "&tipo2=" + tipo2 + + genero = "" + if tipo2 == "anio": + genero = scrapertools.find_single_match(item.url, "(?:anio/|genre=)(\w+)") + if tipo2 == "genero": + genero = scrapertools.find_single_match(item.url, "(?:genero/|genre=)(\w+)") + if tipo2 == "letra": + genero = scrapertools.find_single_match(item.url, "(?:letra/|genre=)(\w+)") + + params = "genre=%s" % genero + + url = "http://www.pelispedia.tv/api/%s.php?rangeStart=28&rangeEnd=28%s&%s" % (file_php, tipo_serie, params) + + if "rangeStart" in item.url: + ant_inicio = scrapertools.find_single_match(item.url, "rangeStart=(\d+)&") + inicio 
= str(int(ant_inicio) + 28)
+            url = item.url.replace("rangeStart=" + ant_inicio, "rangeStart=" + inicio)
+
+        itemlist.append(Item(channel=__channel__, action="listado", title=">> Página siguiente", extra=item.extra,
+                             url=url, thumbnail=thumbnail_host, fanart=fanart_host, text_color=color2))
+
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+
+    itemlist = []
+
+    # Descarga la página
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
+
+    patron = '<li class="clearfix gutterVertical20"><a href="([^"]+)".*?><small>(.*?)</small>.*?' \
+             '<span class.+?>(.*?)</span>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedtitle, scrapedname in matches:
+        # logger.info("scrap {}".format(scrapedtitle))
+        # fix: "\d+" en la temporada, para no fallar con temporadas de dos dígitos (10+)
+        patron = 'Season\s+(\d+),\s+Episode\s+(\d+)'
+        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
+        season, episode = match[0]
+
+        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
+            continue
+
+        title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
+        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
+                              contentType="episode")
+        if 'infoLabels' not in new_item:
+            new_item.infoLabels = {}
+
+        new_item.infoLabels['season'] = season
+        new_item.infoLabels['episode'] = episode.zfill(2)
+
+        itemlist.append(new_item)
+
+    # TODO no hacer esto si estamos añadiendo a la videoteca
+    if not item.extra:
+        # Obtenemos los datos de todos los capitulos de la temporada mediante multihilos
+        tmdb.set_infoLabels(itemlist, __modo_grafico__)
+        for i in itemlist:
+            if i.infoLabels['title']:
+                # Si el capitulo tiene nombre propio añadirselo al titulo del item
+                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
+            if i.infoLabels.has_key('poster_path'):
+                # Si el capitulo tiene imagen propia remplazar al poster
+                i.thumbnail = i.infoLabels['poster_path']
+
+    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
+                  reverse=config.get_setting('orden_episodios', __channel__))
+
+    # Opción "Añadir esta serie a la videoteca"
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
+                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
+                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
+
+    return itemlist
+
+
+def temporadas(item):
+    logger.info()
+    itemlist = []
+
+    # Descarga la página
+    data = httptools.downloadpage(item.url).data
+
+    data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
+
+    if not item.fanart:
+        patron = '<div class="hero-image"><img src="([^"]+)"'
+        item.fanart = scrapertools.find_single_match(data, patron)
+
+    patron = '<h3 class="pt15 pb15 dBlock clear seasonTitle">([^<]+).*?'
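+    # El patrón se construye en dos partes: esta primera línea captura el título
+    # de cada temporada y la siguiente le añade la captura de la miniatura
+    # (<figure><img ...>) asociada; ambas se aplican juntas con re.DOTALL.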
+ patron += '<div class="bpM18 bpS25 mt15 mb20 noPadding"><figure><img src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) > 1: + for scrapedseason, scrapedthumbnail in matches: + temporada = scrapertools.find_single_match(scrapedseason, '(\d+)') + new_item = item.clone(text_color=color2, action="episodios", season=temporada, thumbnail=scrapedthumbnail) + new_item.infoLabels['season'] = temporada + new_item.extra = "" + itemlist.append(new_item) + + # Obtenemos los datos de todas las temporadas de la serie mediante multihilos + tmdb.set_infoLabels(itemlist, __modo_grafico__) + for i in itemlist: + i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle']) + if i.infoLabels['title']: + # Si la temporada tiene nombre propio añadirselo al titulo del item + i.title += " - %s" % (i.infoLabels['title']) + if i.infoLabels.has_key('poster_path'): + # Si la temporada tiene poster propio remplazar al de la serie + i.thumbnail = i.infoLabels['poster_path'] + + itemlist.sort(key=lambda it: it.title) + + # Opción "Añadir esta serie a la videoteca" + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) + + return itemlist + else: + return episodios(item) + + +def findvideos(item): + logger.info() + logger.info("item.url %s" % item.url) + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data) + + patron = '<iframe src=".+?id=(\d+)' + key = scrapertools.find_single_match(data, patron) + url = CHANNEL_HOST + 'api/iframes.php?id=%s&update1.1' % key + + headers = dict() + headers["Referer"] = item.url + data = httptools.downloadpage(url, headers=headers).data + + # Descarta la opción descarga que es de publicidad + patron = '<a href="(?!http://go.ad2up.com)([^"]+)".+?><img src="/api/img/([^.]+)' + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedurl, scrapedtitle in matches: + # En algunos vídeos hay opción flash "vip" con varias calidades + if "api/vip.php" in scrapedurl: + data_vip = httptools.downloadpage(scrapedurl).data + patron = '<a href="([^"]+)".+?><img src="/api/img/([^.]+).*?<span class="text">([^<]+)<' + matches_vip = re.compile(patron, re.DOTALL).findall(data_vip) + for url, titlevip, calidad in matches_vip: + title = "Ver vídeo en [" + titlevip + "] " + calidad + itemlist.append(item.clone(title=title, url=url, action="play")) + # fix se ignora esta url ya que no devuelve videos + elif "http://www.pelispedia.tv/Pe_Player_Html6/index.php?" 
in scrapedurl: + continue + else: + title = "Ver vídeo en [" + scrapedtitle + "]" + new_item = item.clone(title=title, url=scrapedurl, action="play", extra=item.url, referer=url) + itemlist.append(new_item) + + # Opción "Añadir esta pelicula a la videoteca" + if item.extra == "movies" and config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta película a la videoteca", url=item.url, + infoLabels=item.infoLabels, action="add_pelicula_to_library", extra="findvideos", + fulltitle=item.title, text_color=color2)) + + return itemlist + + +def play(item): + logger.info("url=%s" % item.url) + + itemlist = [] + + subtitle = "" + + # html5 - http://www.pelispedia.vip + if item.url.startswith("http://www.pelispedia.vip"): + + headers = dict() + headers["Referer"] = item.referer + data = httptools.downloadpage(item.url, headers=headers).data + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data) + + from lib import jsunpack + match = scrapertools.find_single_match(data, '\.</div><script type="text/rocketscript">(.*?)</script>') + data = jsunpack.unpack(match) + data = data.replace("\\'", "'") + + subtitle = scrapertools.find_single_match(data, "tracks:\[{file:'([^']+)',label:'Spanish'") + media_urls = scrapertools.find_multiple_matches(data, "{file:'(.+?)',label:'(.+?)',type:'video/mp4'") + + # la calidad más baja tiene que ir primero + media_urls = sorted(media_urls, key=lambda k: k[1]) + + if len(media_urls) > 0: + for url, desc in media_urls: + itemlist.append([desc, url, 0, subtitle]) + + # otro html5 - https://pelispedia.co/ver/f.php + elif item.url.startswith("https://pelispedia.co/ver/f.php"): + + headers = dict() + headers["Referer"] = item.referer + data = httptools.downloadpage(item.url, headers=headers).data + + sub = scrapertools.find_single_match(data, "subtitulo='([^']+)'") + data_sub = httptools.downloadpage(sub).data + subtitle = save_sub(data_sub) + + from lib import jsunpack + match = scrapertools.find_single_match(data, '<script type="text/rocketscript">(.*?)</script>') + data = jsunpack.unpack(match) + data = data.replace("\\'", "'") + + media_urls = scrapertools.find_multiple_matches(data, "{file:'(.+?)',label:'(.+?)'") + + # la calidad más baja tiene que ir primero + media_urls = sorted(media_urls, key=lambda k: k[1]) + + if len(media_urls) > 0: + for url, desc in media_urls: + itemlist.append([desc, url, 0, subtitle]) + + # NUEVO + # otro html5 - http://player.pelispedia.tv/ver?v= + elif item.url.startswith("http://player.pelispedia.tv/ver?v="): + _id = scrapertools.find_single_match(item.url, 'ver\?v=(.+?)$') + + headers = dict() + headers["Referer"] = item.referer + data = httptools.downloadpage(item.url, headers=headers).data + + sub = scrapertools.find_single_match(data, 'var parametros = "\?pic=20&id=([^&]+)&sub=ES";') + sub = "http://player.pelispedia.tv/cdn" + sub + data_sub = httptools.downloadpage(sub).data + subtitle = save_sub(data_sub) + + csrf_token = scrapertools.find_single_match(data, '<meta name="csrf-token" content="([^"]+)">') + + ct = "" + iv = "" + s = "" + pre_token = '{"ct": %s,"iv": %s,"s":%s}' % (ct, iv, s) + + import base64 + token = base64.b64encode(pre_token) + + url = "http://player.pelispedia.tv/template/protected.php" + post = "fv=%s&url=%s&sou=%s&token=%s" % ("0", _id, "pic", token) + # eyJjdCI6IkVNYUd3Z2IwS2szSURzSGFGdkxGWlE9PSIsIml2IjoiZDI0NzhlYzU0OTZlYTJkNWFlOTFkZjAzZTVhZTNlNmEiLCJzIjoiOWM3MTM3MjNhMTkyMjFiOSJ9 + data = httptools.downloadpage(url, post=post).data + + logger.debug("datito 
%s " % data) + + media_urls = scrapertools.find_multiple_matches(data, '"url":"([^"]+)".*?"width":([^,]+),') + + # la calidad más baja tiene que ir primero + media_urls = sorted(media_urls, key=lambda k: int(k[1])) + + if len(media_urls) > 0: + for url, desc in media_urls: + itemlist.append([desc, url, 0, subtitle]) + + # netu + elif item.url.startswith("http://www.pelispedia.tv/netu.html?"): + url = item.url.replace("http://www.pelispedia.tv/netu.html?url=", "") + + from servers import netutv + media_urls = netutv.get_video_url(urllib.unquote(url)) + itemlist.append(media_urls[0]) + + # flash + elif item.url.startswith("http://www.pelispedia.tv"): + key = scrapertools.find_single_match(item.url, 'index.php\?id=([^&]+).+?sub=([^&]+)&.+?imagen=([^&]+)') + + # if len(key) > 2: + # thumbnail = key[2] + if key[1] != "": + url_sub = "http://www.pelispedia.tv/sub/%s.srt" % key[1] + data_sub = httptools.downloadpage(url_sub).data + subtitle = save_sub(data_sub) + + url = "http://www.pelispedia.tv/gkphp_flv/plugins/gkpluginsphp.php" + post = "link=" + urllib.quote(key[0]) + + data = httptools.downloadpage(url, post=post).data + + media_urls = scrapertools.find_multiple_matches(data, 'link":"([^"]+)","type":"([^"]+)"') + + # la calidad más baja tiene que ir primero + media_urls = sorted(media_urls, key=lambda k: k[1]) + + if len(media_urls) > 0: + for url, desc in media_urls: + url = url.replace("\\", "") + itemlist.append([desc, url, 0, subtitle]) + + # openload + elif item.url.startswith("https://load.pelispedia.co/embed/openload.co"): + + url = item.url.replace("/embed/", "/stream/") + data = httptools.downloadpage(url).data + url = scrapertools.find_single_match(data, '<meta name="og:url" content="([^"]+)"') + + from servers import openload + media_urls = openload.get_video_url(url) + itemlist.append(media_urls[0]) + + # raptu + elif item.url.startswith("https://load.pelispedia.co/embed/raptu.com"): + url = item.url.replace("/embed/", "/stream/") + data = httptools.downloadpage(url).data + url = scrapertools.find_single_match(data, '<meta property="og:url" content="([^"]+)"') + from servers import raptu + media_urls = raptu.get_video_url(url) + if len(media_urls) > 0: + for desc, url, numero, subtitle in media_urls: + itemlist.append([desc, url, numero, subtitle]) + + else: + itemlist = servertools.find_video_items(data=item.url) + for videoitem in itemlist: + videoitem.title = item.title + videoitem.channel = __channel__ + + return itemlist + + +def save_sub(data): + import os + try: + ficherosubtitulo = os.path.join(config.get_data_path(), 'subtitulo_pelispedia.srt') + if os.path.exists(ficherosubtitulo): + try: + os.remove(ficherosubtitulo) + except IOError: + logger.error("Error al eliminar el archivo " + ficherosubtitulo) + raise + + fichero = open(ficherosubtitulo, "wb") + fichero.write(data) + fichero.close() + subtitle = ficherosubtitulo + except: + subtitle = "" + logger.error("Error al descargar el subtítulo") + + return subtitle diff --git a/plugin.video.alfa/channels/pelispekes.json b/plugin.video.alfa/channels/pelispekes.json new file mode 100755 index 00000000..5f7e2304 --- /dev/null +++ b/plugin.video.alfa/channels/pelispekes.json @@ -0,0 +1,23 @@ +{ + "id": "pelispekes", + "name": "PelisPekes", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "pelispekes.png", + "banner": "pelispekes.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código 
innecesario." + } + ], + "categories": [ + "movie" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelispekes.py b/plugin.video.alfa/channels/pelispekes.py new file mode 100755 index 00000000..4a384c8d --- /dev/null +++ b/plugin.video.alfa/channels/pelispekes.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- + +import re + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + + if item.url == "": + item.url = "http://www.pelispekes.com/" + + data = scrapertools.cachePage(item.url) + ''' + <div class="poster-media-card"> + <a href="http://www.pelispekes.com/un-gallo-con-muchos-huevos/" title="Un gallo con muchos Huevos"> + <div class="poster"> + <div class="title"> + <span class="under-title">Animacion</span> + </div> + <span class="rating"> + <i class="glyphicon glyphicon-star"></i><span class="rating-number">6.2</span> + </span> + <div class="poster-image-container"> + <img width="300" height="428" src="http://image.tmdb.org/t/p/w185/cz3Kb6Xa1q0uCrsTIRDS7fYOZyw.jpg" title="Un gallo con muchos Huevos" alt="Un gallo con muchos Huevos"/> + ''' + patron = '<div class="poster-media-card"[^<]+' + patron += '<a href="([^"]+)" title="([^"]+)"[^<]+' + patron += '<div class="poster"[^<]+' + patron += '<div class="title"[^<]+' + patron += '<span[^<]+</span[^<]+' + patron += '</div[^<]+' + patron += '<span class="rating"[^<]+' + patron += '<i[^<]+</i><span[^<]+</span[^<]+' + patron += '</span[^<]+' + patron += '<div class="poster-image-container"[^<]+' + patron += '<img width="\d+" height="\d+" src="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + url = scrapedurl + title = scrapedtitle + thumbnail = scrapedthumbnail + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail, + plot=plot, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail)) + + # Extrae la pagina siguiente + next_page_url = scrapertools.find_single_match(data, + '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right') + if next_page_url != "": + itemlist.append(Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url, + viewmode="movie")) + + return itemlist + + +def findvideos(item): + logger.info("item=" + item.tostring()) + + ''' + <h2>Sinopsis</h2> + <p>Para que todo salga bien en la prestigiosa Academia Werth, la pequeña y su madre se mudan a una casa nueva. La pequeña es muy seria y madura para su edad y planea estudiar durante las vacaciones siguiendo un estricto programa organizado por su madre; pero sus planes son perturbados por un vecino excéntrico y generoso. Él le enseña un mundo extraordinario en donde todo es posible. Un mundo en el que el Aviador se topó alguna vez con el misterioso Principito. Entonces comienza la aventura de la pequeña en el universo del Principito. Y así descubre nuevamente su infancia y comprenderá que sólo se ve bien con el corazón. Lo esencial es invisible a los ojos. 
Adaptación de la novela homónima de Antoine de Saint-Exupery.</p> + <div + ''' + + # Descarga la página para obtener el argumento + data = scrapertools.cachePage(item.url) + data = data.replace("www.pelispekes.com/player/tune.php?nt=", "netu.tv/watch_video.php?v=") + + item.plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2>(.*?)<div') + item.plot = scrapertools.htmlclean(item.plot).strip() + item.contentPlot = item.plot + logger.info("plot=" + item.plot) + + return servertools.find_video_items(item=item, data=data) diff --git a/plugin.video.alfa/channels/pelisplus.json b/plugin.video.alfa/channels/pelisplus.json new file mode 100755 index 00000000..14c1f328 --- /dev/null +++ b/plugin.video.alfa/channels/pelisplus.json @@ -0,0 +1,71 @@ +{ + "id": "pelisplus", + "name": "PelisPlus", + "compatible": { + "addon_version": "4.3" + }, + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s13.postimg.org/z5dbzfasn/pelisplus.png", + "banner": "https://s16.postimg.org/p9xz2vlo5/pelisplus_banner.png", + "version": 1, + "changes": [ + { + "date": "24/06/2017", + "description": "Cambios para autoplay" + }, + { + "date": "06/06/2017", + "description": "compatibilidad con AutoPlay" + }, + { + "date": "03/06/2017", + "description": "Reparado por falla del canal" + }, + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "02/05/2017", + "description": "fix findvideos" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "06/12/2016", + "description": "Release." + } + ], + "categories": [ + "latino", + "movie", + "tvshow", + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisplus.py b/plugin.video.alfa/channels/pelisplus.py new file mode 100755 index 00000000..fd2994b2 --- /dev/null +++ b/plugin.video.alfa/channels/pelisplus.py @@ -0,0 +1,542 @@ +# -*- coding: utf-8 -*- + +import re + +from channels import autoplay +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item + +host = "http://www.pelisplus.tv/" + +headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + +patrones = ['<img src="([^"]+)" alt=".*?" 
class="picture-movie">', + '<span>Sinopsis:<\/span>.([^<]+)<span class="text-detail-hide"><\/span>.<\/p>'] + +IDIOMA = {'latino': 'Latino'} +list_language = IDIOMA.values() + +list_quality = ['1080p', + '720p', + '480p', + '360p', + '240p' + ] +list_servers = [ + 'directo', + 'openload', + 'thevideos' +] + + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append( + item.clone(title="Peliculas", + action="menupeliculas", + thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png', + fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png', + extra='peliculas/' + )) + + itemlist.append( + item.clone(title="Series", + action="menuseries", + thumbnail='https://s27.postimg.org/iahczwgrn/series.png', + fanart='https://s27.postimg.org/iahczwgrn/series.png', + extra='peliculas/' + )) + + itemlist.append( + item.clone(title="Documentales", + action="lista", + url=host + 'documentales/pag-1', + thumbnail='https://s16.postimg.org/7xjj4bmol/documental.png', + fanart='https://s16.postimg.org/7xjj4bmol/documental.png', + extra='documentales/' + )) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def menupeliculas(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Todas", + action="lista", + url=host + 'peliculas/pag-1', + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + extra='peliculas/' + )) + + itemlist.append(item.clone(title="Ultimas", + action="lista", + url=host + 'estrenos/pag-1', + thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + extra='estrenos/' + )) + + itemlist.append(item.clone(title="Generos", + action="generos", + url=host + 'peliculas/pag-1', + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + extra='documentales/' + )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url=host + 'busqueda/?s=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png', + extra='peliculas/' + )) + + return itemlist + + +def menuseries(item): + logger.info() + itemlist = [] + + itemlist.append(item.clone(title="Todas", + action="lista", + url=host + "series/pag-1", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + extra='series/' + )) + + itemlist.append(item.clone(title="Generos", + action="generos", + url=host + 'series/pag-1', + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + extra='series/' + )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url=host + 'busqueda/?s=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png', + extra='series/' + )) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + try: + if texto != '': + return lista(item) + else: + return [] + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def lista(item): + logger.info() + + itemlist = [] + + if 'series/' in item.extra: + accion = 'temporadas' + tipo = 'tvshow' + else: + accion = 'findvideos' + tipo = 'movie' + + data = httptools.downloadpage(item.url).data + + if item.title != 'Buscar': + 
patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \ + 'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>' + actual = scrapertools.find_single_match(data, + '<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" ' + 'class="page bicon last"><<\/a>') + else: + patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon ' \ + 'online-play"><\/i>.*?\n<h2 class="title title-.*?">.*?\n<a href="([^"]+)" title="([^"]+)">.*?>' + actual = '' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + url = scrapedurl + title = scrapertools.decodeHtmlentities(scrapedtitle) + thumbnail = scrapedthumbnail + + filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w154", "") + filtro_list = {"poster_path": filtro_thumb} # Nombre del campo a filtrar y valor en los resultados de la api + # de tmdb + filtro_list = filtro_list.items() + + if item.title != 'Buscar': + itemlist.append( + Item(channel=item.channel, + contentType=tipo, + action=accion, + title=title, + url=scrapedurl, + thumbnail=thumbnail, + fulltitle=scrapedtitle, + infoLabels={'filtro': filtro_list}, + contentTitle=scrapedtitle, + contentSerieName=scrapedtitle, + extra=item.extra, + context=autoplay.context + )) + else: + item.extra = item.extra.rstrip('s/') + if item.extra in url: + itemlist.append( + Item(channel=item.channel, + contentType=tipo, + action=accion, + title=scrapedtitle, + url=scrapedurl, + thumbnail=scrapedthumbnail, + fulltitle=scrapedtitle, + infoLabels={'filtro': filtro_list}, + contentTitle=scrapedtitle, + contentSerieName=scrapedtitle, + extra=item.extra, + context=autoplay.context + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + # Encuentra los elementos que no tienen plot y carga las paginas correspondientes para obtenerlo# + for item in itemlist: + if item.infoLabels['plot'] == '': + data = httptools.downloadpage(item.url).data + item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>') + item.plot = scrapertools.find_single_match(data, + '<span>Sinopsis:<\/span>.([^<]+)<span ' + 'class="text-detail-hide"><\/span>.<\/p>') + + # Paginacion + if item.title != 'Buscar' and actual != '': + if itemlist != []: + next_page = str(int(actual) + 1) + next_page_url = host + item.extra + 'pag-' + next_page + itemlist.append( + Item(channel=item.channel, + action="lista", + title='Siguiente >>>', + url=next_page_url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', + extra=item.extra + )) + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + templist = [] + data = httptools.downloadpage(item.url).data + + patron = '<span class="ico accordion_down"><\/span>Temporada([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedtitle in matches: + infoLabels = item.infoLabels + url = item.url + title = 'Temporada ' + scrapedtitle.strip(' \r\n') + thumbnail = scrapertools.find_single_match(data, '<img src="([^"]+)" alt="" class="picture-movie">') + plot = scrapertools.find_single_match(data, + '<span>Sinopsis:<\/span>.([^<]+).<span class="text-detail-hide"><\/span>') + fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>') + contentSeasonNumber = scrapedtitle.strip(' \r\n') + itemlist.append( + Item(channel=item.channel, + action="episodios", + title=title, + fulltitle=item.title, + url=url, + 
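+                 # contentSeasonNumber e infoLabels['season'] (más abajo) permiten a
+                 # tmdb.set_infoLabels_itemlist asociar cada item con su temporada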
thumbnail=thumbnail, + plot=plot, + fanart=fanart, + extra=scrapedtitle.rstrip('\n'), + contentSerieName=item.contentSerieName, + contentSeasonNumber=contentSeasonNumber, + infoLabels={'season': contentSeasonNumber}, + context=item.context + )) + + if item.extra == 'temporadas': + for tempitem in itemlist: + templist += episodios(tempitem) + else: + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra="temporadas", + contentSerieName=item.contentSerieName, + contentSeasonNumber=contentSeasonNumber + )) + if item.extra == 'temporadas': + return templist + else: + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<span class="ico season_play"><\/span>([^<]+)<\/a>.<a href="([^"]+)" class="season-online enabled">' + temporada = 'temporada/' + item.extra.strip(' ') + matches = re.compile(patron, re.DOTALL).findall(data) + infoLabels = item.infoLabels + + for scrapedtitle, scrapedurl in matches: + + if temporada in scrapedurl: + url = scrapedurl + contentSeasonNumber = re.findall(r'temporada.*?(\d+)', url) + capitulo = re.findall(r'Capitulo \d+', scrapedtitle) + contentEpisodeNumber = re.findall(r'\d+', capitulo[0]) + contentEpisodeNumber = contentEpisodeNumber[0] + infoLabels['episode'] = contentEpisodeNumber + title = contentSeasonNumber[0] + 'x' + contentEpisodeNumber + ' - ' + scrapedtitle + + thumbnail = scrapertools.find_single_match(data, '<img src="([^"]+)" alt="" class="picture-movie">') + plot = '' + fanart = '' + itemlist.append( + Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + extra=scrapedtitle, + contentSeasonNumber=item.contentSeasonNumber, + infoLabels=infoLabels, + context=item.context + )) + if item.extra != 'temporadas': + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + itemlist = fail_tmdb(itemlist) + return itemlist + + +def fail_tmdb(itemlist): + logger.info() + realplot = '' + for item in itemlist: + if item.infoLabels['plot'] == '': + data = httptools.downloadpage(item.url).data + if item.fanart == '': + item.fanart = scrapertools.find_single_match(data, patrones[0]) + realplot = scrapertools.find_single_match(data, patrones[1]) + item.plot = scrapertools.remove_htmltags(realplot) + return itemlist + + +def generos(item): + tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "Drama": "https://s16.postimg.org/94sia332d/drama.png", + "Accion": "https://s3.postimg.org/y6o9puflv/accion.png", + "Aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "Animacion": "https://s13.postimg.org/5on877l87/animacion.png", + "Ciencia Ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png", + "Musica": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "Western": "https://s23.postimg.org/lzyfbjzhn/western.png", + "Fantasia": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "Guerra": "https://s23.postimg.org/71itp9hcr/belica.png", + "Misterio": 
"https://s1.postimg.org/w7fdgf2vj/misterio.png", + "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png", + "Historia": "https://s15.postimg.org/fmc050h1n/historia.png", + "Pelicula De La Television": "https://s9.postimg.org/t8xb14fb3/delatv.png", + "Foreign": "https://s29.postimg.org/jdc2m158n/extranjera.png"} + + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<i class="s-upper" id="([^"]+)"><\/i>.<span>([^<]+)<\/span>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + + url = scrapedurl + 'pag-1' + title = scrapedtitle + if scrapedtitle in tgenero: + thumbnail = tgenero[scrapedtitle] + fanart = tgenero[scrapedtitle] + else: + thumbnail = '' + fanart = '' + extra = scrapedurl.replace('http://www.pelisplus.tv/', '') + itemlist.append( + Item(channel=item.channel, + action="lista", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + fanart=fanart, + extra=extra + )) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + duplicados = [] + datas = httptools.downloadpage(item.url).data + patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?" + matches = re.compile(patron, re.DOTALL).findall(datas) + + for scrapedurl in matches: + + if 'elreyxhd' or 'pelisplus.biz' in scrapedurl: + patronr = '' + data = httptools.downloadpage(scrapedurl, headers=headers).data + + quote = scrapertools.find_single_match(data, 'sources.*?file.*?http') + if quote and "'" in quote: + patronr = "file:'([^']+)',label:'([^.*?]+)',type:.*?'.*?}" + elif '"' in quote: + patronr = '{file:"(.*?)",label:"(.*?)"}' + if patronr != '': + matchesr = re.compile(patronr, re.DOTALL).findall(data) + + for scrapedurl, scrapedcalidad in matchesr: + url = scrapedurl + language = 'latino' + quality = scrapedcalidad.decode('cp1252').encode('utf8') + title = item.contentTitle + ' (' + str(scrapedcalidad) + ')' + thumbnail = item.thumbnail + fanart = item.fanart + if url not in duplicados: + itemlist.append(item.clone(action="play", + title=title, + url=url, + thumbnail=thumbnail, + fanart=fanart, + show=title, + extra='directo', + language=language, + quality=quality, + server='directo', + )) + duplicados.append(url) + + url = scrapedurl + from core import servertools + itemlist.extend(servertools.find_video_items(data=datas)) + + for videoitem in itemlist: + # videoitem.infoLabels = item.infoLabels + videoitem.channel = item.channel + if videoitem.quality == '' or videoitem.language == '': + videoitem.quality = 'default' + videoitem.language = 'Latino' + if videoitem.server != '': + videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server) + else: + videoitem.thumbnail = item.thumbnail + videoitem.server = 'directo' + videoitem.action = 'play' + videoitem.fulltitle = item.title + + if videoitem.extra != 'directo' and 'youtube' not in videoitem.url: + videoitem.title = item.contentTitle + ' (' + videoitem.server + ')' + + n = 0 + for videoitem in itemlist: + if 'youtube' in videoitem.url: + videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]' + itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n] + n = n + 1 + + if item.extra == 'findvideos' and 'youtube' in itemlist[-1]: + itemlist.pop(1) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + if 'serie' not in item.url: + if config.get_videolibrary_support() and 
len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle + )) + + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + item.extra = 'estrenos/' + try: + if categoria == 'peliculas': + item.url = host + 'estrenos/pag-1' + + elif categoria == 'infantiles': + item.url = host + 'peliculas/animacion/pag-1' + + elif categoria == 'documentales': + item.url = host + 'documentales/pag-1' + item.extra = 'documentales/' + + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + itemlist = filtertools.get_links(itemlist, item, list_language) diff --git a/plugin.video.alfa/channels/pelisxporno.json b/plugin.video.alfa/channels/pelisxporno.json new file mode 100755 index 00000000..9dd87023 --- /dev/null +++ b/plugin.video.alfa/channels/pelisxporno.json @@ -0,0 +1,37 @@ +{ + "id": "pelisxporno", + "name": "Pelisxporno", + "active": true, + "adult": true, + "language": "es", + "thumbnail": "http://i.imgur.com/ywMHwat.png", + "banner": "pelisxporno.png", + "changes": [ + { + "date": "28/05/2017", + "description": "Corregido por cambios en toda la web" + }, + { + "date": "21/02/2017", + "description": "Canal modernizado con los últimos cambios y mejorado" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "version": 1, + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisxporno.py b/plugin.video.alfa/channels/pelisxporno.py new file mode 100755 index 00000000..21f23531 --- /dev/null +++ b/plugin.video.alfa/channels/pelisxporno.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +from core import httptools +from core import logger +from core import scrapertools + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(item.clone(action="lista", title="Novedades", url="http://www.pelisxporno.com/?order=date")) + itemlist.append(item.clone(action="categorias", title="Categorías", url="http://www.pelisxporno.com/categorias/")) + itemlist.append(item.clone(action="search", title="Buscar", url="http://www.pelisxporno.com/?s=%s")) + + return itemlist + + +def search(item, texto): + logger.info() + item.url = item.url % texto + return lista(item) + + +def lista(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + # Extrae las entradas (carpetas) + patron = '<div class="Picture">.*?href="([^"]+)".*?<img src="([^"]+)".*?' 
\ + '<span class="fa-clock.*?>([^<]+)<.*?<h2 class="Title">.*?>([^<]+)</a>' \ + '.*?<p>(.*?)</p>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, duration, scrapedtitle, plot in matches: + if duration: + scrapedtitle += " (%s)" % duration + + itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + infoLabels={'plot': plot})) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)"') + if next_page: + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + + # Extrae las entradas (carpetas) + patron = '<figure class="Picture">.*?<a href="([^"]+)".*?src="([^"]+)".*?<a.*?>(.*?)</a>' \ + '.*?<span class="fa-film Clr3B">(\d+)' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle, cantidad in matches: + if cantidad: + scrapedtitle += " (%s vídeos)" % cantidad + itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail)) + + return itemlist diff --git a/plugin.video.alfa/channels/pepecine.json b/plugin.video.alfa/channels/pepecine.json new file mode 100755 index 00000000..4dd1174b --- /dev/null +++ b/plugin.video.alfa/channels/pepecine.json @@ -0,0 +1,75 @@ +{ + "id": "pepecine", + "name": "Pepecine", + "active": true, + "adult": false, + "language": "es", + "fanart": "https://d12.usercdn.com/i/02278/u875vjx9c0xs.png", + "thumbnail": "pepecine.png", + "banner": "pepecine.png", + "version": 1, + "changes": [ + { + "date": "26/06/17", + "description": "Desactivacion temporal del canal por cambios en web" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "31/01/17", + "description": "Solucionado bug al añadir contenido a la videoteca" + }, + { + "date": "26/01/17", + "description": "Fix por cambios en la web" + }, + { + "date": "01/07/16", + "description": "Eliminado código innecesario." 
+ }, + { + "date": "26/05/16", + "description": "Corregir bug al añadir serie a la videoteca desde el listado de episodios" + } + ], + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pepecine.py b/plugin.video.alfa/channels/pepecine.py new file mode 100755 index 00000000..3ff644b7 --- /dev/null +++ b/plugin.video.alfa/channels/pepecine.py @@ -0,0 +1,430 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item, InfoLabels + +__url_base__ = "http://pepecine.net" +__chanel__ = "pepecine" +fanart_host = "https://d12.usercdn.com/i/02278/u875vjx9c0xs.png" + + +def mainlist(item): + logger.info() + + itemlist = [] + url_peliculas = urlparse.urljoin(__url_base__, "plugins/ultimas-peliculas-updated.php") + itemlist.append( + Item(channel=__chanel__, title="Películas", text_color="0xFFEB7600", text_bold=True, fanart=fanart_host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/movies.png")) + itemlist.append(Item(channel=__chanel__, action="listado", title=" Novedades", page=0, viewcontent="movies", + text_color="0xFFEB7600", extra="movie", fanart=fanart_host, url=url_peliculas, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/movies.png")) + itemlist.append(Item(channel=__chanel__, action="sub_filtrar", title=" Filtrar películas por género", + text_color="0xFFEB7600", extra="movie", fanart=fanart_host, url=url_peliculas, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/movies_filtrar.png")) + itemlist.append(Item(channel=__chanel__, action="search", title=" Buscar películas por título", + text_color="0xFFEB7600", extra="movie", fanart=fanart_host, url=url_peliculas, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/movies_buscar.png")) + + url_series = urlparse.urljoin(__url_base__, "plugins/series-episodios-updated.php") + itemlist.append( + Item(channel=__chanel__, title="Series", text_color="0xFFEB7600", text_bold=True, fanart=fanart_host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png")) + itemlist.append(Item(channel=__chanel__, action="listado", title=" Novedades", page=0, viewcontent="tvshows", + text_color="0xFFEB7600", extra="series", fanart=fanart_host, url=url_series, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png")) + itemlist.append(Item(channel=__chanel__, action="sub_filtrar", title=" Filtrar 
series por género", + text_color="0xFFEB7600", extra="series", fanart=fanart_host, url=url_series, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv_filtrar.png")) + itemlist.append(Item(channel=__chanel__, action="search", title=" Buscar series por título", + text_color="0xFFEB7600", extra="series", fanart=fanart_host, url=url_series, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv_buscar.png")) + itemlist.append(Item(channel=__chanel__, action="listado", title=" Ultimos episodios actualizados", + text_color="0xFFEB7600", extra="series_novedades", fanart=fanart_host, + url=urlparse.urljoin(__url_base__, "plugins/ultimos-capitulos-updated.php"), + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png")) + + return itemlist + + +def sub_filtrar(item): + logger.info() + itemlist = [] + generos = ("acción", "animación", "aventura", "ciencia ficción", "comedia", "crimen", + "documental", "drama", "familia", "fantasía", "guerra", "historia", "misterio", + "música", "musical", "romance", "terror", "thriller", "western") + thumbnail = ('https://d12.usercdn.com/i/02278/spvnq8hghtok.jpg', + 'https://d12.usercdn.com/i/02278/olhbpe7phjas.jpg', + 'https://d12.usercdn.com/i/02278/8xm23q2vewtt.jpg', + 'https://d12.usercdn.com/i/02278/o4vuvd7q4bau.jpg', + 'https://d12.usercdn.com/i/02278/v7xq7k9bj3dh.jpg', + 'https://d12.usercdn.com/i/02278/yo5uj9ff7jmg.jpg', + 'https://d12.usercdn.com/i/02278/ipeodwh6vw6t.jpg', + 'https://d12.usercdn.com/i/02278/0c0ra1wb11ro.jpg', + 'https://d12.usercdn.com/i/02278/zn85t6f2oxdv.jpg', + 'https://d12.usercdn.com/i/02278/ipk94gsdqzwa.jpg', + 'https://d12.usercdn.com/i/02278/z5hsi6fr4yri.jpg', + 'https://d12.usercdn.com/i/02278/nq0jvyp7vlb9.jpg', + 'https://d12.usercdn.com/i/02278/tkbe7p3rjmps.jpg', + 'https://d12.usercdn.com/i/02278/is60ge4zv1ve.jpg', + 'https://d12.usercdn.com/i/02278/86ubk310hgn8.jpg', + 'https://d12.usercdn.com/i/02278/ph1gfpgtljf7.jpg', + 'https://d12.usercdn.com/i/02278/bzp3t2edgorg.jpg', + 'https://d12.usercdn.com/i/02278/31i1xkd8m30b.jpg', + 'https://d12.usercdn.com/i/02278/af05ulgs20uf.jpg') + + if item.extra == "movie": + viewcontent = "movies" + else: + viewcontent = "tvshows" + + for g, t in zip(generos, thumbnail): + itemlist.append(item.clone(action="listado", title=g.capitalize(), filtro=("genero", g), thumbnail=t, + viewcontent=viewcontent)) + + return itemlist + + +def search(item, texto): + logger.info("search:" + texto) + # texto = texto.replace(" ", "+") + item.filtro = ("search", texto.lower()) + try: + return listado(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = urlparse.urljoin(__url_base__, "plugins/ultimas-peliculas-updated.php") + item.extra = "movie" + + elif categoria == 'infantiles': + item.url = urlparse.urljoin(__url_base__, "plugins/ultimas-peliculas-updated.php") + item.filtro = ("genero", "animación") + item.extra = "movie" + + elif categoria == 'series': + item.url = urlparse.urljoin(__url_base__, "plugins/ultimos-capitulos-updated.php") + item.extra = "series_novedades" + + else: + return [] + + item.action = "listado" + itemlist = listado(item) + if itemlist[-1].action == "listado": + itemlist.pop() + + # Se captura 
la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def listado(item): + logger.info() + itemlist = [] + + try: + data_dict = jsontools.load(httptools.downloadpage(item.url).data) + except: + return itemlist # Devolvemos lista vacia + + # Filtrado y busqueda + if item.filtro: + for i in data_dict["result"][:]: + if (item.filtro[0] == "genero" and item.filtro[1] not in i['genre'].lower()) or \ + (item.filtro[0] == "search" and item.filtro[1] not in i['title'].lower()): + data_dict["result"].remove(i) + + if not item.page: + item.page = 0 + + offset = int(item.page) * 30 + limit = offset + 30 + + for i in data_dict["result"][offset:limit]: + infoLabels = InfoLabels() + idioma = '' + + if item.extra == "movie": + action = "findvideos" + # viewcontent = 'movies' + infoLabels["title"] = i["title"] + title = '%s (%s)' % (i["title"], i['year']) + url = urlparse.urljoin(__url_base__, "ver-pelicula-online/" + str(i["id"])) + + elif item.extra == "series": + action = "get_temporadas" + # viewcontent = 'seasons' + title = i["title"] + infoLabels['tvshowtitle'] = i["title"] + url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"])) + + else: # item.extra=="series_novedades": + action = "findvideos" + # viewcontent = 'episodes' + infoLabels['tvshowtitle'] = i["title"] + infoLabels['season'] = i['season'] + infoLabels['episode'] = i['episode'].zfill(2) + flag = scrapertools.find_single_match(i["label"], '(\s*\<img src=.*\>)') + idioma = i["label"].replace(flag, "") + title = '%s %sx%s (%s)' % (i["title"], infoLabels["season"], infoLabels["episode"], idioma) + url = urlparse.urljoin(__url_base__, "episodio-online/" + str(i["id"])) + + if i.has_key("poster") and i["poster"]: + thumbnail = re.compile("/w\d{3}/").sub("/w500/", i["poster"]) + else: + thumbnail = item.thumbnail + if i.has_key("background") and i["background"]: + fanart = i["background"] + else: + fanart = item.fanart + + # Rellenamos el diccionario de infoLabels + infoLabels['title_id'] = i['id'] # title_id: identificador de la pelicula/serie en pepecine.com + if i['genre']: infoLabels['genre'] = i['genre'] + if i['year']: infoLabels['year'] = i['year'] + # if i['tagline']: infoLabels['plotoutline']=i['tagline'] + if i['plot']: + infoLabels['plot'] = i['plot'] + else: + infoLabels['plot'] = "" + if i['runtime']: infoLabels['duration'] = int(i['runtime']) * 60 + if i['imdb_rating']: + infoLabels['rating'] = i['imdb_rating'] + elif i['tmdb_rating']: + infoLabels['rating'] = i['tmdb_rating'] + if i['tmdb_id']: infoLabels['tmdb_id'] = i['tmdb_id'] + if i['imdb_id']: infoLabels['imdb_id'] = i['imdb_id'] + + newItem = Item(channel=item.channel, action=action, title=title, url=url, extra=item.extra, + fanart=fanart, thumbnail=thumbnail, viewmode="movie_with_plot", # viewcontent=viewcontent, + language=idioma, text_color="0xFFFFCE9C", infoLabels=infoLabels) + newItem.year = i['year'] + newItem.contentTitle = i['title'] + if 'season' in infoLabels and infoLabels['season']: + newItem.contentSeason = infoLabels['season'] + if 'episode' in infoLabels and infoLabels['episode']: + newItem.contentEpisodeNumber = infoLabels['episode'] + itemlist.append(newItem) + + # Obtenemos los datos basicos mediante multihilos + tmdb.set_infoLabels(itemlist) + + # Paginacion + if len(data_dict["result"]) > limit: + itemlist.append(item.clone(text_color="0xFF994D00", title=">> Pagina siguiente >>", page=item.page + 
1)) + + return itemlist + + +def get_temporadas(item): + logger.info() + + itemlist = [] + infoLabels = {} + + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + patron = 'vars.title =(.*?)};' + try: + data_dict = jsontools.load(scrapertools.get_match(data, patron) + '}') + except: + return itemlist # Devolvemos lista vacia + + if item.extra == "serie_add": + itemlist = get_episodios(item) + + else: + if len(data_dict["season"]) == 1: + # Si solo hay una temporada ... + item.infoLabels['season'] = data_dict["season"][0]["number"] + itemlist = get_episodios(item) + + else: # ... o si hay mas de una temporada + item.viewcontent = "seasons" + data_dict["season"].sort(key=lambda x: (x['number'])) # ordenamos por numero de temporada + for season in data_dict["season"]: + # filtramos enlaces por temporada + enlaces = filter(lambda l: l["season"] == season['number'], data_dict["link"]) + if enlaces: + item.infoLabels['season'] = season['number'] + title = '%s Temporada %s' % (item.title, season['number']) + + itemlist.append(item.clone(action="get_episodios", title=title, + text_color="0xFFFFCE9C", viewmode="movie_with_plot")) + + # Obtenemos los datos de todas las temporadas mediante multihilos + tmdb.set_infoLabels(itemlist) + + if config.get_videolibrary_support() and itemlist: + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'], + 'imdb_id': item.infoLabels['imdb_id']} + itemlist.append( + Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFFe5ffcc", + action="add_serie_to_library", extra='get_episodios###serie_add', url=item.url, + contentSerieName=data_dict["title"], infoLabels=infoLabels, + thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png')) + + return itemlist + + +def get_episodios(item): + logger.info() + itemlist = [] + # infoLabels = item.infoLabels + + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + patron = 'vars.title =(.*?)};' + try: + data_dict = jsontools.load(scrapertools.get_match(data, patron) + '}') + except: + return itemlist # Devolvemos lista vacia + + # Agrupar enlaces por episodios temXcap + temXcap_dict = {} + for link in data_dict['link']: + try: + season = str(int(link['season'])) + episode = str(int(link['episode'])).zfill(2) + except: + continue + + if int(season) != item.infoLabels["season"] and item.extra != "serie_add": + # Descartamos episodios de otras temporadas, excepto si los queremos todos + continue + + title_id = link['title_id'] + id = season + "x" + episode + if id in temXcap_dict: + l = temXcap_dict[id] + l.append(link) + temXcap_dict[id] = l + else: + temXcap_dict[id] = [link] + + # Ordenar lista de enlaces por temporada y capitulo + temXcap_list = temXcap_dict.items() + temXcap_list.sort(key=lambda x: (int(x[0].split("x")[0]), int(x[0].split("x")[1]))) + for episodio in temXcap_list: + title = '%s (%s)' % (item.contentSerieName, episodio[0]) + item.infoLabels['season'], item.infoLabels['episode'] = episodio[0].split('x') + itemlist.append(item.clone(action="findvideos", title=title, + viewmode="movie_with_plot", text_color="0xFFFFCE9C")) + + if item.extra != "serie_add": + # Obtenemos los datos de todos los capitulos de la temporada mediante multihilos + tmdb.set_infoLabels(itemlist) + for i in itemlist: + # Si el capitulo tiene nombre propio añadirselo al titulo del item + title = "%s: %s" % (i.title, i.infoLabels['title']) + i.title = 
title + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data) + patron = 'vars.title =(.*?)};' + try: + data_dict = jsontools.load(scrapertools.get_match(data, patron) + '}') + except: + return itemlist # Devolvemos lista vacia + + lista_servers = servertools.get_servers_list() + + for link in data_dict["link"]: + if item.contentType == 'episode' \ + and (item.contentSeason != link['season'] or item.contentEpisodeNumber != link['episode']): + # Si buscamos enlaces de un episodio descartamos los q no sean de este episodio + continue + + url = link["url"] + flag = scrapertools.find_single_match(link["label"], '(\s*\<img src=.*\>)') + idioma = link["label"].replace(flag, "") + if link["quality"] != "?": + calidad = (' [' + link["quality"] + ']') + else: + calidad = "" + video = find_videos(link["url"], lista_servers) + + if video["servidor"] != "": + servidor = video["servidor"] + url = video["url"] + title = "Ver en " + servidor.capitalize() + calidad + ' (' + idioma + ')' + itemlist.append(item.clone(action="play", viewmode="list", server=servidor, title=title, + text_color="0xFF994D00", url=url, folder=False)) + + if config.get_videolibrary_support() and itemlist and item.contentType == "movie": + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], + 'title': item.infoLabels['title']} + itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", + action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, + text_color="0xFFe5ffcc", + thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/channels/pepecine/tv.png')) + + return itemlist + + +def find_videos(url, lista_servers): + # logger.info() + ret = {'titulo': "", + 'url': "", + 'servidor': ""} + + # Ejecuta el find_videos en cada servidor hasta que encuentra una coicidencia + for serverid in lista_servers: + try: + servers_module = __import__("servers." 
+ serverid) + server_module = getattr(servers_module, serverid) + devuelve = server_module.find_videos(url) + + if devuelve: + ret["titulo"] = devuelve[0][0] + ret["url"] = devuelve[0][1] + ret["servidor"] = devuelve[0][2] + # reordenar el listado, es probable q el proximo enlace sea del mismo servidor + lista_servers.remove(serverid) + lista_servers.insert(0, serverid) + break + + except ImportError: + logger.error("No existe conector para #" + serverid + "#") + # import traceback + # logger.info(traceback.format_exc()) + except: + logger.error("Error en el conector #" + serverid + "#") + import traceback + logger.error(traceback.format_exc()) + + return ret + + +def episodios(item): + # Necesario para las actualizaciones automaticas + return get_temporadas(Item(channel=__chanel__, url=item.url, show=item.show, extra="serie_add")) diff --git a/plugin.video.alfa/channels/playmax.json b/plugin.video.alfa/channels/playmax.json new file mode 100755 index 00000000..a1aa1b0e --- /dev/null +++ b/plugin.video.alfa/channels/playmax.json @@ -0,0 +1,133 @@ +{ + "id": "playmax", + "name": "PlayMax", + "language": "es", + "active": true, + "adult": false, + "version": 1, + "changes": [ + { + "date": "17/04/2017", + "description": "Corregidos indices de series y peliculas" + }, + { + "date": "28/03/2017", + "description": "Añadidas opciones listas y corregidos capitulos de series" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "09/01/2017", + "description": "Canal renovado desde cero." + } + ], + "thumbnail": "http://i.imgur.com/X5Z40kT.png", + "banner": "playmax.png", + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "playmaxuser", + "type": "text", + "color": "0xFF25AA48", + "label": "@30014", + "enabled": true, + "visible": true + }, + { + "id": "playmaxpassword", + "type": "text", + "color": "0xFF25AA48", + "hidden": true, + "label": "@30015", + "enabled": "!eq(-1,'')", + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en búsqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Series", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "color": "0xFFd50b0b", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "menu_info", + "type": "bool", + "color": "0xFFd50b0b", + "label": "Mostrar menú intermedio película/episodio", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "Sin color", + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "last_page", + "type": "bool", + "color": "0xFFd50b0b", + "label": "Ocultar opción elegir página en películas (Kodi)", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "order_web", + "type": "bool", + "color": "0xFFd50b0b", + "label": "Usar el mismo orden de los enlaces que la web", + "default": false, + 
"enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/playmax.py b/plugin.video.alfa/channels/playmax.py new file mode 100755 index 00000000..9eebc406 --- /dev/null +++ b/plugin.video.alfa/channels/playmax.py @@ -0,0 +1,923 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import jsontools as json +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item + +sid = config.get_setting("sid_playmax", "playmax") +apikey = "0ea143087685e9e0a23f98ae" +__modo_grafico__ = config.get_setting('modo_grafico', 'playmax') +__perfil__ = config.get_setting('perfil', "playmax") +__menu_info__ = config.get_setting('menu_info', 'playmax') + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']] + +if __perfil__ - 1 >= 0: + color1, color2, color3, color4, color5 = perfil[__perfil__ - 1] +else: + color1 = color2 = color3 = color4 = color5 = "" + +host = "https://playmax.mx" + + +def login(): + logger.info() + + try: + user = config.get_setting("playmaxuser", "playmax") + password = config.get_setting("playmaxpassword", "playmax") + if user == "" and password == "": + return False, "Para ver los enlaces de este canal es necesario registrarse en playmax.mx" + elif user == "" or password == "": + return False, "Usuario o contraseña en blanco. Revisa tus credenciales" + + data = httptools.downloadpage("https://playmax.mx/ucp.php?mode=login").data + if re.search(r'(?i)class="hb_user_data" title="%s"' % user, data): + if not config.get_setting("sid_playmax", "playmax"): + sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"') + if not sid_: + sid_ = scrapertools.find_single_match(config.get_cookie_data(), 'playmax.*?_sid\s*([A-z0-9]+)') + config.set_setting("sid_playmax", sid_, "playmax") + return True, "" + + confirm_id = scrapertools.find_single_match(data, 'name="confirm_id" value="([^"]+)"') + sid_log = scrapertools.find_single_match(data, 'name="sid" value="([^"]+)"') + post = "username=%s&password=%s&autologin=on&agreed=true&change_lang=0&confirm_id=%s&login=&sid=%s" \ + "&redirect=index.php&login=Entrar" % (user, password, confirm_id, sid_log) + data = httptools.downloadpage("https://playmax.mx/ucp.php?mode=login", post=post).data + if "contraseña incorrecta" in data: + logger.error("Error en el login") + return False, "Contraseña errónea. Comprueba tus credenciales" + elif "nombre de usuario incorrecto" in data: + logger.error("Error en el login") + return False, "Nombre de usuario no válido. 
Comprueba tus credenciales"
+        else:
+            logger.info("Login correcto")
+            sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
+            if not sid_:
+                sid_ = scrapertools.find_single_match(config.get_cookie_data(), 'playmax.*?_sid\s*([A-z0-9]+)')
+            config.set_setting("sid_playmax", sid_, "playmax")
+            # On the first login, enable global search and the "newest" sections
+            if not config.get_setting("primer_log", "playmax"):
+                config.set_setting("include_in_global_search", True, "playmax")
+                config.set_setting("include_in_newest_peliculas", True, "playmax")
+                config.set_setting("include_in_newest_series", True, "playmax")
+                config.set_setting("include_in_newest_infantiles", True, "playmax")
+                config.set_setting("primer_log", False, "playmax")
+            return True, ""
+    except:
+        import traceback
+        logger.error(traceback.format_exc())
+        return False, "Error en el login. Comprueba tus credenciales o si la web está operativa"
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+    item.text_color = color1
+
+    logueado, error_message = login()
+
+    if not logueado:
+        config.set_setting("include_in_global_search", False, "playmax")
+        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
+        return itemlist
+
+    itemlist.append(item.clone(title="Películas", action="", text_color=color2))
+    item.contentType = "movie"
+    itemlist.append(item.clone(title="    Novedades", action="fichas",
+                               url=host + "/catalogo.php?tipo[]=2&ad=2&ordenar=novedades&con_dis=on"))
+    itemlist.append(item.clone(title="    Populares", action="fichas",
+                               url=host + "/catalogo.php?tipo[]=2&ad=2&ordenar=pop&con_dis=on"))
+    itemlist.append(item.clone(title="    Índices", action="indices"))
+
+    itemlist.append(item.clone(title="Series", action="", text_color=color2))
+    item.contentType = "tvshow"
+    itemlist.append(item.clone(title="    Nuevos capítulos", action="fichas",
+                               url=host + "/catalogo.php?tipo[]=1&ad=2&ordenar=novedades&con_dis=on"))
+    itemlist.append(item.clone(title="    Nuevas series", action="fichas",
+                               url=host + "/catalogo.php?tipo[]=1&ad=2&ordenar=año&con_dis=on"))
+    itemlist.append(item.clone(title="    Índices", action="indices"))
+
+    item.contentType = "movie"
+    itemlist.append(item.clone(title="Documentales", action="fichas", text_color=color2,
+                               url=host + "/catalogo.php?&tipo[]=3&ad=2&ordenar=novedades&con_dis=on"))
+    itemlist.append(item.clone(title="Listas", action="listas", text_color=color2,
+                               url=host + "/listas.php?apikey=%s&sid=%s&start=0" % (apikey, sid), extra="listas"))
+    itemlist.append(item.clone(action="search", title="Buscar...", text_color=color2))
+    itemlist.append(item.clone(action="acciones_cuenta", title="Tus fichas", text_color=color4))
+    itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
+
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    item.url = "%s/buscar.php?apikey=%s&sid=%s&buscar=%s&modo=[fichas]&start=0" % (host, apikey, sid, texto)
+    try:
+        return busqueda(item)
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
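busqueda below walks playmax's XML API in windows of 20 results, bumping the start= query parameter until it reaches totalResultsFichas. A minimal, self-contained sketch of that offset arithmetic (the page_size and the total of 65 are made-up values standing in for the live API response):

import re

def next_page_url(url, page_size=20, total=65):
    """Return the URL of the next page, or None when the current page is the last."""
    actualpage = int(re.search(r"start=(\d+)", url).group(1))
    if actualpage + page_size >= total:
        return None  # 60..64 is the final window for a total of 65
    return url.replace("start=%s" % actualpage, "start=%s" % (actualpage + page_size))

url = "https://playmax.mx/buscar.php?buscar=foo&start=0"
while url:
    print url
    url = next_page_url(url)  # visits offsets 0, 20, 40, 60, then stops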
"tvshow" + show = f["Title"] + if not __menu_info__: + action = "episodios" + else: + tipo = "movie" + show = "" + + itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, text_color=color2, + contentTitle=f["Title"], show=show, contentType=tipo, infoLabels=infolab, + thumbnail=thumbnail)) + + if __modo_grafico__: + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + total = int(data["Data"]["totalResultsFichas"]) + actualpage = int(scrapertools.find_single_match(item.url, "start=(\d+)")) + if actualpage + 20 < total: + next_page = item.url.replace("start=%s" % actualpage, "start=%s" % (actualpage + 20)) + itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente", + url=next_page, thumbnail=item.thumbnail)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'series': + item.channel = "playmax" + item.extra = "newest" + item.url = host + "/catalogo.php?tipo[]=1&ad=2&ordenar=novedades&con_dis=on" + item.contentType = "tvshow" + itemlist = fichas(item) + + if itemlist[-1].action == "fichas": + itemlist.pop() + elif categoria == 'peliculas': + item.channel = "playmax" + item.extra = "newest" + item.url = host + "/catalogo.php?tipo[]=2&ad=2&ordenar=novedades&con_dis=on" + item.contentType = "movie" + itemlist = fichas(item) + + if itemlist[-1].action == "fichas": + itemlist.pop() + elif categoria == 'infantiles': + item.channel = "playmax" + item.extra = "newest" + item.url = host + "/catalogo.php?tipo[]=2&genero[]=60&ad=2&ordenar=novedades&con_dis=on" + item.contentType = "movie" + itemlist = fichas(item) + + if itemlist[-1].action == "fichas": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def indices(item): + logger.info() + itemlist = [] + + tipo = "2" + if item.contentType == "tvshow": + tipo = "1" + if "Índices" in item.title: + if item.contentType == "tvshow": + itemlist.append(item.clone(title="Populares", action="fichas", url=host + "/catalogo.php?tipo[]=1&ad=2&" + "ordenar=pop&con_dis=on")) + itemlist.append(item.clone(title="Más vistas", action="fichas", url=host + "/catalogo.php?tipo[]=%s&ad=2&" + "ordenar=siempre&con_dis=on" % tipo)) + itemlist.append(item.clone(title="Mejor valoradas", action="fichas", url=host + "/catalogo.php?tipo[]=%s&ad=2&" + "ordenar=valoracion&con_dis=on" % tipo)) + itemlist.append(item.clone(title="Géneros", url=host + "/catalogo.php")) + itemlist.append(item.clone(title="Idiomas", url=host + "/catalogo.php")) + if item.contentType == "movie": + itemlist.append(item.clone(title="Por calidad", url=host + "/catalogo.php")) + itemlist.append(item.clone(title="Por año")) + itemlist.append(item.clone(title="Por país", url=host + "/catalogo.php")) + + return itemlist + + if "Géneros" in item.title: + data = httptools.downloadpage(item.url).data + patron = '<div class="sel gen" value="([^"]+)">([^<]+)</div>' + matches = scrapertools.find_multiple_matches(data, patron) + for value, genero in matches: + url = item.url + "?tipo[]=%s&generos[]=%s&ad=2&ordenar=novedades&con_dis=on" % (tipo, value) + itemlist.append(item.clone(action="fichas", title=genero, url=url)) + elif "Idiomas" in 
item.title: + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, 'oname="Idioma">Cualquier(.*?)<input') + patron = '<div class="sel" value="([^"]+)">([^<]+)</div>' + matches = scrapertools.find_multiple_matches(bloque, patron) + for value, idioma in matches: + url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_idioma=%s" % (tipo, value) + itemlist.append(item.clone(action="fichas", title=idioma, url=url)) + elif "calidad" in item.title: + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, 'oname="Calidad">Cualquier(.*?)<input') + patron = '<div class="sel" value="([^"]+)">([^<]+)</div>' + matches = scrapertools.find_multiple_matches(bloque, patron) + for value, calidad in matches: + url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_calidad=%s" % (tipo, value) + itemlist.append(item.clone(action="fichas", title=calidad, url=url)) + elif "país" in item.title: + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, 'oname="País">Todos(.*?)<input') + patron = '<div class="sel" value="([^"]+)">([^<]+)</div>' + matches = scrapertools.find_multiple_matches(bloque, patron) + for value, pais in matches: + url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&pais=%s" % (tipo, value) + itemlist.append(item.clone(action="fichas", title=pais, url=url)) + else: + from datetime import datetime + year = datetime.now().year + for i in range(year, 1899, -1): + url = "%s/catalogo.php?tipo[]=%s&del=%s&al=%s&año=personal&ad=2&ordenar=novedades&con_dis=on" \ + % (host, tipo, i, i) + itemlist.append(item.clone(action="fichas", title=str(i), url=url)) + + return itemlist + + +def fichas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + fichas_marca = {'1': 'Siguiendo', '2': 'Pendiente', '3': 'Favorita', '4': 'Vista', '5': 'Abandonada'} + patron = '<div class="c_fichas_image">.*?href="\.([^"]+)".*?src="\.([^"]+)".*?' \ + '<div class="c_fichas_data".*?marked="([^"]*)".*?serie="([^"]*)".*?' 
\ + '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, marca, serie, episodio, scrapedtitle in matches: + tipo = "movie" + scrapedurl = host + scrapedurl.rsplit("-dc=")[0] + if not "-dc=" in scrapedurl: + scrapedurl += "-dc=" + scrapedthumbnail = host + scrapedthumbnail + action = "findvideos" + if __menu_info__: + action = "menu_info" + if serie: + tipo = "tvshow" + if episodio: + title = "%s - %s" % (episodio.replace("X", "x"), scrapedtitle) + else: + title = scrapedtitle + + if marca: + title += " [COLOR %s][%s][/COLOR]" % (color4, fichas_marca[marca]) + + new_item = Item(channel=item.channel, action=action, title=title, url=scrapedurl, + thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, contentType=tipo, + text_color=color2) + if new_item.contentType == "tvshow": + new_item.show = scrapedtitle + if not __menu_info__: + new_item.action = "episodios" + + itemlist.append(new_item) + + if itemlist and (item.extra == "listas_plus" or item.extra == "sigo"): + follow = scrapertools.find_single_match(data, '<div onclick="seguir_lista.*?>(.*?)<') + title = "Seguir Lista" + if follow == "Siguiendo": + title = "Dejar de seguir lista" + item.extra = "" + url = host + "/data.php?mode=seguir_lista&apikey=%s&sid=%s&lista=%s" % ( + apikey, sid, item.url.rsplit("/l", 1)[1]) + itemlist.insert(0, item.clone(action="acciones_cuenta", title=title, url=url, text_color=color4, + lista=item.title, folder=False)) + + next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="next"') + if next_page: + next_page = host + next_page.replace("&", "&") + itemlist.append(Item(channel=item.channel, action="fichas", title=">> Página Siguiente", url=next_page)) + + try: + total = int(scrapertools.find_single_match(data, '<span class="page-dots">.*href.*?>(\d+)')) + except: + total = 0 + if not config.get_setting("last_page", item.channel) and config.is_xbmc() and total > 2 \ + and item.extra != "newest": + itemlist.append(item.clone(action="select_page", title="Ir a página... 
(Total:%s)" % total, url=next_page, + text_color=color5)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + + if not item.infoLabels["tmdb_id"]: + item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data, + '<a href="https://www.themoviedb.org/[^/]+/(\d+)') + item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})') + if not item.infoLabels["genre"]: + item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data, + '<a itemprop="genre"[^>]+>([^<]+)</a>')) + if not item.infoLabels["plot"]: + item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>') + + dc = scrapertools.find_single_match(data, "var dc_ic = '\?dc=([^']+)'") + patron = '<div class="f_cl_l_c f_cl_l_c_id[^"]+" c_id="([^"]+)" .*?c_num="([^"]+)" c_name="([^"]+)"' \ + '.*?load_f_links\(\d+\s*,\s*(\d+).*?<div class="([^"]+)" onclick="marcar_capitulo' + matches = scrapertools.find_multiple_matches(data, patron) + lista_epis = [] + for c_id, episodio, title, ficha, status in matches: + episodio = episodio.replace("X", "x") + if episodio in lista_epis: + continue + lista_epis.append(episodio) + url = "https://playmax.mx/c_enlaces_n.php?ficha=%s&c_id=%s&dc=%s" % (ficha, c_id, dc) + title = "%s - %s" % (episodio, title) + if "_mc a" in status: + title = "[COLOR %s]%s[/COLOR] %s" % (color5, u"\u0474".encode('utf-8'), title) + + new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail, + fanart=item.fanart, show=item.show, infoLabels=item.infoLabels, text_color=color2, + referer=item.url, contentType="episode") + try: + new_item.infoLabels["season"], new_item.infoLabels["episode"] = episodio.split('x', 1) + except: + pass + itemlist.append(new_item) + + itemlist.sort(key=lambda it: (it.infoLabels["season"], it.infoLabels["episode"]), reverse=True) + if __modo_grafico__: + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + library_path = config.get_videolibrary_path() + if config.get_videolibrary_support() and not item.extra: + title = "Añadir serie a la videoteca" + if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"): + try: + from core import filetools + path = filetools.join(library_path, "SERIES") + files = filetools.walk(path) + for dirpath, dirname, filename in files: + if item.infoLabels["imdb_id"] in dirpath: + for f in filename: + if f != "tvshow.nfo": + continue + from core import videolibrarytools + head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f)) + canales = it.library_urls.keys() + canales.sort() + if "playmax" in canales: + canales.pop(canales.index("playmax")) + canales.insert(0, "[COLOR red]playmax[/COLOR]") + title = "Serie ya en tu videoteca. [%s] ¿Añadir?" 
% ",".join(canales) + break + except: + import traceback + logger.error(traceback.format_exc()) + pass + + itemlist.append(item.clone(action="add_serie_to_library", title=title, text_color=color5, + extra="episodios###library")) + if itemlist and not __menu_info__: + ficha = scrapertools.find_single_match(item.url, '-f(\d+)-') + itemlist.extend(acciones_fichas(item, sid, ficha)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + if item.contentType == "movie": + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + + if not item.infoLabels["tmdb_id"]: + item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data, '<a href="https://www.themoviedb.org/' + '[^/]+/(\d+)') + item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})') + + if __modo_grafico__: + tmdb.set_infoLabels_item(item, __modo_grafico__) + if not item.infoLabels["plot"]: + item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>') + if not item.infoLabels["genre"]: + item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data, '<a itemprop="genre"[^>]+>' + '([^<]+)</a>')) + + ficha = scrapertools.find_single_match(item.url, '-f(\d+)-') + if not ficha: + ficha = scrapertools.find_single_match(item.url, 'f=(\d+)') + cid = "0" + else: + ficha, cid = scrapertools.find_single_match(item.url, 'ficha=(\d+)&c_id=(\d+)') + + url = "https://playmax.mx/c_enlaces_n.php?apikey=%s&sid=%s&ficha=%s&cid=%s" % (apikey, sid, ficha, cid) + data = httptools.downloadpage(url).data + data = json.Xml2Json(data).result + + for k, v in data["Data"].items(): + try: + if type(v) is dict: + if k == "Online": + order = 1 + elif k == "Download": + order = 0 + else: + order = 2 + + itemlist.append(item.clone(action="", title=k, text_color=color3, order=order)) + if type(v["Item"]) is str: + continue + elif type(v["Item"]) is dict: + v["Item"] = [v["Item"]] + for it in v["Item"]: + thumbnail = "%s/styles/prosilver/imageset/%s.png" % (host, it['Host']) + title = " %s - %s/%s" % (it['Host'].capitalize(), it['Quality'], it['Lang']) + calidad = int(scrapertools.find_single_match(it['Quality'], '(\d+)p')) + calidadaudio = it['QualityA'].replace("...", "") + subtitulos = it['Subtitles'].replace("Sin subtítulos", "") + if subtitulos: + title += " (%s)" % subtitulos + if calidadaudio: + title += " [Audio:%s]" % calidadaudio + + likes = 0 + if it["Likes"] != "0" or it["Dislikes"] != "0": + likes = int(it["Likes"]) - int(it["Dislikes"]) + title += " (%s ok, %s ko)" % (it["Likes"], it["Dislikes"]) + if type(it["Url"]) is dict: + for i, enlace in enumerate(it["Url"]["Item"]): + titulo = title + " (Parte %s)" % (i + 1) + itemlist.append(item.clone(title=titulo, url=enlace, action="play", calidad=calidad, + thumbnail=thumbnail, order=order, like=likes, ficha=ficha, + cid=cid, folder=False)) + else: + url = it["Url"] + itemlist.append(item.clone(title=title, url=url, action="play", calidad=calidad, + thumbnail=thumbnail, order=order, like=likes, ficha=ficha, + cid=cid, folder=False)) + except: + pass + + if not config.get_setting("order_web", "playmax"): + itemlist.sort(key=lambda it: (it.order, it.calidad, it.like), reverse=True) + else: + itemlist.sort(key=lambda it: it.order, reverse=True) + if itemlist: + itemlist.extend(acciones_fichas(item, sid, ficha)) + + if not itemlist and item.contentType != "movie": + url = url.replace("apikey=%s&" % apikey, "") + data = 
httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + + patron = '<div id="f_fde_c"[^>]+>(.*?update_fecha\(\d+\)">)</div>' + estrenos = scrapertools.find_multiple_matches(data, patron) + for info in estrenos: + info = "Estreno en " + scrapertools.htmlclean(info) + itemlist.append(item.clone(action="", title=info)) + + if not itemlist: + itemlist.append(item.clone(action="", title="No hay enlaces disponibles")) + + return itemlist + + +def menu_info(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + + item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)') + item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})') + item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>') + item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data, + '<a itemprop="genre"[^>]+>([^<]+)</a>')) + if __modo_grafico__: + tmdb.set_infoLabels_item(item, __modo_grafico__) + + action = "findvideos" + title = "Ver enlaces" + if item.contentType == "tvshow": + action = "episodios" + title = "Ver capítulos" + itemlist.append(item.clone(action=action, title=title)) + + carpeta = "CINE" + tipo = "película" + action = "add_pelicula_to_library" + extra = "" + if item.contentType == "tvshow": + carpeta = "SERIES" + tipo = "serie" + action = "add_serie_to_library" + extra = "episodios###library" + + library_path = config.get_videolibrary_path() + if config.get_videolibrary_support(): + title = "Añadir %s a la videoteca" % tipo + if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"): + try: + from core import filetools + path = filetools.join(library_path, carpeta) + files = filetools.walk(path) + for dirpath, dirname, filename in files: + if item.infoLabels["imdb_id"] in dirpath: + namedir = dirpath.replace(path, '')[1:] + for f in filename: + if f != namedir + ".nfo" and f != "tvshow.nfo": + continue + from core import videolibrarytools + head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, f)) + canales = it.library_urls.keys() + canales.sort() + if "playmax" in canales: + canales.pop(canales.index("playmax")) + canales.insert(0, "[COLOR red]playmax[/COLOR]") + title = "%s ya en tu videoteca. [%s] ¿Añadir?" 
% (tipo.capitalize(), ",".join(canales)) + break + except: + import traceback + logger.error(traceback.format_exc()) + pass + + itemlist.append(item.clone(action=action, title=title, text_color=color5, extra=extra)) + + token_auth = config.get_setting("token_trakt", "tvmoviedb") + if token_auth and item.infoLabels["tmdb_id"]: + extra = "movie" + if item.contentType != "movie": + extra = "tv" + itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta", action="menu_trakt", + extra=extra)) + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta", context="")) + + itemlist.append(item.clone(action="", title="")) + ficha = scrapertools.find_single_match(item.url, '-f(\d+)-') + if not ficha: + ficha = scrapertools.find_single_match(item.url, 'f=(\d+)') + + itemlist.extend(acciones_fichas(item, sid, ficha, season=True)) + itemlist.append(item.clone(action="acciones_cuenta", title="Añadir a una lista", text_color=color3, ficha=ficha)) + + return itemlist + + +def acciones_fichas(item, sid, ficha, season=False): + marcarlist = [] + new_item = item.clone() + new_item.infoLabels.pop("duration", None) + estados = [{'following': 'seguir'}, {'favorite': 'favorita'}, {'view': 'vista'}, {'slope': 'pendiente'}] + url = "https://playmax.mx/ficha.php?apikey=%s&sid=%s&f=%s" % (apikey, sid, ficha) + data = httptools.downloadpage(url).data + data = json.Xml2Json(data).result + + try: + marked = data["Data"]["User"]["Marked"] + if new_item.contentType == "episode": + for epi in data["Data"]["Episodes"]["Season_%s" % new_item.infoLabels["season"]]["Item"]: + if int(epi["Episode"]) == new_item.infoLabels["episode"]: + epi_marked = epi["EpisodeViewed"].replace("yes", "ya") + epi_id = epi["Id"] + marcarlist.append(new_item.clone(action="marcar", title="Capítulo %s visto. ¿Cambiar?" % epi_marked, + text_color=color3, epi_id=epi_id)) + break + except: + pass + + try: + tipo = new_item.contentType.replace("movie", "Película").replace("episode", "Serie").replace("tvshow", "Serie") + for status in estados: + for k, v in status.items(): + if k != marked: + title = "Marcar %s como %s" % (tipo.lower(), v) + action = "marcar" + else: + title = "%s marcada como %s" % (tipo, v) + action = "" + if k == "following" and tipo == "Película": + continue + elif k == "following" and tipo == "Serie": + title = title.replace("seguir", "seguida") + if k != marked: + title = "Seguir serie" + action = "marcar" + marcarlist.insert(1, new_item.clone(action=action, title=title, text_color=color4, ficha=ficha, + folder=False)) + continue + + marcarlist.append(new_item.clone(action="marcar", title=title, text_color=color3, ficha=ficha, + folder=False)) + except: + pass + + try: + if season and item.contentType == "tvshow": + seasonlist = [] + for k, v in data["Data"]["Episodes"].items(): + vistos = False + season = k.rsplit("_", 1)[1] + if type(v) is str: + continue + elif type(v["Item"]) is not list: + v["Item"] = [v["Item"]] + + for epi in v["Item"]: + if epi["EpisodeViewed"] == "no": + vistos = True + seasonlist.append( + new_item.clone(action="marcar", title="Marcar temporada %s como vista" % season, + text_color=color1, season=int(season), ficha=ficha, folder=False)) + break + + if not vistos: + seasonlist.append( + new_item.clone(action="marcar", title="Temporada %s ya vista. ¿Revertir?" 
% season, + text_color=color1, season=int(season), ficha=ficha, folder=False)) + + seasonlist.sort(key=lambda it: it.season, reverse=True) + marcarlist.extend(seasonlist) + except: + pass + return marcarlist + + +def acciones_cuenta(item): + logger.info() + itemlist = [] + + if "Tus fichas" in item.title: + itemlist.append(item.clone(title="Capítulos", url="tf_block_c a", contentType="tvshow")) + itemlist.append(item.clone(title="Series", url="tf_block_s", contentType="tvshow")) + itemlist.append(item.clone(title="Películas", url="tf_block_p")) + itemlist.append(item.clone(title="Documentales", url="tf_block_d")) + return itemlist + elif "Añadir a una lista" in item.title: + data = httptools.downloadpage(host + "/c_listas.php?apikey=%s&sid=%s" % (apikey, sid)).data + data = json.Xml2Json(data).result + itemlist.append(item.clone(title="Crear nueva lista", folder=False)) + if data["Data"]["TusListas"] != "\t": + import random + data = data["Data"]["TusListas"]["Item"] + if type(data) is not list: + data = [data] + for child in data: + image = "" + title = "%s (%s fichas)" % (child["Title"], child["FichasInList"]) + images = [] + for i in range(1, 5): + if "sinimagen.png" not in child["Poster%s" % i]: + images.append(child["Poster%s" % i].replace("/100/", "/400/")) + if images: + image = images[random.randint(0, len(images) - 1)] + url = host + "/data.php?mode=add_listas&apikey=%s&sid=%s&ficha_id=%s" % (apikey, sid, item.ficha) + post = "lista_id[]=%s" % child["Id"] + itemlist.append(item.clone(title=title, url=url, post=post, thumbnail=image, folder=False)) + + return itemlist + elif "Crear nueva lista" in item.title: + from platformcode import platformtools + nombre = platformtools.dialog_input("", "Introduce un nombre para la lista") + if nombre: + dict_priv = {0: 'Pública', 1: 'Privada'} + priv = platformtools.dialog_select("Privacidad de la lista", ['Pública', 'Privada']) + if priv != -1: + url = host + "/data.php?mode=create_list&apikey=%s&sid=%s" % (apikey, sid) + post = "name=%s&private=%s" % (nombre, priv) + data = httptools.downloadpage(url, post) + platformtools.dialog_notification("Lista creada correctamente", + "Nombre: %s - %s" % (nombre, dict_priv[priv])) + platformtools.itemlist_refresh() + return + elif re.search(r"(?i)Seguir Lista", item.title): + from platformcode import platformtools + data = httptools.downloadpage(item.url) + platformtools.dialog_notification("Operación realizada con éxito", "Lista: %s" % item.lista) + return + elif item.post: + from platformcode import platformtools + data = httptools.downloadpage(item.url, item.post).data + platformtools.dialog_notification("Ficha añadida a la lista", "Lista: %s" % item.title) + platformtools.itemlist_refresh() + return + + data = httptools.downloadpage("https://playmax.mx/tusfichas.php").data + data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) + + bloque = scrapertools.find_single_match(data, item.url + '">(.*?)(?:<div class="tf_blocks|<div class="tf_o_move">)') + matches = scrapertools.find_multiple_matches(bloque, '<div class="tf_menu_mini">([^<]+)<(.*?)<cb></cb></div>') + for category, contenido in matches: + itemlist.append(item.clone(action="", title=category, text_color=color3)) + + patron = '<div class="c_fichas_image">.*?href="\.([^"]+)".*?src="\.([^"]+)".*?serie="([^"]*)".*?' 
\ + '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>' + entradas = scrapertools.find_multiple_matches(contenido, patron) + for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas: + tipo = "movie" + scrapedurl = host + scrapedurl + scrapedthumbnail = host + scrapedthumbnail + action = "findvideos" + if __menu_info__: + action = "menu_info" + if serie: + tipo = "tvshow" + if episodio: + title = " %s - %s" % (episodio.replace("X", "x"), scrapedtitle) + else: + title = " " + scrapedtitle + + new_item = Item(channel=item.channel, action=action, title=title, url=scrapedurl, + thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, contentType=tipo, + text_color=color2) + if new_item.contentType == "tvshow": + new_item.show = scrapedtitle + if not __menu_info__: + new_item.action = "episodios" + + itemlist.append(new_item) + + return itemlist + + +def marcar(item): + logger.info() + + if "Capítulo" in item.title: + url = "%s/data.php?mode=capitulo_visto&apikey=%s&sid=%s&c_id=%s" % (host, apikey, sid, item.epi_id) + message = item.title.replace("no", "marcado como").replace("ya", "cambiado a no").replace(" ¿Cambiar?", "") + elif "temporada" in item.title.lower(): + type_marcado = "1" + if "como vista" in item.title: + message = "Temporada %s marcada como vista" % item.season + else: + type_marcado = "2" + message = "Temporada %s marcada como no vista" % item.season + url = "%s/data.php?mode=temporada_vista&apikey=%s&sid=%s&ficha=%s&t_id=%s&type=%s" \ + % (host, apikey, sid, item.ficha, item.season, type_marcado) + else: + message = item.title.replace("Marcar ", "Marcada ").replace("Seguir serie", "Serie en seguimiento") + if "favorita" in item.title: + url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \ + % (host, apikey, sid, item.ficha, "3") + elif "pendiente" in item.title: + url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \ + % (host, apikey, sid, item.ficha, "2") + elif "vista" in item.title: + url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \ + % (host, apikey, sid, item.ficha, "4") + elif "Seguir" in item.title: + url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \ + % (host, apikey, sid, item.ficha, "2") + data = httptools.downloadpage(url) + url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \ + % (host, apikey, sid, item.ficha, "1") + + data = httptools.downloadpage(url) + if data.sucess and config.get_platform() != "plex" and item.action != "play": + from platformcode import platformtools + platformtools.dialog_notification("Acción correcta", message) + + +def listas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = json.Xml2Json(data).result + if item.extra == "listas": + itemlist.append(Item(channel=item.channel, title="Listas más seguidas", action="listas", text_color=color1, + url=item.url + "&orden=1", extra="listas_plus")) + itemlist.append(Item(channel=item.channel, title="Listas con más fichas", action="listas", text_color=color1, + url=item.url + "&orden=2", extra="listas_plus")) + itemlist.append(Item(channel=item.channel, title="Listas aleatorias", action="listas", text_color=color1, + url=item.url + "&orden=3", extra="listas_plus")) + if data["Data"]["ListasSiguiendo"] != "\t": + itemlist.append(Item(channel=item.channel, title="Listas que sigo", action="listas", text_color=color1, + url=item.url, extra="sigo")) + if data["Data"]["TusListas"] != "\t": + 
itemlist.append(Item(channel=item.channel, title="Mis listas", action="listas", text_color=color1,
+                             url=item.url, extra="mislistas"))
+
+        return itemlist
+
+    elif item.extra == "sigo":
+        data = data["Data"]["ListasSiguiendo"]["Item"]
+    elif item.extra == "mislistas":
+        data = data["Data"]["TusListas"]["Item"]
+    else:
+        data = data["Data"]["Listas"]["Item"]
+
+    if type(data) is not list:
+        data = [data]
+    import random
+    for child in data:
+        image = ""
+        title = "%s (%s fichas)" % (child["Title"], child["FichasInList"])
+        images = []
+        for i in range(1, 5):
+            if "sinimagen.png" not in child["Poster%s" % i]:
+                images.append(child["Poster%s" % i].replace("/100/", "/400/"))
+        if images:
+            image = images[random.randint(0, len(images) - 1)]
+        url = host + "/l%s" % child["Id"]
+        itemlist.append(Item(channel=item.channel, action="fichas", url=url, text_color=color3,
+                             thumbnail=image, title=title, extra=item.extra))
+
+    if len(itemlist) == 20:
+        start = scrapertools.find_single_match(item.url, 'start=(\d+)')
+        end = int(start) + 20
+        url = re.sub(r'start=%s' % start, 'start=%s' % end, item.url)
+        itemlist.append(item.clone(title=">> Página Siguiente", url=url))
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+    from core import servertools
+
+    devuelve = servertools.findvideos(item.url, True)
+    if devuelve:
+        item.url = devuelve[0][1]
+        item.server = devuelve[0][2]
+
+    if config.get_setting("mark_play", "playmax"):
+        if item.contentType == "movie":
+            marcar(item.clone(title="marcar como vista"))
+        else:
+            marcar(item.clone(title="Capítulo", epi_id=item.cid))
+
+    return [item]
+
+
+def select_page(item):
+    import xbmcgui
+    dialog = xbmcgui.Dialog()
+    number = dialog.numeric(0, "Introduce el número de página")
+    if number != "":
+        number = int(number) * 60
+        item.url = re.sub(r'start=(\d+)', "start=%s" % number, item.url)
+
+    return fichas(item)
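select_page above maps a user-entered page number onto the catalogue's start= offset (60 fichas per page) and rewrites the URL in place with re.sub. The same rewrite isolated as a pure function, a sketch only; the 60-items-per-page stride is taken from select_page, the URL is illustrative:

import re

def jump_to_page(url, page, page_size=60):
    """Rewrite the start= offset in url so it points at the given page."""
    return re.sub(r"start=(\d+)", "start=%s" % (page * page_size), url)

print jump_to_page("https://playmax.mx/catalogo.php?tipo[]=2&start=0", 3)
# -> https://playmax.mx/catalogo.php?tipo[]=2&start=180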
diff --git a/plugin.video.alfa/channels/playpornx.json b/plugin.video.alfa/channels/playpornx.json
new file mode 100755
index 00000000..a7147cc5
--- /dev/null
+++ b/plugin.video.alfa/channels/playpornx.json
@@ -0,0 +1,33 @@
+{
+    "id": "playpornx",
+    "name": "PlayPornX",
+    "active": true,
+    "adult": true,
+    "language": "es",
+    "thumbnail": "https://s22.postimg.org/eewoqq1w1/playpornx.png",
+    "banner": "https://s12.postimg.org/is9u6fsul/playpornx_banner.png",
+    "version": 1,
+    "changes": [
+        {
+            "date": "15/03/2017",
+            "description": "limpieza código"
+        },
+        {
+            "date": "07/12/2016",
+            "description": "Release."
+        }
+    ],
+    "categories": [
+        "adult"
+    ],
+    "settings": [
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": false,
+            "enabled": false,
+            "visible": false
+        }
+    ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/playpornx.py b/plugin.video.alfa/channels/playpornx.py
new file mode 100755
index 00000000..cbf9550b
--- /dev/null
+++ b/plugin.video.alfa/channels/playpornx.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from core import httptools
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+host = "http://www.playpornx.net/list-movies/"
+
+
+def mainlist(item):
+    itemlist = []
+    itemlist.append(Item(channel=item.channel, title="Todas", action="lista",
+                         thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
+                         fanart='https://s18.postimg.org/fwvaeo6qh/todas.png'))
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url='http://www.playpornx.net/?s=',
+                         thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
+                         fanart='https://s30.postimg.org/pei7txpa9/buscar.png'))
+
+    return itemlist
+
+
+def lista(item):
+    logger.info()
+
+    itemlist = []
+    if item.url == '': item.url = host
+    data = httptools.downloadpage(item.url).data
+    patron = '<a class="clip-link" title="([^"]+)" href="([^"]+)">\s*<span class="clip">\s*<img alt=".*?" width="190" height="266" src="([^"]+)" data-qazy="true" \/><span class="vertical-align"><\/span>\s*<\/span>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
+        url = scrapedurl
+        thumbnail = scrapedthumbnail
+        title = scrapedtitle
+
+        itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail))
+
+    # Pagination
+    if itemlist != []:
+        actual_page_url = item.url
+        next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)"')
+        if next_page != '':
+            itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page,
+                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', extra=item.extra))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+
+    try:
+        if texto != '':
+            return lista(item)
+        else:
+            return []
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
diff --git a/plugin.video.alfa/channels/plusdede.json b/plugin.video.alfa/channels/plusdede.json
new file mode 100755
index 00000000..0b44c371
--- /dev/null
+++ b/plugin.video.alfa/channels/plusdede.json
@@ -0,0 +1,90 @@
+{
+    "id": "plusdede",
+    "name": "Plusdede",
+    "active": true,
+    "adult": false,
+    "language": "es",
+    "version": 1,
+    "changes": [
+        {
+            "date": "12/07/2017",
+            "description": "First release"
+        }
+    ],
+    "categories": [
+        "movie",
+        "tvshow"
+    ],
+    "settings": [
+        {
+            "id": "plusdedeuser",
+            "type": "text",
+            "label": "@30014",
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "plusdedepassword",
+            "type": "text",
+            "hidden": true,
+            "label": "@30015",
+            "enabled": "!eq(-1,'')",
+            "visible": true
+        },
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": false,
+            "enabled": "!eq(-1,'') + !eq(-2,'')",
+            "visible": true
+        },
+        {
+            "id": "plusdedesortlinks",
+            "type": "list",
+            "label": "Ordenar enlaces",
+            "default": 0,
+            "enabled": true,
+            "visible": "!eq(-2,'') + !eq(-3,'')",
+            "lvalues": [
+                "No",
"Por no Reportes", + "Por Idioma", + "Por Calidad", + "Por Idioma y Calidad", + "Por Idioma y no Reportes", + "Por Idioma, Calidad y no Reportes" + ] + }, + { + "id": "plusdedeshowlinks", + "type": "list", + "label": "Mostrar enlaces", + "default": 0, + "enabled": true, + "visible": "!eq(-3,'') + !eq(-4,'')", + "lvalues": [ + "Todos", + "Ver online", + "Descargar" + ] + }, + { + "id": "plusdedenumberlinks", + "type": "list", + "label": "Limitar número de enlaces", + "default": 0, + "enabled": true, + "visible": "!eq(-4,'') + !eq(-5,'')", + "lvalues": [ + "No", + "5", + "10", + "15", + "20", + "25", + "30" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/plusdede.py b/plugin.video.alfa/channels/plusdede.py new file mode 100755 index 00000000..138d1c13 --- /dev/null +++ b/plugin.video.alfa/channels/plusdede.py @@ -0,0 +1,1006 @@ +# -*- coding: utf-8 -*- + +import os +import re +import sys +import urlparse + +from core import channeltools +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import platformtools + +HOST = 'http://www.plusdede.com' +__channel__ = 'plusdede' +parameters = channeltools.get_channel_parameters(__channel__) +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] +color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004'] + + +def login(): + url_origen = "https://www.plusdede.com/login?popup=1" + data = httptools.downloadpage(url_origen, follow_redirects=True).data + logger.debug("dataPLUSDEDE=" + data) + if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data): + return True + + token = scrapertools.find_single_match(data, '<input name="_token" type="hidden" value="([^"]+)"') + + post = "_token=" + str(token) + "&email=" + str( + config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str( + config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469" + # logger.debug("dataPLUSDEDE_POST="+post) + url = "https://www.plusdede.com/" + headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token} + data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers, + replace_headers=False).data + logger.debug("PLUSDEDE_DATA=" + data) + if "redirect" in data: + return True + else: + return False + + +def mainlist(item): + logger.info() + itemlist = [] + + if config.get_setting("plusdedeuser", "plusdede") == "": + itemlist.append( + Item(channel=item.channel, title="Habilita tu cuenta en la configuración...", action="settingCanal", + url="")) + else: + result = login() + if not result: + itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. 
Volver a intentar...")) + return itemlist + + item.url = HOST + item.fanart = fanart_host + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png" + itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True)) + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png" + itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True)) + + itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True)) + + itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host)) + item.thumbnail = "" + itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="")) + return itemlist + + +def settingCanal(item): + return platformtools.show_channel_settings() + + +def menuseries(item): + logger.info() + itemlist = [] + item.url = HOST + item.fanart = fanart_host + item.text_color = None + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png" + itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True)) + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png" + itemlist.append(item.clone(title="Series:", folder=False, text_color=color3, text_blod=True, select=True)) + itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/series")) + itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/series")) + itemlist.append( + item.clone(action="peliculas", title=" Siguiendo", url="https://www.plusdede.com/series/following")) + itemlist.append(item.clone(action="peliculas", title=" Capítulos Pendientes", + url="https://www.plusdede.com/series/mypending/0?popup=1", viewmode="movie")) + itemlist.append( + item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/series/favorites")) + itemlist.append( + item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/series/pending")) + itemlist.append(item.clone(action="peliculas", title=" Terminadas", url="https://www.plusdede.com/series/seen")) + itemlist.append( + item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/series/recommended")) + itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/series")) + itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host)) + + itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True)) + itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host)) + item.thumbnail = "" + itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="")) + return itemlist + + +def menupeliculas(item): + logger.info() + + itemlist = [] + item.url = HOST + item.fanart = fanart_host + item.text_color = None + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png" + itemlist.append(item.clone(title="Películas:", folder=False, text_color=color3, text_blod=True, select=True)) + itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/pelis")) + itemlist.append(item.clone(action="generos", title=" Por géneros", 
url="https://www.plusdede.com/pelis")) + itemlist.append(item.clone(action="peliculas", title=" Solo HD", url="https://www.plusdede.com/pelis?quality=3")) + itemlist.append( + item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/pelis/pending")) + itemlist.append( + item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/pelis/recommended")) + itemlist.append( + item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/pelis/favorites")) + itemlist.append(item.clone(action="peliculas", title=" Vistas", url="https://www.plusdede.com/pelis/seen")) + itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/pelis")) + + itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host)) + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png" + + itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True)) + + itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True)) + itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host)) + item.thumbnail = "" + itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="")) + return itemlist + + +def menulistas(item): + logger.info() + + itemlist = [] + item.url = HOST + item.fanart = fanart_host + item.text_color = None + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png" + itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True)) + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png" + + itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True)) + + itemlist.append(item.clone(title="Listas:", folder=False, text_color=color3, text_blod=True)) + itemlist.append( + item.clone(action="listas", tipo="populares", title=" Populares", url="https://www.plusdede.com/listas")) + itemlist.append( + item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url="https://www.plusdede.com/listas")) + itemlist.append( + item.clone(action="listas", tipo="tuslistas", title=" Tus Listas", url="https://www.plusdede.com/listas")) + itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host)) + item.thumbnail = "" + itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="")) + return itemlist + + +def generos(item): + logger.info() + tipo = item.url.replace("https://www.plusdede.com/", "") + # Descarga la pagina + data = httptools.downloadpage(item.url).data + logger.debug("data=" + data) + + # Extrae las entradas (carpetas) + data = scrapertools.find_single_match(data, + '<select name="genre_id" class="selectpicker" title="Selecciona...">(.*?)</select>') + patron = '<option value="([^"]+)">([^<]+)</option>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for id_genere, title in matches: + title = title.strip() + thumbnail = "" + plot = "" + # https://www.plusdede.com/pelis?genre_id=1 + url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + 
fulltitle=title)) + + return itemlist + + +def search(item, texto): + logger.info() + item.tipo = item.url.replace("https://www.plusdede.com/", "") + item.url = "https://www.plusdede.com/search/" + texto = texto.replace(" ", "-") + + item.url = item.url + texto + try: + return buscar(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscar(item): + logger.info() + + # Descarga la pagina + headers = {"X-Requested-With": "XMLHttpRequest"} + data = httptools.downloadpage(item.url, headers=headers).data + logger.debug("data=" + data) + + # Extrae las entradas (carpetas) + json_object = jsontools.load(data) + logger.debug("content=" + json_object["content"]) + data = json_object["content"] + + return parse_mixed_results(item, data) + + +def parse_mixed_results(item, data): + itemlist = [] + patron = '<div class="media-dropdown mini dropdown model" data-value="([^"]+)"+' + patron += '.*?<a href="([^"]+)"[^<]data-toggle="tooltip" data-container="body"+' + patron += ' data-delay="500" title="([^"]+)"[^<]+' + patron += '.*?src="([^"]+)"+' + patron += '.*?<div class="year">([^<]+)</div>+' + patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + if item.tipo == "lista": + following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">') + data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">') + if following.strip() == "following": + itemlist.append( + Item(channel='plusdede', title="Dejar de seguir", idtemp=data_id, token=item.token, valor="unfollow", + action="plusdede_check", url=item.url, tipo=item.tipo)) + else: + itemlist.append( + Item(channel='plusdede', title="Seguir esta lista", idtemp=data_id, token=item.token, valor="follow", + action="plusdede_check", url=item.url, tipo=item.tipo)) + + for visto, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedvalue in matches: + title = "" + if visto.strip() == "seen": + title += "[visto] " + title += scrapertools.htmlclean(scrapedtitle) + if scrapedyear != '': + title += " (" + scrapedyear + ")" + fulltitle = title + if scrapedvalue != '': + title += " (" + scrapedvalue + ")" + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + fanart = thumbnail.replace("mediathumb", "mediabigcover") + plot = "" + # https://www.plusdede.com/peli/the-lego-movie + # https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1 + + if "/peli/" in scrapedurl or "/docu/" in scrapedurl: + + # sectionStr = "peli" if "/peli/" in scrapedurl else "docu" + if "/peli/" in scrapedurl: + sectionStr = "peli" + else: + sectionStr = "docu" + referer = urlparse.urljoin(item.url, scrapedurl) + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("PELII_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + if item.tipo != "series": + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url, + thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart, + contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"])) + else: + referer = item.url + url = urlparse.urljoin(item.url, scrapedurl) + logger.debug("SERIE_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + if item.tipo != "pelis": + itemlist.append(Item(channel=item.channel, action="episodios", 
title=title, extra=referer, url=url, + thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart, + contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"])) + + next_page = scrapertools.find_single_match(data, + '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">') + if next_page != "": + url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "") + logger.debug("URL_SIGUIENTE:" + url) + itemlist.append( + Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente", + extra=item.extra, url=url)) + + try: + import xbmcplugin + xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED) + xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE) + except: + pass + return itemlist + + +def siguientes(item): # No utilizada + logger.info() + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + logger.debug("data=" + data) + + # Extrae las entradas (carpetas) + bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">') + patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+' + patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+' + patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+' + patron += '<div class="extra-info"><span class="year">[^<]+' + patron += '</span><span class="value"><i class="icon-star"></i>[^<]+' + patron += '</span></div>[^<]+' + patron += '</div>[^<]+' + patron += '</a>[^<]+' + patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + # for scrapedurl,scrapedtitle,scrapedthumbnail in matches: + for scrapedtitle, scrapedthumbnail, scrapedurl, scrapedsession, scrapedepisode in matches: + title = scrapertools.htmlclean(scrapedtitle) + session = scrapertools.htmlclean(scrapedsession) + episode = scrapertools.htmlclean(scrapedepisode) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + fanart = thumbnail.replace("mediathumb", "mediabigcover") + plot = "" + title = session + "x" + episode + " - " + title + # https://www.plusdede.com/peli/the-lego-movie + # https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1 + + referer = urlparse.urljoin(item.url, scrapedurl) + url = referer + # itemlist.append( Item(channel=item.channel, action="episodios" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title)) + itemlist.append( + Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode)) + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + return itemlist + + +def episodio(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + # logger.debug("data="+data) + + session = str(int(item.extra.split("|")[0])) + episode = str(int(item.extra.split("|")[1])) + patrontemporada = '<div class="checkSeason"[^>]+>Temporada ' + session + '<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>' + matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data) + + for bloque_episodios in matchestemporadas: + logger.debug("bloque_episodios=" + bloque_episodios) + + # Extrae los episodios + patron = '<span class="title defaultPopup" 
href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?' + matches = re.compile(patron, re.DOTALL).findall(bloque_episodios) + + for scrapedurl, scrapedtitle, info, visto in matches: + # visto_string = "[visto] " if visto.strip()=="active" else "" + if visto.strip() == "active": + visto_string = "[visto] " + else: + visto_string = "" + numero = episode + title = visto_string + session + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle) + thumbnail = "" + plot = "" + # https://www.plusdede.com/peli/the-lego-movie + # https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1 + # https://www.plusdede.com/links/viewepisode/id/475011?popup=1 + epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)") + url = "https://www.plusdede.com/links/viewepisode/id/" + epid + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, fanart=item.fanart, show=item.show)) + logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist2 = [] + for capitulo in itemlist: + itemlist2 = findvideos(capitulo) + return itemlist2 + + +def peliculas(item): + logger.info() + + # Descarga la pagina + headers = {"X-Requested-With": "XMLHttpRequest"} + data = httptools.downloadpage(item.url, headers=headers).data + # logger.debug("data_DEF_PELICULAS="+data) + + # Extrae las entradas (carpetas) + json_object = jsontools.load(data) + logger.debug("html=" + json_object["content"]) + data = json_object["content"] + + return parse_mixed_results(item, data) + + +def episodios(item): + logger.info() + itemlist = [] + + # Descarga la pagina + idserie = '' + data = httptools.downloadpage(item.url).data + # logger.debug("dataEPISODIOS="+data) + patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>' + matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data) + logger.debug(matchestemporadas) + idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"') + token = scrapertools.find_single_match(data, '_token" content="([^"]+)"') + if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")): + itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url, + thumbnail=item.thumbnail, fanart=item.fanart, folder=False)) + for nombre_temporada, bloque_episodios in matchestemporadas: + logger.debug("nombre_temporada=" + nombre_temporada) + logger.debug("bloque_episodios=" + bloque_episodios) + logger.debug("id_serie=" + idserie) + # Extrae los episodios + patron_episodio = '<li><a href="#"(.*?)</a></li>' + # patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"' + matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios) + # logger.debug(matches) + for data_episodio in matches: + + scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"') + scrapedurl = scrapertools.find_single_match(data_episodio, 'data-href="([^"]+)">\s*<div class="name">') + numero = scrapertools.find_single_match(data_episodio, '<span class="num">([^<]+)</span>') + scrapedtitle = 
scrapertools.find_single_match(data_episodio, + '<span class="num">.*?</span>\s*([^<]+)\s*</div>') + visto = scrapertools.find_single_match(data_episodio, '"show-close-footer episode model([^"]+)"') + + title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ", + "") + "x" + numero + " " + scrapertools.htmlclean( + scrapedtitle) + logger.debug("CAP_VISTO:" + visto) + if visto.strip() == "seen": + title = "[visto] " + title + + thumbnail = item.thumbnail + fanart = item.fanart + plot = "" + # https://www.plusdede.com/peli/the-lego-movie + # https://www.plusdede.com/links/view/slug/the-lego-movie/what/peli?popup=1 + # https://www.plusdede.com/links/viewepisode/id/475011?popup=1 + # epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)") + url = "https://www.plusdede.com" + scrapedurl + itemlist.append( + Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url, + thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show)) + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + if config.get_videolibrary_support(): + # con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta + # Sin año y sin valoración: + show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show) + # Sin año: + # show = re.sub(r"\s\(\d+\)", "", item.show) + # Sin valoración: + # show = re.sub(r"\s\(\d+\.\d+\)", "", item.show) + itemlist.append( + Item(channel='plusdede', title="Añadir esta serie a la biblioteca de XBMC", url=item.url, token=token, + action="add_serie_to_library", extra="episodios###", show=show)) + itemlist.append( + Item(channel='plusdede', title="Descargar todos los episodios de la serie", url=item.url, token=token, + action="download_all_episodes", extra="episodios", show=show)) + itemlist.append(Item(channel='plusdede', title="Marcar como Pendiente", tipo="5", idtemp=idserie, token=token, + valor="pending", action="plusdede_check", show=show)) + itemlist.append(Item(channel='plusdede', title="Marcar como Siguiendo", tipo="5", idtemp=idserie, token=token, + valor="following", action="plusdede_check", show=show)) + itemlist.append(Item(channel='plusdede', title="Marcar como Finalizada", tipo="5", idtemp=idserie, token=token, + valor="seen", action="plusdede_check", show=show)) + itemlist.append(Item(channel='plusdede', title="Marcar como Favorita", tipo="5", idtemp=idserie, token=token, + valor="favorite", action="plusdede_check", show=show)) + itemlist.append( + Item(channel='plusdede', title="Quitar marca", tipo="5", idtemp=idserie, token=token, valor="nothing", + action="plusdede_check", show=show)) + itemlist.append( + Item(channel='plusdede', title="Añadir a lista", tipo="5", tipo_esp="lista", idtemp=idserie, token=token, + action="plusdede_check", show=show)) + return itemlist + + +def parse_listas(item, bloque_lista): + logger.info() + + if item.tipo == "populares": + patron = '<div class="lista(.*?)</div>\s*</h4>' + else: + patron = '<div class="lista(.*?)</h4>\s*</div>' + matches = re.compile(patron, re.DOTALL).findall(bloque_lista) + itemlist = [] + + for lista in matches: + scrapedurl = scrapertools.htmlclean(scrapertools.find_single_match(lista, '<a href="([^"]+)">[^<]+</a>')) + scrapedtitle = scrapertools.find_single_match(lista, '<a href="[^"]+">([^<]+)</a>') + scrapedfollowers = scrapertools.find_single_match(lista, 'Follow: <span class="number">([^<]+)') + scrapedseries = 
scrapertools.find_single_match(lista, '<div class="lista-stat badge">Series: ([^<]+)') + scrapedpelis = scrapertools.find_single_match(lista, '<div class="lista-stat badge">Pelis: ([^<]+)') + + title = scrapertools.htmlclean(scrapedtitle) + ' (' + if scrapedpelis != '': + title += scrapedpelis + ' pelis, ' + if scrapedseries != '': + title += scrapedseries + ' series, ' + if scrapedfollowers != '': + title += scrapedfollowers + ' seguidores' + title += ')' + url = urlparse.urljoin("https://www.plusdede.com", scrapedurl) + thumbnail = "" + itemlist.append( + Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url)) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "], tipo =[lista]") + + nextpage = scrapertools.find_single_match(bloque_lista, + '<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"') + if nextpage != '': + url = urlparse.urljoin("https://www.plusdede.com", nextpage) + itemlist.append(Item(channel=item.channel, action="lista_sig", token=item.token, tipo=item.tipo, + title=">> Página siguiente", extra=item.extra, url=url)) + + try: + import xbmcplugin + xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED) + xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE) + except: + pass + + return itemlist + + +def listas(item): + logger.info() + if item.tipo == "tuslistas": + patron = 'Tus listas(.*?)>Listas que sigues<' + elif item.tipo == "siguiendo": + patron = '<h3>Listas que sigues</h3>(.*?)<h2>Listas populares</h2>' + else: + patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>' + + data = httptools.downloadpage(item.url).data + logger.debug("dataSINHEADERS=" + data) + + item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip() + logger.debug("token_LISTA_" + item.token) + + bloque_lista = scrapertools.find_single_match(data, patron) + logger.debug("bloque_LISTA" + bloque_lista) + + return parse_listas(item, bloque_lista) + + +def lista_sig(item): + logger.info() + + headers = {"X-Requested-With": "XMLHttpRequest"} + data = httptools.downloadpage(item.url, headers=headers).data + logger.debug("data=" + data) + + return parse_listas(item, data) + + +def pag_sig(item): + logger.info() + + headers = {"X-Requested-With": "XMLHttpRequest"} + data = httptools.downloadpage(item.url, headers=headers).data + logger.debug("data=" + data) + + return parse_mixed_results(item, data) + + +def findvideos(item, verTodos=False): + logger.info() + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + logger.info(data) + # logger.debug("data="+data) + + data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"') + data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"') + trailer = "https://www.youtube.com/watch?v=" + scrapertools.find_single_match(data, + 'data-youtube="([^"]+)" class="youtube-link') + + url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1" + + data = httptools.downloadpage(url).data + # logger.debug("dataLINKS"+data) + token = scrapertools.find_single_match(data, '_token" content="([^"]+)"') + + patron = '<a target="_blank" (.*?)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + idpeli = data_id + if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")) and data_model == "4": + 
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url, + thumbnail=item.thumbnail, fanart=item.fanart, folder=False)) + + logger.debug("TRAILER_YOUTUBE:" + trailer) + itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer, + thumbnail=item.thumbnail, fanart=item.fanart, folder=False)) + + itemsort = [] + sortlinks = config.get_setting("plusdedesortlinks", + item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion + showlinks = config.get_setting("plusdedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar + + # sortlinks = int(sortlinks) if sortlinks != '' and sortlinks !="No" else 0 + # showlinks = int(showlinks) if showlinks != '' and showlinks !="No" else 0 + + if sortlinks != '' and sortlinks != "No": + sortlinks = int(sortlinks) + else: + sortlinks = 0 + + if showlinks != '' and showlinks != "No": + showlinks = int(showlinks) + else: + showlinks = 0 + + for match in matches: + # logger.debug("match="+match) + + jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)') + if (showlinks == 1 and jdown != '') or ( + showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar + continue + idioma_1 = "" + idiomas = re.compile('<img src="https://cdn.plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match) + idioma_0 = idiomas[0] + if len(idiomas) > 1: + idioma_1 = idiomas[1] + idioma = idioma_0 + ", SUB " + idioma_1 + else: + idioma_1 = '' + idioma = idioma_0 + + calidad_video = scrapertools.find_single_match(match, + '<span class="fa fa-video-camera"></span>(.*?)</div>').replace( + " ", "").replace("\n", "") + logger.debug("calidad_video=" + calidad_video) + calidad_audio = scrapertools.find_single_match(match, + '<span class="fa fa-headphones"></span>(.*?)</div>').replace( + " ", "").replace("\n", "") + logger.debug("calidad_audio=" + calidad_audio) + + thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">') + logger.debug("thumb_servidor=" + thumb_servidor) + nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png") + logger.debug("nombre_servidor=" + nombre_servidor) + + if jdown != '': + title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")" + else: + title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")" + + valoracion = 0 + + reports = scrapertools.find_single_match(match, + '<i class="fa fa-exclamation-triangle"></i><br/>\s*<span class="number" data-num="([^"]*)">') + valoracion -= int(reports) + title += " (" + reports + " reps)" + + url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"')) + thumbnail = thumb_servidor + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + if sortlinks > 0: + # orden1 para dejar los "downloads" detras de los "ver" al ordenar + # orden2 segun configuración + if sortlinks == 1: + orden = valoracion + elif sortlinks == 2: + orden = valora_idioma(idioma_0, idioma_1) + elif sortlinks == 3: + orden = valora_calidad(calidad_video, calidad_audio) + elif sortlinks == 4: + orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio) + elif sortlinks == 5: + orden = (valora_idioma(idioma_0, idioma_1) * 
1000) + valoracion + elif sortlinks == 6: + orden = (valora_idioma(idioma_0, idioma_1) * 100000) + ( + valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion + itemsort.append( + {'action': "play", 'title': title, 'data_id': data_id, 'token': token, 'tipo': data_model, 'url': url, + 'thumbnail': thumbnail, 'fanart': item.fanart, 'plot': plot, 'extra': item.url, + 'fulltitle': item.fulltitle, 'orden1': (jdown == ''), 'orden2': orden}) + else: + itemlist.append( + Item(channel=item.channel, action="play", data_id=data_id, token=token, tipo=data_model, title=title, + url=url, thumbnail=thumbnail, fanart=item.fanart, plot=plot, extra=item.url, + fulltitle=item.fulltitle)) + + if sortlinks > 0: + numberlinks = config.get_setting("plusdedenumberlinks", item.channel) # 0:todos, > 0:n*5 (5,10,15,20,...) + # numberlinks = int(numberlinks) if numberlinks != '' and numberlinks !="No" else 0 + if numberlinks != '' and numberlinks != "No": + numberlinks = int(numberlinks) + else: + numberlinks = 0 + + if numberlinks == 0: + verTodos = True + itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True) + for i, subitem in enumerate(itemsort): + if verTodos == False and i >= numberlinks: + itemlist.append( + Item(channel=item.channel, action='findallvideos', title='Ver todos los enlaces', url=item.url, + extra=item.extra)) + break + itemlist.append( + Item(channel=item.channel, action=subitem['action'], title=subitem['title'], data_id=subitem['data_id'], + token=subitem['token'], tipo=subitem['tipo'], url=subitem['url'], thumbnail=subitem['thumbnail'], + fanart=subitem['fanart'], plot=subitem['plot'], extra=subitem['extra'], + fulltitle=subitem['fulltitle'])) + + if data_model == "4": + itemlist.append( + Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Pendiente", + valor="pending", idtemp=idpeli)) + itemlist.append( + Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Vista", + valor="seen", idtemp=idpeli)) + itemlist.append( + Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Marcar como Favorita", + valor="favorite", idtemp=idpeli)) + itemlist.append(Item(channel=item.channel, action="plusdede_check", tipo="4", token=token, title="Quitar Marca", + valor="nothing", idtemp=idpeli)) + itemlist.append( + Item(channel='plusdede', title="Añadir a lista", tipo="4", tipo_esp="lista", idtemp=idpeli, token=token, + action="plusdede_check")) + return itemlist + + +def findallvideos(item): + return findvideos(item, True) + + +def play(item): + itemlist = [] + if "trailer" in item: + url = item.trailer + itemlist = servertools.find_video_items(data=url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist + else: + logger.info("url=" + item.url) + + # Hace la llamada + headers = {'Referer': item.extra} + + data = httptools.downloadpage(item.url, headers=headers).data + # logger.debug("dataLINK="+data) + url = scrapertools.find_single_match(data, + '<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>') + url = urlparse.urljoin("https://www.plusdede.com", url) + # logger.debug("DATA_LINK_FINAL:"+url) + + logger.debug("URL_PLAY:" + url) + headers = {'Referer': item.url} + media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location") + # 
logger.info("media_url="+media_url) + + itemlist = servertools.find_video_items(data=media_url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + # Marcar como visto + logger.debug(item) + checkseen(item) + + return itemlist + + +def checkseen(item): + logger.info(item) + url_temp = "" + if item.tipo == "8": + url_temp = "https://www.plusdede.com/set/episode/" + item.data_id + "/seen" + tipo_str = "series" + headers = {"Referer": "https://www.plusdede.com/serie/", "X-Requested-With": "XMLHttpRequest", + "X-CSRF-TOKEN": item.token} + else: + url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen" + tipo_str = "pelis" + headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest", + "X-CSRF-TOKEN": item.token} + logger.debug("Entrando a checkseen " + url_temp + item.token) + data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data + return True + + +def infosinopsis(item): + logger.info() + + data = httptools.downloadpage(item.url).data + logger.debug("SINOPSISdata=" + data) + + scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>') + scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>') + scrapedyear = scrapertools.find_single_match(data, + '<strong>Fecha</strong>\s*<div class="mini-content">([^<]+)</div>').strip() + scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data, + '<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace( + " ", "").replace("\n", "")) + logger.debug(scrapedduration) + scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip() + logger.debug("SINOPSISdataplot=" + scrapedplot) + generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>') + logger.debug("generos=" + generos) + scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos) + scrapedcasting = re.compile( + '<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>', + re.DOTALL).findall(data) + title = scrapertools.htmlclean(scrapedtitle) + plot = "[B]Año: [/B]" + scrapedyear + plot += " [B]Duración: [/B]" + scrapedduration + plot += " [B]Puntuación usuarios: [/B]" + scrapedvalue + plot += "\n[B]Géneros: [/B]" + ", ".join(scrapedgenres) + plot += "\n\n[B]Sinopsis:[/B]\n" + scrapertools.htmlclean(scrapedplot) + plot += "\n\n[B]Casting:[/B]\n" + for actor, papel in scrapedcasting: + plot += actor + " (" + papel.strip() + ")\n" + + tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default") + tbd.ask(title, plot) + del tbd + return + + +try: + import xbmcgui + + + class TextBox(xbmcgui.WindowXML): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + pass + + def onInit(self): + try: + self.getControl(5).setText(self.text) + self.getControl(1).setLabel(self.title) + except: + pass + + def onClick(self, controlId): + pass + + def onFocus(self, controlId): + pass + + def onAction(self, action): + if action == 7: + self.close() + + def ask(self, title, text): + self.title = title + self.text = text + self.doModal() +except: + pass + + +# Valoraciones de enlaces, los valores más altos se mostrarán primero : + +def valora_calidad(video, audio): 
+    prefs_video = ['hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener']
+    prefs_audio = ['dts', '5.1', 'rip', 'line', 'screener']
+
+    video = ''.join(video.split()).lower()
+    # pts = (9 - prefs_video.index(video) if video in prefs_video else 1) * 10
+    if video in prefs_video:
+        pts = (9 - prefs_video.index(video)) * 10
+    else:
+        pts = 10
+
+    audio = ''.join(audio.split()).lower()
+    # pts += 9 - prefs_audio.index(audio) if audio in prefs_audio else 1
+    if audio in prefs_audio:
+        pts += 9 - prefs_audio.index(audio)
+    else:
+        pts += 1
+
+    return pts
+
+
+def valora_idioma(idioma_0, idioma_1):
+    prefs = ['spanish', 'latino', 'catalan', 'english', 'french']
+    # pts = (9 - prefs.index(idioma_0) if idioma_0 in prefs else 1) * 10
+    if idioma_0 in prefs:
+        pts = (9 - prefs.index(idioma_0)) * 10
+    else:
+        pts = 10
+
+    if idioma_1 != '':  # si hay subtítulos
+        idioma_1 = idioma_1.replace(' SUB', '')
+
+        # pts += 8 - prefs.index(idioma_1) if idioma_1 in prefs else 1
+        if idioma_1 in prefs:
+            pts += 8 - prefs.index(idioma_1)
+        else:
+            pts += 1
+
+    else:
+        pts += 9  # sin subtítulos por delante
+    return pts
+
+
+def plusdede_check(item):
+    if item.tipo_esp == "lista":
+        url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
+        data = httptools.downloadpage(url_temp).data
+        logger.debug("DATA_CHECK_LISTA:" + data)
+
+        patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
+        patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+        itemlist = []
+        for id_lista, nombre_lista in matches:
+            itemlist.append(Item(channel=item.channel, action="plusdede_check", tipo=item.tipo, tipo_esp="add_list",
+                                 token=item.token, title=nombre_lista, idlista=id_lista, idtemp=item.idtemp))
+        if len(itemlist) < 1:
+            itemlist.append(Item(channel=item.channel, action="", title="No tienes ninguna lista creada por ti!"))
+        return itemlist
+    else:
+
+        if item.tipo == "10" or item.tipo == "lista":
+            url_temp = "https://www.plusdede.com/set/lista/" + item.idtemp + "/" + item.valor
+        else:
+            if (item.tipo_esp == "add_list"):
+                url_temp = "https://www.plusdede.com/set/listamedia/" + item.idlista + "/add/" + item.tipo + "/" + item.idtemp
+            else:
+                url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.idtemp + "/" + item.valor
+        # httptools.downloadpage(url_temp, post="id="+item.idtemp)
+        if item.tipo == "5":
+            tipo_str = "series"
+        elif item.tipo == "lista":
+            tipo_str = "listas"
+        else:
+            tipo_str = "pelis"
+        headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
+                   "X-CSRF-TOKEN": item.token}
+        data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
+                                      replace_headers=True).data.strip()
+        logger.debug("URL_PLUSDEDECHECK_DATA=" + url_temp + " ITEM:TIPO=" + item.tipo)
+        logger.debug("PLUSDEDECHECK_DATA=" + data)
+        dialog = platformtools
+        dialog.ok = platformtools.dialog_ok
+        if data == "1":
+            if item.valor == "nothing":
+                dialog.ok('SUCCESS', 'Marca eliminada con éxito!')
+            elif item.valor == "unfollow":
+                dialog.ok('SUCCESS', 'Has dejado de seguir esta lista!')
+            elif item.valor == "follow":
+                dialog.ok('SUCCESS', 'Has comenzado a seguir esta lista!')
+            elif item.tipo_esp == "add_list":
+                dialog.ok('SUCCESS', 'Añadido a la lista!')
+            else:
+                dialog.ok('SUCCESS', 'Marca realizada con éxito!')
+        else:
+            dialog.ok('ERROR', 'No se pudo
realizar la acción!') \ No newline at end of file diff --git a/plugin.video.alfa/channels/pordede.json b/plugin.video.alfa/channels/pordede.json new file mode 100755 index 00000000..65a34761 --- /dev/null +++ b/plugin.video.alfa/channels/pordede.json @@ -0,0 +1,108 @@ +{ + "id": "pordede", + "name": "Pordede", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "pordede.png", + "banne": "pordede.png", + "version": 1, + "changes": [ + { + "date": "14/05/2017", + "description": "Eliminada la necesidad de captcha para el login" + }, + { + "date": "09/05/2017", + "description": "Corregida sección siguientes capitulos" + }, + { + "date": "29/03/2017", + "description": "Modificado para loguearse a través de captcha, solo funciona en Kodi" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "pordedeuser", + "type": "text", + "label": "@30014", + "enabled": true, + "visible": true + }, + { + "id": "pordedepassword", + "type": "text", + "hidden": true, + "label": "@30015", + "enabled": "!eq(-1,'')", + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": "!eq(-1,'') + !eq(-2,'')", + "visible": true + }, + { + "id": "pordedesortlinks", + "type": "list", + "label": "Ordenar enlaces", + "default": 0, + "enabled": true, + "visible": "!eq(-2,'') + !eq(-3,'')", + "lvalues": [ + "No", + "Por Valoración", + "Por Idioma", + "Por Calidad", + "Por Idioma y Calidad", + "Por Idioma y Valoración", + "Por Idioma, Calidad y Valoración" + ] + }, + { + "id": "pordedeshowlinks", + "type": "list", + "label": "Mostrar enlaces", + "default": 0, + "enabled": true, + "visible": "!eq(-3,'') + !eq(-4,'')", + "lvalues": [ + "Todos", + "Ver online", + "Descargar" + ] + }, + { + "id": "pordedenumberlinks", + "type": "list", + "label": "Limitar número de enlaces", + "default": 0, + "enabled": true, + "visible": "!eq(-4,'') + !eq(-5,'')", + "lvalues": [ + "No", + "5", + "10", + "15", + "20", + "25", + "30" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pordede.py b/plugin.video.alfa/channels/pordede.py new file mode 100755 index 00000000..3c311cb1 --- /dev/null +++ b/plugin.video.alfa/channels/pordede.py @@ -0,0 +1,835 @@ +# -*- coding: utf-8 -*- + +import os +import re +import sys +import urlparse + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import platformtools + + +def login(): + url_origen = "http://www.pordede.com" + data = httptools.downloadpage(url_origen).data + if config.get_setting("pordedeuser", "pordede") in data: + return True + + url = "http://www.pordede.com/api/login/auth?response_type=code&client_id=appclient&redirect_uri=http%3A%2F%2Fwww.pordede.com%2Fapi%2Flogin%2Freturn&state=none" + post = "username=%s&password=%s&authorized=autorizar" % ( + config.get_setting("pordedeuser", "pordede"), config.get_setting("pordedepassword", "pordede")) + data = httptools.downloadpage(url, post).data + if '"ok":true' in data: + return True + else: + return False + + +def mainlist(item): + logger.info() + + itemlist = [] + + if config.get_setting("pordedeuser", "pordede") == "": + itemlist.append( + Item(channel=item.channel, 
title="Habilita tu cuenta en la configuración...", action="settingCanal", + url="")) + else: + result = login() + if not result: + itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar...")) + return itemlist + itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url="")) + itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas y documentales", url="")) + itemlist.append(Item(channel=item.channel, action="listas_sigues", title="Listas que sigues", + url="http://www.pordede.com/lists/following")) + itemlist.append(Item(channel=item.channel, action="tus_listas", title="Tus listas", + url="http://www.pordede.com/lists/yours")) + itemlist.append( + Item(channel=item.channel, action="listas_sigues", title="Top listas", url="http://www.pordede.com/lists")) + itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="")) + return itemlist + + +def settingCanal(item): + return platformtools.show_channel_settings() + + +def menuseries(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", + url="http://www.pordede.com/series/loadmedia/offset/0/showlist/hot")) + itemlist.append( + Item(channel=item.channel, action="generos", title="Por géneros", url="http://www.pordede.com/series")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Siguiendo", + url="http://www.pordede.com/series/following")) + itemlist.append(Item(channel=item.channel, action="siguientes", title="Siguientes Capítulos", + url="http://www.pordede.com/main/index", viewmode="movie")) + itemlist.append( + Item(channel=item.channel, action="peliculas", title="Favoritas", url="http://www.pordede.com/series/favorite")) + itemlist.append( + Item(channel=item.channel, action="peliculas", title="Pendientes", url="http://www.pordede.com/series/pending")) + itemlist.append( + Item(channel=item.channel, action="peliculas", title="Terminadas", url="http://www.pordede.com/series/seen")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Recomendadas", + url="http://www.pordede.com/series/recommended")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://www.pordede.com/series")) + + return itemlist + + +def menupeliculas(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", + url="http://www.pordede.com/pelis/loadmedia/offset/0/showlist/hot")) + itemlist.append( + Item(channel=item.channel, action="generos", title="Por géneros", url="http://www.pordede.com/pelis")) + itemlist.append( + Item(channel=item.channel, action="peliculas", title="Favoritas", url="http://www.pordede.com/pelis/favorite")) + itemlist.append( + Item(channel=item.channel, action="peliculas", title="Pendientes", url="http://www.pordede.com/pelis/pending")) + itemlist.append( + Item(channel=item.channel, action="peliculas", title="Vistas", url="http://www.pordede.com/pelis/seen")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Recomendadas", + url="http://www.pordede.com/pelis/recommended")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://www.pordede.com/pelis")) + + return itemlist + + +def generos(item): + logger.info() + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + logger.debug("data=" + data) + + # Extrae las entradas 
(carpetas) + data = scrapertools.find_single_match(data, '<div class="section genre">(.*?)</div>') + patron = '<a class="mediaFilterLink" data-value="([^"]+)" href="([^"]+)">([^<]+)<span class="num">\((\d+)\)</span></a>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for textid, scrapedurl, scrapedtitle, cuantos in matches: + title = scrapedtitle.strip() + " (" + cuantos + ")" + thumbnail = "" + plot = "" + # http://www.pordede.com/pelis/loadmedia/offset/30/genre/science%20fiction/showlist/all?popup=1 + if "/pelis" in item.url: + url = "http://www.pordede.com/pelis/loadmedia/offset/0/genre/" + textid.replace(" ", + "%20") + "/showlist/all" + else: + url = "http://www.pordede.com/series/loadmedia/offset/0/genre/" + textid.replace(" ", + "%20") + "/showlist/all" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title)) + + return itemlist + + +def search(item, texto): + logger.info() + + if item.url == "": + item.url = "http://www.pordede.com/pelis" + + texto = texto.replace(" ", "-") + + # Mete el referer en item.extra + item.extra = item.url + item.url = item.url + "/loadmedia/offset/0/query/" + texto + "/years/1950/on/undefined/showlist/all" + try: + return buscar(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscar(item): + logger.info() + + # Descarga la pagina + headers = {"X-Requested-With": "XMLHttpRequest"} + data = httptools.downloadpage(item.url, headers=headers).data + logger.debug("data=" + data) + + # Extrae las entradas (carpetas) + json_object = jsontools.load(data) + logger.debug("html=" + json_object["html"]) + data = json_object["html"] + + return parse_mixed_results(item, data) + + +def parse_mixed_results(item, data): + patron = '<a class="defaultLink extended" href="([^"]+)"[^<]+' + patron += '<div class="coverMini shadow tiptip" title="([^"]+)"[^<]+' + patron += '<img class="centeredPic.*?src="([^"]+)"' + patron += '[^<]+<img[^<]+<div class="extra-info">' + patron += '<span class="year">([^<]+)</span>' + patron += '<span class="value"><i class="icon-star"></i>([^<]+)</span>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedvalue in matches: + title = scrapertools.htmlclean(scrapedtitle) + if scrapedyear != '': + title += " (" + scrapedyear + ")" + fulltitle = title + if scrapedvalue != '': + title += " (" + scrapedvalue + ")" + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + fanart = thumbnail.replace("mediathumb", "mediabigcover") + plot = "" + # http://www.pordede.com/peli/the-lego-movie + # http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1 + + if "/peli/" in scrapedurl or "/docu/" in scrapedurl: + + # sectionStr = "peli" if "/peli/" in scrapedurl else "docu" + if "/peli/" in scrapedurl: + sectionStr = "peli" + else: + sectionStr = "docu" + + referer = urlparse.urljoin(item.url, scrapedurl) + url = referer.replace("/{0}/".format(sectionStr), "/links/view/slug/") + "/what/{0}".format(sectionStr) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url, + 
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart, + contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"])) + else: + referer = item.url + url = urlparse.urljoin(item.url, scrapedurl) + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url, thumbnail=thumbnail, + plot=plot, fulltitle=fulltitle, show=title, fanart=fanart, + contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"])) + + next_page = scrapertools.find_single_match(data, '<div class="loadingBar" data-url="([^"]+)"') + if next_page != "": + url = urlparse.urljoin("http://www.pordede.com", next_page) + itemlist.append( + Item(channel=item.channel, action="lista", title=">> Página siguiente", extra=item.extra, url=url)) + + try: + import xbmcplugin + xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED) + xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE) + except: + pass + + return itemlist + + +def siguientes(item): + logger.info() + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + logger.debug("data=" + data) + + # Extrae las entradas (carpetas) + bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">') + patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+' + patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+' + patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+' + patron += '<div class="extra-info"><span class="year">[^<]+' + patron += '</span><span class="value"><i class="icon-star"></i>[^<]+' + patron += '</span></div>[^<]+' + patron += '</div>[^<]+' + patron += '</a>[^<]+' + patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + # for scrapedurl,scrapedtitle,scrapedthumbnail in matches: + for scrapedtitle, scrapedthumbnail, scrapedurl, scrapedsession, scrapedepisode in matches: + title = scrapertools.htmlclean(scrapedtitle) + session = scrapertools.htmlclean(scrapedsession) + episode = scrapertools.htmlclean(scrapedepisode) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + fanart = thumbnail.replace("mediathumb", "mediabigcover") + plot = "" + title = session + "x" + episode + " - " + title + # http://www.pordede.com/peli/the-lego-movie + # http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1 + + referer = urlparse.urljoin(item.url, scrapedurl) + url = referer + # itemlist.append( Item(channel=item.channel, action="episodios" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title)) + itemlist.append( + Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode)) + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + return itemlist + + +def episodio(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + logger.debug("data=" + data) + + session = str(int(item.extra.split("|")[0])) + episode = str(int(item.extra.split("|")[1])) + patrontemporada = '<div class="checkSeason"[^>]+>Temporada ' + session + '<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>' + matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data) + + for bloque_episodios in 
matchestemporadas: + logger.debug("bloque_episodios=" + bloque_episodios) + + # Extrae los episodios + patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?' + matches = re.compile(patron, re.DOTALL).findall(bloque_episodios) + + for scrapedurl, scrapedtitle, info, visto in matches: + # visto_string = "[visto] " if visto.strip()=="active" else "" + if visto.strip() == "active": + visto_string = "[visto] " + else: + visto_string = "" + numero = episode + title = visto_string + session + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle) + thumbnail = "" + plot = "" + # http://www.pordede.com/peli/the-lego-movie + # http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1 + # http://www.pordede.com/links/viewepisode/id/475011?popup=1 + epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)") + url = "http://www.pordede.com/links/viewepisode/id/" + epid + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, fanart=item.fanart, show=item.show)) + logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist2 = [] + for capitulo in itemlist: + itemlist2 = findvideos(capitulo) + return itemlist2 + + +def peliculas(item): + logger.info() + + # Descarga la pagina + headers = {"X-Requested-With": "XMLHttpRequest"} + data = httptools.downloadpage(item.url, headers=headers).data + logger.debug("data=" + data) + + # Extrae las entradas (carpetas) + json_object = jsontools.load(data) + logger.debug("html=" + json_object["html"]) + data = json_object["html"] + + return parse_mixed_results(item, data) + + +def episodios(item): + logger.info() + itemlist = [] + + # Descarga la pagina + idserie = '' + data = httptools.downloadpage(item.url).data + logger.debug("data=" + data) + + patrontemporada = '<div class="checkSeason"[^>]+>([^<]+)<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>' + matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data) + + idserie = scrapertools.find_single_match(data, + '<div id="layout4" class="itemProfile modelContainer" data-model="serie" data-id="(\d+)"') + + for nombre_temporada, bloque_episodios in matchestemporadas: + logger.debug("nombre_temporada=" + nombre_temporada) + logger.debug("bloque_episodios=" + bloque_episodios) + + # Extrae los episodios + patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">([^<]+)</span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?' 
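+        # Capture groups: (1) popup href carrying the episode id, (2) episode
+        # number, (3) episode title, (4) an optional "seen" markup block and
+        # (5) its state class, which is "active" when the episode is already
+        # marked as watched.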
+ matches = re.compile(patron, re.DOTALL).findall(bloque_episodios) + + for scrapedurl, numero, scrapedtitle, info, visto in matches: + # visto_string = "[visto] " if visto.strip()=="active" else "" + if visto.strip() == "active": + visto_string = "[visto] " + else: + visto_string = "" + + title = visto_string + nombre_temporada.replace("Temporada ", "").replace("Extras", + "Extras 0") + "x" + numero + " " + scrapertools.htmlclean( + scrapedtitle) + thumbnail = item.thumbnail + fanart = item.fanart + plot = "" + # http://www.pordede.com/peli/the-lego-movie + # http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1 + # http://www.pordede.com/links/viewepisode/id/475011?popup=1 + epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)") + url = "http://www.pordede.com/links/viewepisode/id/" + epid + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + fulltitle=title, fanart=fanart, show=item.show)) + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + if config.get_videolibrary_support(): + # con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta + # Sin año y sin valoración: + show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show) + # Sin año: + # show = re.sub(r"\s\(\d+\)", "", item.show) + # Sin valoración: + # show = re.sub(r"\s\(\d+\.\d+\)", "", item.show) + itemlist.append(Item(channel='pordede', title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios###", show=show)) + itemlist.append(Item(channel='pordede', title="Descargar todos los episodios de la serie", url=item.url, + action="download_all_episodes", extra="episodios", show=show)) + itemlist.append(Item(channel='pordede', title="Marcar como Pendiente", tipo="serie", idtemp=idserie, valor="1", + action="pordede_check", show=show)) + itemlist.append(Item(channel='pordede', title="Marcar como Siguiendo", tipo="serie", idtemp=idserie, valor="2", + action="pordede_check", show=show)) + itemlist.append(Item(channel='pordede', title="Marcar como Finalizada", tipo="serie", idtemp=idserie, valor="3", + action="pordede_check", show=show)) + itemlist.append(Item(channel='pordede', title="Marcar como Favorita", tipo="serie", idtemp=idserie, valor="4", + action="pordede_check", show=show)) + itemlist.append(Item(channel='pordede', title="Quitar marca", tipo="serie", idtemp=idserie, valor="0", + action="pordede_check", show=show)) + + return itemlist + + +def parse_listas(item, patron): + logger.info() + + # Descarga la pagina + headers = {"X-Requested-With": "XMLHttpRequest"} + data = httptools.downloadpage(item.url, headers=headers).data + logger.debug("data=" + data) + + # Extrae las entradas (carpetas) + json_object = jsontools.load(data) + logger.debug("html=" + json_object["html"]) + data = json_object["html"] + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle, scrapeduser, scrapedfichas in matches: + title = scrapertools.htmlclean(scrapedtitle + ' (' + scrapedfichas + ' fichas, por ' + scrapeduser + ')') + url = urlparse.urljoin(item.url, scrapedurl) + "/offset/0/loadmedia" + thumbnail = "" + itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url)) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + nextpage = scrapertools.find_single_match(data, 
'data-url="(/lists/loadlists/offset/[^"]+)"') + if nextpage != '': + url = urlparse.urljoin(item.url, nextpage) + itemlist.append( + Item(channel=item.channel, action="listas_sigues", title=">> Página siguiente", extra=item.extra, url=url)) + + try: + import xbmcplugin + xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED) + xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE) + except: + pass + + return itemlist + + +def listas_sigues(item): + logger.info() + + patron = '<div class="clearfix modelContainer" data-model="lista"[^<]+' + patron += '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>' + patron += '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)' + + return parse_listas(item, patron) + + +def tus_listas(item): + logger.info() + + patron = '<div class="clearfix modelContainer" data-model="lista"[^<]+' + patron += '<div class="right"[^<]+' + patron += '<button[^<]+</button[^<]+' + patron += '<button[^<]+</button[^<]+' + patron += '</div[^<]+' + patron += '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>' + patron += '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)' + + return parse_listas(item, patron) + + +def lista(item): + logger.info() + + # Descarga la pagina + headers = {"X-Requested-With": "XMLHttpRequest"} + data = httptools.downloadpage(item.url, headers=headers).data + logger.debug("data=" + data) + + # Extrae las entradas (carpetas) + json_object = jsontools.load(data) + logger.debug("html=" + json_object["html"]) + data = json_object["html"] + + return parse_mixed_results(item, data) + + +def findvideos(item, verTodos=False): + logger.info() + # logger.debug(item.tostring('\n')) + + # Descarga la pagina + data = httptools.downloadpage(item.url).data + logger.info(data) + # logger.debug("data="+data) + + # Extrae las entradas (carpetas) + # json_object = jsontools.load_json(data) + # logger.debug("html="+json_object["html"]) + # data = json_object["html"] + + sesion = scrapertools.find_single_match(data, 'SESS = "([^"]+)";') + logger.debug("sesion=" + sesion) + + patron = '<a target="_blank" class="a aporteLink(.*?)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + idpeli = scrapertools.find_single_match(data, + '<div class="buttons"><button class="defaultPopup onlyLogin" href="/links/create/ref_id/(\d+)/ref_model/4">Añadir enlace') + + if (config.get_platform().startswith("xbmc") or config.get_platform().startswith( + "kodi")) and "/what/peli" in item.url: + itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url, + thumbnail=item.thumbnail, fanart=item.fanart, folder=False)) + + itemsort = [] + sortlinks = config.get_setting("pordedesortlinks", + item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion + showlinks = config.get_setting("pordedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar + + # sortlinks = int(sortlinks) if sortlinks != '' and sortlinks !="No" else 0 + # showlinks = int(showlinks) if showlinks != '' and showlinks !="No" else 0 + + if sortlinks != '' and sortlinks != "No": + sortlinks = int(sortlinks) + else: + sortlinks = 0 + + if showlinks != '' and showlinks != "No": + showlinks = int(showlinks) + else: + showlinks = 0 + + for match in matches: + 
logger.debug("match=" + match) + + jdown = scrapertools.find_single_match(match, '<div class="jdownloader">[^<]+</div>') + if (showlinks == 1 and jdown != '') or ( + showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar + continue + + idiomas = re.compile('<div class="flag([^"]+)">([^<]+)</div>', re.DOTALL).findall(match) + idioma_0 = ( + idiomas[0][0].replace(" ", "").strip() + " " + idiomas[0][1].replace(" ", "").strip()).strip() + if len(idiomas) > 1: + idioma_1 = ( + idiomas[1][0].replace(" ", "").strip() + " " + idiomas[1][1].replace(" ", "").strip()).strip() + idioma = idioma_0 + ", " + idioma_1 + else: + idioma_1 = '' + idioma = idioma_0 + + calidad_video = scrapertools.find_single_match(match, + '<div class="linkInfo quality"><i class="icon-facetime-video"></i>([^<]+)</div>') + logger.debug("calidad_video=" + calidad_video) + calidad_audio = scrapertools.find_single_match(match, + '<div class="linkInfo qualityaudio"><i class="icon-headphones"></i>([^<]+)</div>') + logger.debug("calidad_audio=" + calidad_audio) + + thumb_servidor = scrapertools.find_single_match(match, '<div class="hostimage"[^<]+<img\s*src="([^"]+)">') + logger.debug("thumb_servidor=" + thumb_servidor) + nombre_servidor = scrapertools.find_single_match(thumb_servidor, "popup_([^\.]+)\.png") + logger.debug("nombre_servidor=" + nombre_servidor) + + # title = ("Download " if jdown != '' else "Ver en ")+nombre_servidor+" ("+idioma+") (Calidad "+calidad_video.strip()+", audio "+calidad_audio.strip()+")" + if jdown != '': + title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")" + else: + title = "Ver en " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")" + + cuenta = [] + valoracion = 0 + for idx, val in enumerate(['1', '2', 'report']): + nn = scrapertools.find_single_match(match, + '<span\s+data-num="([^"]+)"\s+class="defaultPopup"\s+href="/likes/popup/value/' + val + '/') + if nn != '0' and nn != '': + cuenta.append(nn + ' ' + ['ok', 'ko', 'rep'][idx]) + # valoracion += int(nn) if val == '1' else -int(nn) + if val == '1': + valoracion += int(nn) + else: + valoracion += -int(nn) + + if len(cuenta) > 0: + title += ' (' + ', '.join(cuenta) + ')' + + url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"')) + thumbnail = thumb_servidor + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + if sortlinks > 0: + # orden1 para dejar los "downloads" detras de los "ver" al ordenar + # orden2 segun configuración + if sortlinks == 1: + orden = valoracion + elif sortlinks == 2: + orden = valora_idioma(idioma_0, idioma_1) + elif sortlinks == 3: + orden = valora_calidad(calidad_video, calidad_audio) + elif sortlinks == 4: + orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio) + elif sortlinks == 5: + orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion + elif sortlinks == 6: + orden = (valora_idioma(idioma_0, idioma_1) * 100000) + ( + valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion + itemsort.append( + {'action': "play", 'title': title, 'url': url, 'thumbnail': thumbnail, 'fanart': item.fanart, + 'plot': plot, 'extra': sesion + "|" + item.url, 'fulltitle': item.fulltitle, 'orden1': (jdown == ''), + 'orden2': orden}) + else: + itemlist.append( + Item(channel=item.channel, action="play", title=title, url=url, 
thumbnail=thumbnail, fanart=item.fanart, + plot=plot, extra=sesion + "|" + item.url, fulltitle=item.fulltitle)) + + if sortlinks > 0: + numberlinks = config.get_setting("pordedenumberlinks", item.channel) # 0:todos, > 0:n*5 (5,10,15,20,...) + # numberlinks = int(numberlinks) if numberlinks != '' and numberlinks !="No" else 0 + if numberlinks != '' and numberlinks != "No": + numberlinks = int(numberlinks) + else: + numberlinks = 0 + + if numberlinks == 0: + verTodos = True + itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True) + for i, subitem in enumerate(itemsort): + if verTodos == False and i >= numberlinks: + itemlist.append( + Item(channel=item.channel, action='findallvideos', title='Ver todos los enlaces', url=item.url, + extra=item.extra)) + break + itemlist.append( + Item(channel=item.channel, action=subitem['action'], title=subitem['title'], url=subitem['url'], + thumbnail=subitem['thumbnail'], fanart=subitem['fanart'], plot=subitem['plot'], + extra=subitem['extra'], fulltitle=subitem['fulltitle'])) + + if "/what/peli" in item.url or "/what/docu" in item.url: + itemlist.append( + Item(channel=item.channel, action="pordede_check", tipo="peli", title="Marcar como Pendiente", valor="1", + idtemp=idpeli)) + itemlist.append( + Item(channel=item.channel, action="pordede_check", tipo="peli", title="Marcar como Vista", valor="3", + idtemp=idpeli)) + itemlist.append( + Item(channel=item.channel, action="pordede_check", tipo="peli", title="Marcar como Favorita", valor="4", + idtemp=idpeli)) + itemlist.append(Item(channel=item.channel, action="pordede_check", tipo="peli", title="Quitar Marca", valor="0", + idtemp=idpeli)) + + return itemlist + + +def findallvideos(item): + return findvideos(item, True) + + +def play(item): + logger.info("url=" + item.url) + + # Marcar como visto + checkseen(item.extra.split("|")[1]) + + # Hace la llamada + headers = {'Referer': item.extra.split("|")[1]} + + data = httptools.downloadpage(item.url, post="_s=" + item.extra.split("|")[0], headers=headers).data + url = scrapertools.find_single_match(data, '<p class="nicetry links">\s+<a href="([^"]+)" target="_blank"') + url = urlparse.urljoin(item.url, url) + + headers = {'Referer': item.url} + media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location") + logger.info("media_url=" + media_url) + + itemlist = servertools.find_video_items(data=media_url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist + + +def checkseen(item): + logger.info(item) + + if "/viewepisode/" in item: + episode = item.split("/")[-1] + httptools.downloadpage("http://www.pordede.com/ajax/action", + post="model=episode&id=" + episode + "&action=seen&value=1") + + if "/what/peli" in item: + data = httptools.downloadpage(item).data + # GET MOVIE ID + movieid = scrapertools.find_single_match(data, 'href="/links/create/ref_id/([0-9]+)/ref_model/') + httptools.downloadpage("http://www.pordede.com/ajax/mediaaction", + post="model=peli&id=" + movieid + "&action=status&value=3") + + return True + + +def infosinopsis(item): + logger.info() + + url_aux = item.url.replace("/links/view/slug/", "/peli/").replace("/what/peli", "") + # Descarga la pagina + + data = httptools.downloadpage(url_aux).data + logger.debug("data=" + data) + + scrapedtitle = scrapertools.find_single_match(data, '<h1>([^<]+)</h1>') + scrapedvalue = 
scrapertools.find_single_match(data, '<span class="puntuationValue" data-value="([^"]+)"')
+    scrapedyear = scrapertools.find_single_match(data, '<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>')
+    scrapedduration = scrapertools.find_single_match(data, '<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>',
+                                                     1)
+    scrapedplot = scrapertools.find_single_match(data, '<div class="info text"[^>]+>([^<]+)</div>')
+    # scrapedthumbnail = scrapertools.find_single_match(data,'<meta property="og:image" content="([^"]+)"')
+    # thumbnail = scrapedthumbnail.replace("http://www.pordede.comhttp://", "http://").replace("mediacover", "mediathumb")
+    scrapedgenres = re.compile('href="/pelis/index/genre/[^"]+">([^<]+)</a>', re.DOTALL).findall(data)
+    scrapedcasting = re.compile('href="/star/[^"]+">([^<]+)</a><br/><span>([^<]+)</span>', re.DOTALL).findall(data)
+
+    title = scrapertools.htmlclean(scrapedtitle)
+    plot = "Año: [B]" + scrapedyear + "[/B]"
+    plot += " , Duración: [B]" + scrapedduration + "[/B]"
+    plot += " , Puntuación usuarios: [B]" + scrapedvalue + "[/B]"
+    plot += "\nGéneros: " + ", ".join(scrapedgenres)
+    plot += "\n\nSinopsis:\n" + scrapertools.htmlclean(scrapedplot)
+    plot += "\n\nCasting:\n"
+    for actor, papel in scrapedcasting:
+        plot += actor + " (" + papel + "). "
+
+    tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
+    tbd.ask(title, plot)
+    del tbd
+    return
+
+
+try:
+    import xbmcgui
+
+
+    class TextBox(xbmcgui.WindowXML):
+        """ Create a skinned textbox window """
+
+        def __init__(self, *args, **kwargs):
+            pass
+
+        def onInit(self):
+            try:
+                self.getControl(5).setText(self.text)
+                self.getControl(1).setLabel(self.title)
+            except:
+                pass
+
+        def onClick(self, controlId):
+            pass
+
+        def onFocus(self, controlId):
+            pass
+
+        def onAction(self, action):
+            if action == 7:
+                self.close()
+
+        def ask(self, title, text):
+            self.title = title
+            self.text = text
+            self.doModal()
+except:
+    pass
+
+
+# Valoraciones de enlaces, los valores más altos se mostrarán primero :
+
+def valora_calidad(video, audio):
+    prefs_video = ['hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener']
+    prefs_audio = ['dts', '5.1', 'rip', 'line', 'screener']
+
+    video = ''.join(video.split()).lower()
+    # pts = (9 - prefs_video.index(video) if video in prefs_video else 1) * 10
+    if video in prefs_video:
+        pts = (9 - prefs_video.index(video)) * 10
+    else:
+        pts = 10
+
+    audio = ''.join(audio.split()).lower()
+    # pts += 9 - prefs_audio.index(audio) if audio in prefs_audio else 1
+    if audio in prefs_audio:
+        pts += 9 - prefs_audio.index(audio)
+    else:
+        pts += 1
+
+    return pts
+
+
+def valora_idioma(idioma_0, idioma_1):
+    prefs = ['spanish', 'spanish LAT', 'catalan', 'english', 'french']
+
+    # pts = (9 - prefs.index(idioma_0) if idioma_0 in prefs else 1) * 10
+    if idioma_0 in prefs:
+        pts = (9 - prefs.index(idioma_0)) * 10
+    else:
+        pts = 10
+
+    if idioma_1 != '':  # si hay subtítulos
+        idioma_1 = idioma_1.replace(' SUB', '')
+
+        # pts += 8 - prefs.index(idioma_1) if idioma_1 in prefs else 1
+        if idioma_1 in prefs:
+            pts += 8 - prefs.index(idioma_1)
+        else:
+            pts += 1
+
+    else:
+        pts += 9  # sin subtítulos por delante
+    return pts
+
+
+def pordede_check(item):
+    httptools.downloadpage("http://www.pordede.com/ajax/mediaaction",
+                           post="model=" + item.tipo + "&id=" + item.idtemp + "&action=status&value=" + item.valor)
diff --git a/plugin.video.alfa/channels/pornhub.json b/plugin.video.alfa/channels/pornhub.json
new file mode 100755
index 00000000..6a7f4be6 --- /dev/null +++ b/plugin.video.alfa/channels/pornhub.json @@ -0,0 +1,34 @@ +{ + "id": "pornhub", + "name": "PornHub", + "active": true, + "adult": true, + "language": "es", + "fanart": "http://i.imgur.com/PwFvoss.jpg", + "thumbnail": "http://s22.postimg.org/5lzcocfqp/pornhub_logo.jpg", + "banner": "pornhub.png", + "version": 1, + "changes": [ + { + "date": "29/04/2017", + "description": "Reparados enlaces a los vídeos" + }, + { + "date": "14/09/2016", + "description": "Cambios en la web" + } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pornhub.py b/plugin.video.alfa/channels/pornhub.py new file mode 100755 index 00000000..c2c2cb21 --- /dev/null +++ b/plugin.video.alfa/channels/pornhub.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", fanart=item.fanart, + url="http://es.pornhub.com/video?o=cm")) + itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", fanart=item.fanart, + url="http://es.pornhub.com/categories?o=al")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar", fanart=item.fanart, + url="http://es.pornhub.com/video/search?search=%s&o=mr")) + return itemlist + + +def search(item, texto): + logger.info() + + item.url = item.url % texto + try: + return peliculas(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def categorias(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = scrapertools.find_single_match(data, '<div id="categoriesStraightImages">(.*?)</ul>') + + # Extrae las categorias + patron = '<li class="cat_pic" data-category=".*?' + patron += '<a href="([^"]+)".*?' + patron += '<img src="([^"]+)".*?' + patron += 'alt="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + if "?" in scrapedurl: + url = urlparse.urljoin(item.url, scrapedurl + "&o=cm") + else: + url = urlparse.urljoin(item.url, scrapedurl + "?o=cm") + + itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, fanart=item.fanart, + thumbnail=scrapedthumbnail)) + + itemlist.sort(key=lambda x: x.title) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + videodata = scrapertools.find_single_match(data, 'videos search-video-thumbs">(.*?)<div class="reset"></div>') + + # Extrae las peliculas + patron = '<div class="phimage">.*?' + patron += '<a href="([^"]+)" title="([^"]+).*?' + patron += '<var class="duration">([^<]+)</var>(.*?)</div>.*?' 
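+    # Capture groups so far: (1) video URL, (2) title, (3) duration and (4) a
+    # block that may contain the HD badge; the fragment appended below adds
+    # (5) the thumbnail taken from data-mediumthumb.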
+    patron += 'data-mediumthumb="([^"]+)"'
+
+    matches = re.compile(patron, re.DOTALL).findall(videodata)
+
+    for url, scrapedtitle, duration, scrapedhd, thumbnail in matches:
+        title = scrapedtitle.replace("&amp;", "&") + " (" + duration + ")"
+
+        scrapedhd = scrapertools.find_single_match(scrapedhd, '<span class="hd-thumbnail">(.*?)</span>')
+        if scrapedhd == 'HD':
+            title += ' [HD]'
+
+        url = urlparse.urljoin(item.url, url)
+        itemlist.append(
+            Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart, thumbnail=thumbnail))
+
+    if itemlist:
+        # Pagination
+        patron = '<li class="page_next"><a href="([^"]+)"'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+        if matches:
+            url = urlparse.urljoin(item.url, matches[0].replace('&amp;', '&'))
+            itemlist.append(
+                Item(channel=item.channel, action="peliculas", title=">> Página siguiente", fanart=item.fanart,
+                     url=url))
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+    itemlist = []
+
+    # Download the page
+    data = httptools.downloadpage(item.url).data
+
+    quality = scrapertools.find_multiple_matches(data, '"id":"quality([^"]+)"')
+    for q in quality:
+        match = scrapertools.find_single_match(data, 'var quality_%s=(.*?);' % q)
+        match = re.sub(r'(/\*.*?\*/)', '', match).replace("+", "")
+        url = ""
+        for s in match.split():
+            val = scrapertools.find_single_match(data, 'var %s=(.*?);' % s.strip())
+            if "+" in val:
+                values = scrapertools.find_multiple_matches(val, '"([^"]+)"')
+                val = "".join(values)
+
+            url += val.replace('"', "")
+        itemlist.append([".mp4 %s [directo]" % q, url])
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/porntrex.json b/plugin.video.alfa/channels/porntrex.json
new file mode 100755
index 00000000..b525c7b1
--- /dev/null
+++ b/plugin.video.alfa/channels/porntrex.json
@@ -0,0 +1,28 @@
+{
+    "id": "porntrex",
+    "name": "Porntrex",
+    "language": "es",
+    "active": true,
+    "adult": true,
+    "version": 1,
+    "changes": [
+        {
+            "date": "29/04/2017",
+            "description": "Primera versión"
+        }
+    ],
+    "thumbnail": "http://i.imgur.com/n8SUCE9.png?1",
+    "categories": [
+        "adult"
+    ],
+    "settings": [
+        {
+            "id": "menu_info",
+            "type": "bool",
+            "label": "Mostrar menú antes de reproducir con imágenes",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        }
+    ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/porntrex.py b/plugin.video.alfa/channels/porntrex.py
new file mode 100755
index 00000000..7c9f98c3
--- /dev/null
+++ b/plugin.video.alfa/channels/porntrex.py
@@ -0,0 +1,315 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+import urlparse
+
+from core import config
+from core import httptools
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+host = "https://www.porntrex.com"
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+
+    config.set_setting("url_error", False, "porntrex")
+    itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
+    itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
+    itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
+    itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
+    itemlist.append(item.clone(action="categorias", title="Modelos",
+                               url=host + "/models/?mode=async&function=get_block&block_id=list_models_models" \
+                                          "_list&sort_by=total_videos"))
+    itemlist.append(item.clone(action="playlists", title="Listas", url=host +
"/playlists/")) + itemlist.append(item.clone(action="tags", title="Tags", url=host + "/tags/")) + itemlist.append(item.clone(title="Buscar...", action="search")) + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + item.url = "%s/search/%s/" % (host, texto.replace("+", "-")) + item.extra = texto + try: + return lista(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def lista(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = get_data(item.url) + + action = "play" + if config.get_setting("menu_info", "porntrex"): + action = "menu_info" + + # Extrae las entradas + patron = '<div class="video-item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="durations">.*?</i>([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches: + if "go.php?" in scrapedurl: + scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) + scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail) + else: + scrapedurl = urlparse.urljoin(host, scrapedurl) + if duration: + scrapedtitle = "%s - %s" % (duration, scrapedtitle) + if '>HD<' in quality: + scrapedtitle += " [COLOR red][HD][/COLOR]" + + itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Extrae la marca de siguiente página + if item.extra: + next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)') + if next_page: + if "from_videos=" in item.url: + next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url) + else: + next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \ + "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page) + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + else: + next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]*)"') + if next_page and not next_page.startswith("#"): + if "go.php?" 
in next_page: + next_page = urllib.unquote(next_page.split("/go.php?u=")[1].split("&")[0]) + else: + next_page = urlparse.urljoin(host, next_page) + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + else: + next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') + if next_page: + if "from=" in item.url: + next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) + else: + next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % ( + item.url, next_page) + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = get_data(item.url) + + # Extrae las entradas + patron = '<a class="item" href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?<div class="videos">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches: + if "go.php?" in scrapedurl: + scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) + scrapedthumbnail = urllib.unquote(scrapedthumbnail.split("/go.php?u=")[1].split("&")[0]) + else: + scrapedurl = urlparse.urljoin(host, scrapedurl) + if videos: + scrapedtitle = "%s (%s)" % (scrapedtitle, videos) + itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') + if next_page: + if "from=" in item.url: + next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) + else: + next_page = "%s&from=%s" % (item.url, next_page) + itemlist.append(item.clone(action="categorias", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def playlists(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = get_data(item.url) + + # Extrae las entradas + patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?<div class="totalplaylist">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches: + if "go.php?" in scrapedurl: + scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) + scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail) + else: + scrapedurl = urlparse.urljoin(host, scrapedurl) + if videos: + scrapedtitle = "%s (%s)" % (scrapedtitle, videos) + itemlist.append(item.clone(action="videos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]+)"') + if next_page: + if "go.php?" 
in next_page: + next_page = urllib.unquote(next_page.split("/go.php?u=")[1].split("&")[0]) + else: + next_page = urlparse.urljoin(host, next_page) + itemlist.append(item.clone(action="playlists", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def videos(item): + logger.info() + itemlist = [] + + # Descarga la pagina + data = get_data(item.url) + + action = "play" + if config.get_setting("menu_info", "porntrex"): + action = "menu_info" + # Extrae las entradas + patron = '<a href="([^"]+)" class="item ".*?data-original="([^"]+)".*?<strong class="title">\s*([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + scrapedtitle = scrapedtitle.strip() + if "go.php?" in scrapedurl: + scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0]) + scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail) + itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + fanart=scrapedthumbnail)) + + # Extrae la marca de siguiente página + next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)') + if next_page: + if "from=" in item.url: + next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url) + else: + next_page = "%s?mode=async&function=get_block&block_id=playlist_view_playlist_view&sort_by" \ + "=added2fav_date&&from=%s" % (item.url, next_page) + itemlist.append(item.clone(action="videos", title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + data = get_data(item.url) + + patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\'' + matches = scrapertools.find_multiple_matches(data, patron) + if not matches: + patron = '<iframe.*?height="(\d+)".*?video_url\s*:\s*\'([^\']+)\'' + matches = scrapertools.find_multiple_matches(data, patron) + for url, quality in matches: + if "http" in quality: + calidad = url + url = quality + quality = calidad + "p" + + itemlist.append(['.mp4 %s [directo]' % quality, url]) + + if item.extra == "play_menu": + return itemlist, data + + return itemlist + + +def menu_info(item): + logger.info() + itemlist = [] + + video_urls, data = play(item.clone(extra="play_menu")) + itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls)) + + matches = scrapertools.find_multiple_matches(data, '<img class="thumb lazy-load".*?data-original="([^"]+)"') + for i, img in enumerate(matches): + if i == 0: + continue + img = urlparse.urljoin(host, img) + title = "Imagen %s" % (str(i)) + itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img)) + + return itemlist + + +def tags(item): + logger.info() + itemlist = [] + data = get_data(item.url) + + if item.title == "Tags": + letras = [] + matches = scrapertools.find_multiple_matches(data, '<strong class="title".*?>\s*(.*?)</strong>') + for title in matches: + title = title.strip() + if title not in letras: + letras.append(title) + itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=title, extra=title)) + else: + if not item.length: + item.length = 0 + + bloque = scrapertools.find_single_match(data, + '>%s</strong>(.*?)(?:(?!%s)(?!#)[A-Z#]{1}</strong>|<div class="footer-margin">)' % ( + item.extra, item.extra)) + matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)">\s*(.*?)</a>') + for url, title in 
matches[item.length:item.length + 100]: + if "go.php?" in url: + url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0]) + itemlist.append(Item(channel=item.channel, action="lista", url=url, title=title)) + + if len(itemlist) >= 100: + itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=">> Página siguiente", + length=item.length + 100, extra=item.extra)) + + return itemlist + + +def get_data(url_orig): + try: + if config.get_setting("url_error", "porntrex"): + raise Exception + response = httptools.downloadpage(url_orig) + if not response.data or "urlopen error [Errno 1]" in str(response.code): + raise Exception + except: + config.set_setting("url_error", True, "porntrex") + import random + server_random = ['nl', 'de', 'us'] + server = server_random[random.randint(0, 2)] + url = "https://%s.hideproxy.me/includes/process.php?action=update" % server + post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \ + % (urllib.quote(url_orig), server) + while True: + response = httptools.downloadpage(url, post, follow_redirects=False) + if response.headers.get("location"): + url = response.headers["location"] + post = "" + else: + break + + return response.data diff --git a/plugin.video.alfa/channels/puyasubs.json b/plugin.video.alfa/channels/puyasubs.json new file mode 100755 index 00000000..e31841e6 --- /dev/null +++ b/plugin.video.alfa/channels/puyasubs.json @@ -0,0 +1,59 @@ +{ + "id": "puyasubs", + "name": "PuyaSubs!", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/aad4psi.png", + "banner": "http://i.imgur.com/trFDT39.png", + "version": 1, + "changes": [ + { + "date": "28/05/2017", + "description": "Cambio de dominio" + }, + { + "date": "09/05/2017", + "description": "Corregida seccion lista de torrents y regex de listados" + }, + { + "date": "10/11/2016", + "description": "Primera versión" + } + ], + "categories": [ + "anime" + ], + "settings": [ + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1", + "Ninguno" + ] + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra (TMDB)", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_anime", + "type": "bool", + "label": "Incluir en Novedades - Anime", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/puyasubs.py b/plugin.video.alfa/channels/puyasubs.py new file mode 100755 index 00000000..8791935a --- /dev/null +++ b/plugin.video.alfa/channels/puyasubs.py @@ -0,0 +1,385 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item + +__modo_grafico__ = config.get_setting('modo_grafico', 'puyasubs') +__perfil__ = config.get_setting('perfil', "puyasubs") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']] +if __perfil__ < 3: + color1, color2, color3, color4, color5 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = "" + + +def mainlist(item): + logger.info() + + itemlist = 
list()
+
+    itemlist.append(Item(channel=item.channel, action="listado", title="Novedades Anime", thumbnail=item.thumbnail,
+                         url="http://puya.si/?cat=4", text_color=color1))
+    itemlist.append(Item(channel=item.channel, action="listado", title="Novedades Doramas", thumbnail=item.thumbnail,
+                         url="http://puya.si/?cat=142", text_color=color1))
+    itemlist.append(Item(channel=item.channel, action="", title="Descargas", text_color=color2))
+    itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Animes y Doramas en proceso",
+                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25501", text_color=color1))
+    itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Animes Finalizados",
+                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=15388", text_color=color1))
+    itemlist.append(Item(channel=item.channel, action="letra", title=" Descargas Animes Finalizados por Letra",
+                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=15388", text_color=color1))
+    itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Doramas Finalizados",
+                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25507", text_color=color1))
+    itemlist.append(Item(channel=item.channel, action="descargas", title=" Descargas Películas y Ovas",
+                         thumbnail=item.thumbnail, url="http://puya.si/?page_id=25503", text_color=color1))
+    itemlist.append(Item(channel=item.channel, action="torrents", title="Lista de Torrents", thumbnail=item.thumbnail,
+                         url="https://www.frozen-layer.com/buscar/descargas", text_color=color1))
+
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar anime/dorama/película",
+                         thumbnail=item.thumbnail, url="http://puya.si/?s=", text_color=color3))
+
+    itemlist.append(item.clone(title="Configurar canal", action="configuracion", text_color=color5, folder=False))
+    return itemlist
+
+
+def configuracion(item):
+    from platformcode import platformtools
+    ret = platformtools.show_channel_settings()
+    platformtools.itemlist_refresh()
+    return ret
+
+
+def search(item, texto):
+    texto = texto.replace(" ", "+")
+    item.url += texto
+    item.extra = "busqueda"
+    try:
+        return listado(item)
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+def listado(item):
+    logger.info()
+
+    itemlist = list()
+
+    data = httptools.downloadpage(item.url).data
+    bloques = scrapertools.find_multiple_matches(data, '<h2 class="entry-title">(.*?)</article>')
+    patron = 'href="([^"]+)".*?>(.*?)</a>.*?(?:<span class="bl_categ">(.*?)|</span>)</footer>'
+    for bloque in bloques:
+        matches = scrapertools.find_multiple_matches(bloque, patron)
+        for url, title, cat in matches:
+            thumb = scrapertools.find_single_match(bloque, 'src="([^"]+)"')
+            tipo = "tvshow"
+            if item.extra == "busqueda" and cat:
+                if "Anime" not in cat and "Dorama" not in cat and "Película" not in cat:
+                    continue
+            if "Película" in cat or "Movie" in title:
+                tipo = "movie"
+            contenttitle = title.replace("[TeamDragon] ", "").replace("[PuyaSubs!] ", "").replace("[Puya+] ", "")
+            contenttitle = scrapertools.find_single_match(contenttitle,
+                                                          "(.*?)(?:\s+\[|\s+–| Episodio| [0-9]{2,3})")
+            filtro_tmdb = {"original_language": "ja"}.items()
+            itemlist.append(Item(channel=item.channel, action="findvideos", url=url, title=title, thumbnail=thumb,
+                                 contentTitle=contenttitle, show=contenttitle, contentType=tipo,
+                                 infoLabels={'filtro': filtro_tmdb}, text_color=color1))
+
+    if ("cat=4" in item.url or item.extra == "busqueda") and not item.extra == "novedades":
+        from core import tmdb
+        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+
+    next_page = scrapertools.find_single_match(data, "<span class='current'>.*?<a href='([^']+)'")
+    if next_page:
+        next_page = next_page.replace("&amp;", "&")
+        itemlist.append(Item(channel=item.channel, action="listado", url=next_page, title=">> Página Siguiente",
+                             thumbnail=item.thumbnail, extra=item.extra, text_color=color2))
+
+    return itemlist
+
+
+def descargas(item):
+    logger.info()
+
+    itemlist = list()
+    if not item.pagina:
+        item.pagina = 0
+
+    data = httptools.downloadpage(item.url).data
+    patron = '<li><a href="(http://puya.si/\?page_id=\d+|http://safelinking.net/[0-9A-z]+)">(.*?)</a>'
+    if item.letra:
+        bloque = scrapertools.find_single_match(data,
+                                                '<li>(?:<strong>|)' + item.letra + '(?:</strong>|)</li>(.*?)</ol>')
+        matches = scrapertools.find_multiple_matches(bloque, patron)
+    else:
+        matches = scrapertools.find_multiple_matches(data, patron)
+    for url, title in matches[item.pagina:item.pagina + 20]:
+        contenttitle = title.replace("[TeamDragon] ", "").replace("[PuyaSubs!] ", "") \
+            .replace("[Puya+] ", "")
+        contenttitle = re.sub(r'(\[[^\]]*\])', '', contenttitle).strip()
+        filtro_tmdb = {"original_language": "ja"}.items()
+
+        tipo = "tvshow"
+        if "page_id=25503" in item.url:
+            tipo = "movie"
+
+        action = "findvideos"
+        if "safelinking" in url:
+            action = "extract_safe"
+        itemlist.append(Item(channel=item.channel, action=action, url=url, title=title, contentTitle=contenttitle,
+                             show=contenttitle, contentType=tipo, infoLabels={'filtro': filtro_tmdb},
+                             text_color=color1))
+
+    from core import tmdb
+    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+
+    if len(matches) > item.pagina + 20:
+        pagina = item.pagina + 20
+        itemlist.append(Item(channel=item.channel, action="descargas", url=item.url, title=">> Página Siguiente",
+                             thumbnail=item.thumbnail, pagina=pagina, letra=item.letra, text_color=color2))
+
+    return itemlist
+
+
+def letra(item):
+    logger.info()
+
+    itemlist = list()
+    data = httptools.downloadpage(item.url).data
+    patron = '<li>(?:<strong>|)([A-z#]{1})(?:</strong>|)</li>'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for match in matches:
+        itemlist.append(Item(channel=item.channel, title=match, action="descargas", letra=match, url=item.url,
+                             thumbnail=item.thumbnail, text_color=color1))
+
+    return itemlist
+
+
+def torrents(item):
+    logger.info()
+
+    itemlist = list()
+    if not item.pagina:
+        item.pagina = 0
+
+    post = "utf8=%E2%9C%93&busqueda=puyasubs&search=Buscar&tab=anime&con_seeds=con_seeds"
+    data = httptools.downloadpage(item.url, post).data
+
+    patron = "<td>.*?href='([^']+)' title='descargar torrent'>.*?title='informacion de (.*?)'.*?<td class='fecha'>.*?<td>(.*?)</td>" \
+             ".*?<span class=\"stats\d+\">(\d+)</span>.*?<span class=\"stats\d+\">(\d+)</span>"
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for url, title, size, seeds, leechers in matches[item.pagina:item.pagina + 25]:
+        contentTitle = title
+        if "(" in
contentTitle: + contentTitle = contentTitle.split("(")[0] + + size = size.strip() + filtro_tmdb = {"original_language": "ja"}.items() + title += " [COLOR %s][Semillas:%s[/COLOR]|[COLOR %s]Leech:%s[/COLOR]|%s]" % ( + color4, seeds, color5, leechers, size) + url = "https://www.frozen-layer.com" + url + + itemlist.append(Item(channel=item.channel, action="play", url=url, title=title, contentTitle=contentTitle, + server="torrent", show=contentTitle, contentType="tvshow", text_color=color1, + infoLabels={'filtro': filtro_tmdb})) + + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + + if len(matches) > item.pagina + 25: + pagina = item.pagina + 25 + itemlist.append(Item(channel=item.channel, action="torrents", url=item.url, title=">> Página Siguiente", + thumbnail=item.thumbnail, pagina=pagina, text_color=color2)) + else: + next_page = scrapertools.find_single_match(data, 'href="([^"]+)" rel="next"') + if next_page: + next_page = "https://www.frozen-layer.com" + next_page + itemlist.append(Item(channel=item.channel, action="torrents", url=next_page, title=">> Página Siguiente", + thumbnail=item.thumbnail, pagina=0, text_color=color2)) + + return itemlist + + +def findvideos(item): + logger.info() + if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]: + from core import tmdb + tmdb.set_infoLabels_item(item, True, idioma_busqueda="en") + + itemlist = list() + + data = httptools.downloadpage(item.url).data + idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?)<br />') + calidades = ['720p', '1080p'] + torrentes = scrapertools.find_multiple_matches(data, '<a href="(https://www.frozen-layer.com/descargas[^"]+)"') + if torrentes: + for i, enlace in enumerate(torrentes): + title = "Ver por Torrent %s" % idiomas + if ">720p" in data and ">1080p" in data: + try: + title = "[%s] %s" % (calidades[i], title) + except: + pass + itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent")) + + onefichier = scrapertools.find_multiple_matches(data, '<a href="(https://1fichier.com/[^"]+)"') + if onefichier: + for i, enlace in enumerate(onefichier): + title = "Ver por 1fichier %s" % idiomas + if ">720p" in data and ">1080p" in data: + try: + title = "[%s] %s" % (calidades[i], title) + except: + pass + itemlist.append(item.clone(title=title, action="play", url=enlace, server="onefichier")) + + safelink = scrapertools.find_multiple_matches(data, '<a href="(http(?:s|)://safelinking.net/[^"]+)"') + if safelink: + for i, safe in enumerate(safelink): + headers = [['Content-Type', 'application/json;charset=utf-8']] + hash = safe.rsplit("/", 1)[1] + post = jsontools.dump({"hash": hash}) + data_sf = httptools.downloadpage("http://safelinking.net/v1/protected", post, headers).data + data_sf = jsontools.load(data_sf) + + for link in data_sf.get("links"): + enlace = link["url"] + domain = link["domain"] + title = "Ver por %s" % domain + action = "play" + if "mega" in domain: + server = "mega" + if "/#F!" 
in enlace: + action = "carpeta" + + elif "1fichier" in domain: + server = "onefichier" + if "/dir/" in enlace: + action = "carpeta" + + title += " %s" % idiomas + if ">720p" in data and ">1080p" in data: + try: + title = "[%s] %s" % (calidades[i], title) + except: + pass + itemlist.append(item.clone(title=title, action=action, url=enlace, server=server)) + + return itemlist + + +def carpeta(item): + logger.info() + itemlist = list() + + if item.server == "onefichier": + data = httptools.downloadpage(item.url).data + + patron = '<tr>.*?<a href="([^"]+)".*?>(.*?)</a>.*?<td class="normal">(.*?)</td>' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedtitle, size in matches: + scrapedtitle += " (%s) [1fichier]" % size + itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", + server="onefichier", text_color=color1, thumbnail=item.thumbnail, + infoLabels=item.infoLabels)) + else: + from megaserver import Client + from platformcode import platformtools + + c = Client(url=item.url) + + files = c.get_files() + c.stop() + for enlace in files: + file_id = enlace["id"] + itemlist.append( + Item(channel=item.channel, title=enlace["name"], url=item.url + "|" + file_id, action="play", + server="mega", text_color=color1, thumbnail=item.thumbnail, + infoLabels=item.infoLabels)) + + itemlist.sort(key=lambda item: item.title) + return itemlist + + +def extract_safe(item): + logger.info() + if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]: + from core import tmdb + tmdb.set_infoLabels_item(item, True, idioma_busqueda="en") + itemlist = list() + + hash = item.url.rsplit("/", 1)[1] + headers = [['Content-Type', 'application/json;charset=utf-8']] + post = jsontools.dump({"hash": hash}) + data = httptools.downloadpage("http://safelinking.net/v1/protected", post, headers).data + data = jsontools.load(data) + + for link in data.get("links"): + enlace = link["url"] + domain = link["domain"] + title = "Ver por %s" % domain + action = "play" + if "mega" in domain: + server = "mega" + if "/#F!" 
in enlace: + action = "carpeta" + + elif "1fichier" in domain: + server = "onefichier" + if "/dir/" in enlace: + action = "carpeta" + + itemlist.append(item.clone(title=title, action=action, url=enlace, server=server)) + + return itemlist + + +def play(item): + logger.info() + itemlist = list() + + if item.server == "torrent" and "frozen" in item.url and not item.url.endswith(".torrent"): + data = httptools.downloadpage(item.url).data + enlace = scrapertools.find_single_match(data, "<div id='descargar_torrent'>.*?href='([^']+)'") + if enlace: + itemlist.append(item.clone(url=enlace)) + else: + itemlist.append(item) + + return itemlist + + +def newest(categoria): + logger.info() + item = Item() + try: + item.url = "http://puya.si/?cat=4" + item.extra = "novedades" + itemlist = listado(item) + + if itemlist[-1].action == "listado": + itemlist.pop() + for it in itemlist: + it.contentTitle = it.title + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/pymovie.json b/plugin.video.alfa/channels/pymovie.json new file mode 100755 index 00000000..79f10b3e --- /dev/null +++ b/plugin.video.alfa/channels/pymovie.json @@ -0,0 +1,64 @@ +{ + "id": "pymovie", + "name": "pymovie", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s27.postimg.org/hvmvz7vab/pymovie.png", + "banner": "https://s28.postimg.org/3k0wjnwul/pymovie_banner.png", + "version": 1, + "changes": [ + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "04/01/2017", + "description": "Release." 
+ } + ], + "categories": [ + "latino", + "movie", + "tvshow", + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_documentales", + "type": "bool", + "label": "Incluir en Novedades - Documentales", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pymovie.py b/plugin.video.alfa/channels/pymovie.py new file mode 100755 index 00000000..0e7eda7b --- /dev/null +++ b/plugin.video.alfa/channels/pymovie.py @@ -0,0 +1,400 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +host = "http://www.pymovie.com.mx" + +headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + +tgenero = {"comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "drama": "https://s16.postimg.org/94sia332d/drama.png", + "accion": "https://s3.postimg.org/y6o9puflv/accion.png", + "aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "animacion": "https://s13.postimg.org/5on877l87/animacion.png", + "ciencia ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "musical": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "deporte": "https://s13.postimg.org/xuxf5h06v/deporte.png", + "artes Marciales": "https://s24.postimg.org/w1aw45j5h/artesmarciales.png", + "intriga": "https://s27.postimg.org/v9og43u2b/intriga.png", + "infantil": "https://s23.postimg.org/g5rmazozv/infantil.png", + "mexicanas": "https://s3.postimg.org/p36ntnxfn/mexicana.png", + "espionaje": "https://s2.postimg.org/5hv64b989/espionaje.png", + "biografia": "https://s15.postimg.org/5lrpbx323/biografia.png"} + +tcalidad = {'hd-1080': '[COLOR limegreen]HD-1080[/COLOR]', 'hd-720': '[COLOR limegreen]HD-720[/COLOR]', + 'blueray': '[COLOR limegreen]BLUERAY[/COLOR]', 'dvd': '[COLOR limegreen]DVD[/COLOR]', + 'cam': '[COLOR red]CAM[/COLOR]'} + +tcalidad2 = {'hd-1080': 'https://s21.postimg.org/4h1s0t1wn/hd1080.png', + 'hd-720': 'https://s12.postimg.org/lthu7v4q5/hd720.png', 'blueray': '', + 'dvd': 'https://s1.postimg.org/m89hus1tb/dvd.png', 'cam': 'https://s11.postimg.org/ad4o5wpz7/cam.png'} + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Peliculas", action="menupeliculas", + thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png', + fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png', extra='peliculas/')) + + itemlist.append(itemlist[-1].clone(title="Series", action="menuseries", + thumbnail='https://s27.postimg.org/iahczwgrn/series.png', + fanart='https://s27.postimg.org/iahczwgrn/series.png', extra='peliculas/')) + + itemlist.append(itemlist[-1].clone(title="Documentales", action="menudocumental", + 
thumbnail='https://s16.postimg.org/7xjj4bmol/documental.png', + fanart='https://s16.postimg.org/7xjj4bmol/documental.png', extra='documental')) + + return itemlist + + +def menupeliculas(item): + logger.info() + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host + '/Ordenar/Estreno/?page=1', + thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', extra='Estreno')) + + itemlist.append(Item(channel=item.channel, title="Todas", action="lista", url=host + '?page=1', + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', extra='todas')) + + itemlist.append(Item(channel=item.channel, title="Generos", action="seccion", url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', extra='generos')) + + itemlist.append( + Item(channel=item.channel, title="Alfabetico", action="lista", url=host + '/Ordenar/Alfabetico/?page=1', + thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', fanart='https://s17.postimg.org/fwi1y99en/a-z.png', + extra='Alfabetico')) + + itemlist.append(Item(channel=item.channel, title="Calidad", action="seccion", url=host, + thumbnail='https://s13.postimg.org/6nzv8nlkn/calidad.png', + fanart='https://s13.postimg.org/6nzv8nlkn/calidad.png', extra='calidad')) + + itemlist.append( + Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + '/Ordenar/MasVistas/?page=1', + thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png', + fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='Estreno')) + + itemlist.append( + Item(channel=item.channel, title="Mas Votadas", action="lista", url=host + '/Ordenar/MasVotos/?page=1', + thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png', + fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', extra='Estreno')) + + itemlist.append( + Item(channel=item.channel, title="Calificacion", action="lista", url=host + '/Ordenar/Calificacion/?page=1', + thumbnail='https://s18.postimg.org/mjqrl49h5/calificacion.png', + fanart='https://s18.postimg.org/mjqrl49h5/calificacion.png', extra='Estreno')) + + return itemlist + + +def menuseries(item): + logger.info() + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host + "/Series-estreno/?page=1", + thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', extra='series')) + + itemlist.append(Item(channel=item.channel, title="Generos", action="seccion", url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', extra='series-generos')) + + itemlist.append( + Item(channel=item.channel, title="Alfabetico", action="lista", url=host + '/Ordernar-Serie/Alfabetico/?page=1', + thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', fanart='https://s17.postimg.org/fwi1y99en/a-z.png', + extra='series-alpha')) + + itemlist.append( + Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + '/Ordernar-Serie/MasVistas/?page=1', + thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png', + fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='series-masvistas')) + + itemlist.append( + Item(channel=item.channel, title="Mas Votadas", action="lista", url=host + '/Ordernar-Serie/Masvotos/?page=1', + thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png', + 
fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', extra='series-masvotadas')) + + itemlist.append(Item(channel=item.channel, title="Recomendadas", action="lista", + url=host + '/Ordernar-Serie/Recomendadas/?page=1', + thumbnail='https://s12.postimg.org/s881laywd/recomendadas.png', + fanart='https://s12.postimg.org/s881laywd/recomendadas.png', extra='series-recomendadas')) + + return itemlist + + +def menudocumental(item): + logger.info() + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Todas", action="lista", url=host + "/Documentales/?page=1", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', extra='documental')) + + itemlist.append(Item(channel=item.channel, title="Alfabetico", action="lista", + url=host + "/OrdenarDocumental/Alfabetico/?page=1", + thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', + fanart='https://s17.postimg.org/fwi1y99en/a-z.png', extra='documental')) + + itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="lista", + url=host + "/OrdenarDocumental/MasVistas/?page=1", + thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png', + fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='documental')) + + return itemlist + + +def lista(item): + logger.info() + + if item.extra == 'series': + accion = 'episodiosxtemp' + elif 'series-' in item.extra: + accion = 'temporadas' + else: + accion = 'findvideos' + + itemlist = [] + data = httptools.downloadpage(item.url).data + + if 'series' in item.extra or item.extra == 'documental': + patron = '<h2 itemprop="name" >([^<]+)<\/h2><a href="([^.]+)" title=".*?" ><img.*?src="([^"]+)".*?class=".*?boren2"\/([^<]+)' + else: + patron = '<h2 itemprop="name" >([^<]+)<\/h2><a href="([^.]+)" title=".*?" ><img.*?src="([^"]+)".*?class=".*?boren2".*?>([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedcalidad in matches: + url = scrapertools.decodeHtmlentities(host + scrapedurl) + url = url.strip(' ') + + scrapedcalidad = scrapedcalidad.strip(' ') + scrapedcalidad = scrapedcalidad.strip('p') + scrapedcalidad = scrapedcalidad.lower() + if 'series' in item.extra or item.extra == 'documental': + title = scrapertools.decodeHtmlentities(scrapedtitle) + else: + calidad = tcalidad[scrapedcalidad] + title = scrapertools.decodeHtmlentities(scrapedtitle) + ' (' + calidad + ') ' + + thumbnail = scrapedthumbnail + fanart = '' + plot = '' + + itemlist.append(Item(channel=item.channel, action=accion, title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=fanart, contentSerieName=scrapedtitle, contentTitle=scrapedtitle, extra=item.extra)) + + # Paginacion + if itemlist != []: + actual_page_url = item.url + next_page = scrapertools.find_single_match(data, '<a href="\?page=([^"]+)" class="next">next &') + while item.url[-1] != '=': + item.url = item.url[:-1] + next_page_url = item.url + next_page + if next_page != '': + itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page_url, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', extra=item.extra)) + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + templist = [] + data = httptools.downloadpage(item.url).data + + patron = 'class="listatemporadas" ><a href="([^"]+)" title=".*?" 
><img src="([^"]+)" width="80" height="100" title=".*?alt=".*?<h3>([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + url = host + scrapedurl + title = scrapedtitle + thumbnail = scrapedthumbnail + plot = '' + fanart = '' + contentSeasonNumber = scrapedtitle.replace('Temporada ', '') + + itemlist.append(Item(channel=item.channel, action="episodiosxtemp", title=title, fulltitle=item.title, url=url, + thumbnail=thumbnail, plot=plot, fanart=fanart, contentSerieName=item.contentSerieName, + contentSeasonNumber=contentSeasonNumber)) + + if item.extra == 'temporadas': + for tempitem in itemlist: + templist += episodiosxtemp(tempitem) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, + action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + templist = temporadas(item) + for tempitem in templist: + itemlist += episodiosxtemp(tempitem) + + return itemlist + + +def episodiosxtemp(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<a href="\/VerCapitulo\/([^"]+)">' + matches = re.compile(patron, re.DOTALL).findall(data) + ep = 1 + for scrapedtitle in matches: + scrapedtitle = scrapedtitle.replace(item.contentSeasonNumber + 'x' + '0' + str(ep), '') + url = host + '/VerCapitulo/' + scrapedtitle.replace(' ', '-') + title = item.contentSeasonNumber + 'x' + str(ep) + ' ' + scrapedtitle.strip('/') + + thumbnail = item.thumbnail + plot = '' + fanart = '' + plot = '' + contentEpisodeNumber = ep + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.title, url=url, + thumbnail=thumbnail, plot=plot, fanart=fanart, extra='series', + contentSerieName=item.contentSerieName, contentSeasonNumber=item.contentSeasonNumber, + contentEpisodeNumber=contentEpisodeNumber)) + ep = ep + 1 + + return itemlist + + +def seccion(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<option class="opselect" value="([^"]+)".*?>([^<]+)<\/option>' + matches = re.compile(patron, re.DOTALL).findall(data) + if item.extra == 'generos': + oplista = tgenero + opdir = '/Categoria/' + elif item.extra == 'calidad': + oplista = tcalidad + opdir = '/Calidad/' + elif item.extra == 'series-generos': + oplista = tgenero + opdir = '/Categoria-Series/' + + for scrapeddir, scrapedtitle in matches: + + url = item.url + opdir + scrapeddir + '/?page=1' + title = scrapedtitle.upper() + + if 'generos' in item.extra and scrapedtitle.lower() in oplista: + thumbnail = oplista[scrapedtitle.lower()] + fanart = oplista[scrapedtitle.lower()] + + elif 'calidad' in item.extra and scrapedtitle.lower() in oplista: + thumbnail = tcalidad2[scrapedtitle.lower()] + fanart = tcalidad[scrapedtitle.lower()] + + else: + thumbnail = '' + fanart = '' + + if scrapedtitle.lower() in oplista: + itemlist.append(Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, + thumbnail=thumbnail, fanart=fanart, extra=item.extra)) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]', + 'Ingles': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]', 'Latino-Ingles': 'DUAL'} + data = 
httptools.downloadpage(item.url).data
+
+    if item.extra != 'series':
+        patron = 'data-video="([^"]+)" class="reproductorVideo"><ul><li>([^<]+)<\/li><li>([^<]+)<\/li>'
+        tipotitle = item.contentTitle
+    elif item.extra == 'series':
+        tipotitle = str(item.contentSeasonNumber) + 'x' + str(item.contentEpisodeNumber) + ' ' + item.contentSerieName
+        patron = '<li class="enlaces-l"><a href="([^"]+)" target="_blank"><ul><li>([^<]+)<.*?>([^<]+)<.*?>Reproducir<'
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    if item.extra != 'documental':
+        n = 0
+
+        for scrapedurl, scrapedcalidad, scrapedaudio in matches:
+            if 'series' in item.extra:
+                datab = httptools.downloadpage(host + scrapedurl).data
+                url = scrapertools.find_single_match(datab, 'class="reproductor"><iframe src="([^"]+)"')
+                logger.debug(url)
+            else:
+                url = scrapedurl
+
+            title = tipotitle
+            idioma = audio[scrapedaudio]
+            itemlist.extend(servertools.find_video_items(data=url))
+            if n < len(itemlist):
+                itemlist[n].title = tipotitle + ' (' + idioma + ' ) ' + '(' + itemlist[n].server + ' )'
+            n = n + 1
+    else:
+        url = scrapertools.find_single_match(data, 'class="reproductor"><iframe src="([^"]+)"')
+        itemlist.extend(servertools.find_video_items(data=url))
+
+    for videoitem in itemlist:
+        if item.extra == 'documental':
+            videoitem.title = item.title + ' (' + videoitem.server + ')'
+        videoitem.channel = item.channel
+        videoitem.action = "play"
+        videoitem.folder = False
+
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'series':
+        itemlist.append(
+            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
+                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
+
+    return itemlist
+
+
+def newest(categoria):
+    logger.info()
+    itemlist = []
+    item = Item()
+    item.extra = 'Estrenos'
+    try:
+        if categoria == 'peliculas':
+            item.url = host + '/Ordenar/Estreno/?page=1'
+
+        elif categoria == 'infantiles':
+            item.url = host + '/Categoria/Animacion/?page=1'
+
+        elif categoria == 'documentales':
+            item.url = host + '/Documentales/?page=1'
+            item.extra = 'documental'
+
+        itemlist = lista(item)
+        if itemlist[-1].title == 'Siguiente >>>':
+            itemlist.pop()
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/qserie.json b/plugin.video.alfa/channels/qserie.json
new file mode 100755
index 00000000..6839e07b
--- /dev/null
+++ b/plugin.video.alfa/channels/qserie.json
@@ -0,0 +1,28 @@
+{
+    "id": "qserie",
+    "name": "QSerie",
+    "active": true,
+    "adult": false,
+    "language": "es",
+    "thumbnail": "https://s31.postimg.org/dousrbu9n/qserie.png",
+    "banner": "https://s32.postimg.org/nk1quq1wl/qserie_banner.png",
+    "version": 1,
+    "changes": [
+        {
+            "date": "25/05/2017",
+            "description": "cambios esteticos"
+        },
+        {
+            "date": "15/03/2017",
+            "description": "limpieza código"
+        },
+        {
+            "date": "04/01/2017",
+            "description": "Release."
+ } + ], + "categories": [ + "latino", + "tvshow" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/qserie.py b/plugin.video.alfa/channels/qserie.py new file mode 100755 index 00000000..401db1c8 --- /dev/null +++ b/plugin.video.alfa/channels/qserie.py @@ -0,0 +1,371 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import tmdb +from core.item import Item + +host = 'http://www.qserie.com' + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Series", action="todas", url=host, + thumbnail='https://s27.postimg.org/iahczwgrn/series.png', + fanart='https://s27.postimg.org/iahczwgrn/series.png')) + + itemlist.append(Item(channel=item.channel, title="Generos", action="generos", url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png')) + + itemlist.append(Item(channel=item.channel, title="Alfabetico", action="lasmas", url=host, + thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', + fanart='https://s17.postimg.org/fwi1y99en/a-z.png', extra='letras')) + + itemlist.append(Item(channel=item.channel, title="Ultimas Agregadas", action="ultimas", url=host, + thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', + fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png')) + + itemlist.append(Item(channel=item.channel, title="Mas Vistas", action="lasmas", url=host, + thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png', + fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png', extra='Vista')) + + itemlist.append(Item(channel=item.channel, title="Mas Votadas", action="lasmas", url=host, + thumbnail='https://s7.postimg.org/9kg1nthzf/votadas.png', + fanart='https://s7.postimg.org/9kg1nthzf/votadas.png', extra='Votos')) + + return itemlist + + +def todas(item): + logger.info() + audio = {'Latino': '[COLOR limegreen]LATINO[/COLOR]', 'Español': '[COLOR yellow]ESPAÑOL[/COLOR]', + 'Sub Español': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]'} + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |<br>", "", data) + + patron = '<h2 class=.*?><a href="([^"]+)" title="([^"]+)">.*?\/h2>.*?<img src="([^"]+)".*?\/><\/a>.*?<p>([^<]+)<\/p>.*?<strong>Genero<\/strong>: .*?, (.*?)<\/div>.*?<img src=.*?>([^<]+)<\/div>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot, scrapedyear, scrapedidioma in matches: + idioma = scrapedidioma.strip() + idioma = scrapertools.decodeHtmlentities(idioma) + url = urlparse.urljoin(item.url, scrapedurl) + year = scrapedyear + if idioma in audio: + idioma = audio[idioma] + else: + idioma = audio['Sub Español'] + + title = scrapertools.decodeHtmlentities(scrapedtitle) + ' (' + idioma + ')' + thumbnail = scrapedthumbnail + plot = scrapedplot + fanart = 'https://s31.postimg.org/dousrbu9n/qserie.png' + itemlist.append( + Item(channel=item.channel, action="temporadas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=fanart, extra=idioma, contentSerieName=scrapedtitle, infoLabels={'year': year})) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Paginacion + siguiente = '' + title = '' + actual = scrapertools.find_single_match(data, '<li><a href=".*?"><span><b>([^<]+)<\/b><\/span><\/a><\/li>') + ultima = scrapertools.find_single_match(data, '<li><a 
href=".*?page=([^"]+)">Ultima<\/a><\/li>') + if 'page' in item.title: + while not item.url.endswith('='): item.url = item.url[:-1] + if actual: + siguiente = int(actual) + 1 + if item.url.endswith('='): + siguiente_url = item.url + str(siguiente) + else: + siguiente_url = item.url + '?&page=' + str(siguiente) + if actual and ultima and siguiente <= int(ultima): + titlen = 'Pagina Siguiente >>> ' + str(actual) + '/' + str(ultima) + fanart = 'https://s32.postimg.org/4q1u1hxnp/qserie.png' + thumbnail = 'https://s16.postimg.org/9okdu7hhx/siguiente.png' + itemlist.append(Item(channel=item.channel, action="todas", title=titlen, url=siguiente_url, fanart=fanart, + thumbnail=thumbnail)) + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + url_base = item.url + patron = '<a href="javascript:.*?;" class="lccn"><b>([^<]+)<\/b><\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + infoLabels = item.infoLabels + temp = 1 + if matches: + for scrapedtitle in matches: + url = url_base + title = scrapedtitle + thumbnail = item.thumbnail + plot = item.plot + contentSeasonNumber = str(temp) + + infoLabels['season'] = contentSeasonNumber + fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>') + itemlist.append( + Item(channel=item.channel, action="episodiosxtemp", title=title, fulltitle=item.title, url=url, + thumbnail=thumbnail, plot=plot, fanart=fanart, contentSeasonNumber=contentSeasonNumber, + contentSerieName=item.contentSerieName, infoLabels=infoLabels)) + temp = temp + 1 + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", extra="episodios", + contentSerieName=item.contentSerieName)) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + else: + item.title = '' + item.modo = 'unico' + return episodiosxtemp(item) + + +def episodios(item): + logger.info() + itemlist = [] + templist = temporadas(item) + if item.modo == 'unico': + itemlist += episodiosxtemp(item) + else: + for tempitem in templist: + itemlist += episodiosxtemp(tempitem) + + return itemlist + + +def episodiosxtemp(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + infoLabels = item.infoLabels + temp = item.contentSeasonNumber + if item.title == '': + temp = '1' + item.contenSeasonNumber = temp + infoLabels['season'] = temp + + patron = '<li><a href="([^"]+)" class="lcc"><b>([^<]+)<\/b>.*?<\/a><\/li>' + + else: + patron = '<li><a href="([^"]+)" class="lcc"><b>([^<]+)<\/b> - Temp\. 
' + temp + '<\/a><\/li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + capitulo = re.findall(r'\d+', scrapedtitle) + contentEpisodeNumber = str(capitulo[0]) + infoLabels['episode'] = contentEpisodeNumber + title = item.contentSerieName + ' ' + temp + 'x' + contentEpisodeNumber + thumbnail = item.thumbnail + plot = item.plot + fanart = item.fanart + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url, + thumbnail=item.thumbnail, plot=plot, extra=item.extra, extra1=item.extra1, + extra2=item.extra2, infoLabels=infoLabels)) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + if item.modo == 'unico': + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", extra="episodios", + contentSerieName=item.contentSerieName, modo='unico', + contentSeasonNumber=item.contenSeasonNumber)) + + return itemlist + + +def generos(item): + tgenero = {"comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "drama": "https://s16.postimg.org/94sia332d/drama.png", + "acción": "https://s3.postimg.org/y6o9puflv/accion.png", + "aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "aventuras": "https://s10.postimg.org/6su40czih/aventura.png", + "romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "infantil": "https://s23.postimg.org/g5rmazozv/infantil.png", + "ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "anime": 'https://s2.postimg.org/s38borokp/anime.png', + "animes": "https://s2.postimg.org/s38borokp/anime.png", + "dibujos": "https://s2.postimg.org/aqwqksyop/dibujos.png", + "documental": "https://s16.postimg.org/7xjj4bmol/documental.png", + "fantástico": "https://s10.postimg.org/pbkbs6j55/fantastico.png", + "intriga": "https://s27.postimg.org/v9og43u2b/intriga.png", + "musical": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "secuela": "https://s7.postimg.org/bti0nauh7/secuela.png", + "thriller (suspenso)": "https://s22.postimg.org/5y9g0jsu9/thriller.png", + "western": "https://s23.postimg.org/lzyfbjzhn/western.png"} + + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '<li><a title="([^"]+)" href="([^"]+)" onclick=.*?' 
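+    # Each sidebar genre link yields a (title, url) pair; the title is decoded
+    # from cp1252 below and lowercased to look up artwork in the tgenero table
+    # above, falling back to empty thumbnail/fanart when the genre has no entry.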
+ matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedtitle, scrapedurl in matches: + url = urlparse.urljoin(item.url, scrapedurl) + title = scrapedtitle.decode('cp1252') + title = title.encode('utf-8') + if title.lower() in tgenero: + thumbnail = tgenero[title.lower()] + fanart = tgenero[title.lower()] + else: + thumbnail = '' + fanart = '' + plot = '' + itemlist.append( + Item(channel=item.channel, action="todas", title=title.lower(), fulltitle=item.fulltitle, url=url, + thumbnail=thumbnail, plot=plot, fanart=fanart)) + + return itemlist + + +def ultimas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + realplot = '' + patron = '<li><a title="([^"]+)" href="([^"]+)"><strong>.*?</a></li>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedtitle, scrapedurl in matches: + url = urlparse.urljoin(item.url, scrapedurl) + data = httptools.downloadpage(scrapedurl).data + thumbnail = scrapertools.get_match(data, '<link rel="image_src" href="([^"]+)"/>') + realplot = scrapertools.find_single_match(data, '<p itemprop="articleBody">([^<]+)<\/p> ') + plot = scrapertools.remove_htmltags(realplot) + inutil = re.findall(r' Temporada \d', scrapedtitle) + title = scrapedtitle + title = scrapertools.decodeHtmlentities(title) + realtitle = scrapedtitle.replace(inutil[0], '') + fanart = 'https://s22.postimg.org/cb7nmhwv5/ultimas.png' + itemlist.append( + Item(channel=item.channel, action="temporadas", title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=fanart, contentSerieName=realtitle)) + + return itemlist + + +def lasmas(item): + thumbletras = {'0-9': 'https://s32.postimg.org/drojt686d/image.png', + '0 - 9': 'https://s32.postimg.org/drojt686d/image.png', + '#': 'https://s32.postimg.org/drojt686d/image.png', + 'a': 'https://s32.postimg.org/llp5ekfz9/image.png', + 'b': 'https://s32.postimg.org/y1qgm1yp1/image.png', + 'c': 'https://s32.postimg.org/vlon87gmd/image.png', + 'd': 'https://s32.postimg.org/3zlvnix9h/image.png', + 'e': 'https://s32.postimg.org/bgv32qmsl/image.png', + 'f': 'https://s32.postimg.org/y6u7vq605/image.png', + 'g': 'https://s32.postimg.org/9237ib6jp/image.png', + 'h': 'https://s32.postimg.org/812yt6pk5/image.png', + 'i': 'https://s32.postimg.org/6nbbxvqat/image.png', + 'j': 'https://s32.postimg.org/axpztgvdx/image.png', + 'k': 'https://s32.postimg.org/976yrzdut/image.png', + 'l': 'https://s32.postimg.org/fmal2e9yd/image.png', + 'm': 'https://s32.postimg.org/m19lz2go5/image.png', + 'n': 'https://s32.postimg.org/b2ycgvs2t/image.png', + 'o': 'https://s32.postimg.org/c6igsucpx/image.png', + 'p': 'https://s32.postimg.org/jnro82291/image.png', + 'q': 'https://s32.postimg.org/ve5lpfv1h/image.png', + 'r': 'https://s32.postimg.org/nmovqvqw5/image.png', + 's': 'https://s32.postimg.org/zd2t89jol/image.png', + 't': 'https://s32.postimg.org/wk9lo8jc5/image.png', + 'u': 'https://s32.postimg.org/w8s5bh2w5/image.png', + 'v': 'https://s32.postimg.org/e7dlrey91/image.png', + 'w': 'https://s32.postimg.org/fnp49k15x/image.png', + 'x': 'https://s32.postimg.org/dkep1w1d1/image.png', + 'y': 'https://s32.postimg.org/um7j3zg85/image.png', + 'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'} + + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + realplot = '' + if item.extra == 'letras': + patron = '<li><a href="([^"]+)" title="Series que comienzan con.*?">([^<]+)</a></li>' + else: + patron = '<a href="([^"]+)" title="([^V]+)' + item.extra + '.*?">' + + matches = re.compile(patron, 
re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(item.url, scrapedurl) + if item.extra != 'letras': + data = httptools.downloadpage(scrapedurl).data + thumbnail = scrapertools.get_match(data, '<link rel="image_src" href="([^"]+)"/>') + realplot = scrapertools.find_single_match(data, '<p itemprop="articleBody">([^<]+)<\/p> ') + plot = scrapertools.remove_htmltags(realplot) + action = 'temporadas' + else: + if scrapedtitle.lower() in thumbletras: + thumbnail = thumbletras[scrapedtitle.lower()] + else: + thumbnail = '' + plot = '' + action = 'todas' + title = scrapedtitle.replace(': ', '') + title = scrapertools.decodeHtmlentities(title) + if item.extra == 'letras': + fanart = 'https://s17.postimg.org/fwi1y99en/a-z.png' + elif item.extra == 'Vista': + fanart = 'https://s9.postimg.org/wmhzu9d7z/vistas.png' + else: + fanart = '' + + itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=fanart, contentSerieName=scrapedtitle)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + + anterior = scrapertools.find_single_match(data, '<a class="left" href="([^"]+)" title="Cap.tulo Anterior"></a>') + siguiente = scrapertools.find_single_match(data, '<a class="right" href="([^"]+)" title="Cap.tulo Siguiente"></a>') + titulo = scrapertools.find_single_match(data, + '<h1 class="tithd bold fs18px lnht30px ico_b pdtop10px">([^<]+)</h1> ') + existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>') + + from core import servertools + itemlist.extend(servertools.find_video_items(data=data)) + for videoitem in itemlist: + if 'youtube' in videoitem.url: + itemlist.remove(videoitem) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.action = "play" + videoitem.folder = False + videoitem.fanart = item.fanart + videoitem.title = titulo + " " + videoitem.server + if item.extra2 != 'todos': + data = httptools.downloadpage(anterior).data + existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>') + if not existe: + itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Anterior', url=anterior, + thumbnail='https://s1.postimg.org/dbq8gvldb/anterior.png', folder=True)) + + data = httptools.downloadpage(siguiente).data + existe = scrapertools.find_single_match(data, '<center>La pel.cula que quieres ver no existe.</center>') + if not existe: + itemlist.append(Item(channel=item.channel, action="findvideos", title='Capitulo Siguiente', url=siguiente, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', folder=True)) + + return itemlist diff --git a/plugin.video.alfa/channels/quierodibujosanimados.json b/plugin.video.alfa/channels/quierodibujosanimados.json new file mode 100755 index 00000000..cba08198 --- /dev/null +++ b/plugin.video.alfa/channels/quierodibujosanimados.json @@ -0,0 +1,24 @@ +{ + "id": "quierodibujosanimados", + "name": "Quiero Dibujos Animados", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "quierodibujosanimados.png", + "banner": "quierodibujosanimados.png", + "fanart": "quierodibujosanimados.jpg", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "tvshow" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/quierodibujosanimados.py b/plugin.video.alfa/channels/quierodibujosanimados.py new file mode 100755 index 00000000..f7b16b3c --- /dev/null +++ b/plugin.video.alfa/channels/quierodibujosanimados.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import logger +from core import scrapertools +from core.item import Item + + + +def mainlist(item): + logger.info() + + # itemlist.append( Item(channel=item.channel , action="novedades" , title="Novedades" , url="http://www.quierodibujosanimados.com/")) + return series( + Item(channel=item.channel, action="series", title="Series", url="http://www.quierodibujosanimados.com/", + fanart=item.fanart)) + + +def series(item): + logger.info() + itemlist = [] + + data = scrapertools.cache_page(item.url) + data = scrapertools.get_match(data, '<ul class="categorias">(.*?)</ul') + + # <a href="http://www.quierodibujosanimados.com/cat/popeye-el-marino/38" title="Popeye el marino">Popeye el marino</a> + patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=item.fanart)) + + next_page_url = scrapertools.find_single_match(data, '</span[^<]+<a href="([^"]+)">') + if next_page_url != "": + itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente", + url=urlparse.urljoin(item.url, next_page_url), folder=True, + fanart=item.fanart)) + + return itemlist + + +def episodios(item): + logger.info() + + ''' + <li> + <div class="info"> + <h2><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h2> + <p>Caillou volvía con su hermanita Rosi y su mamá de la biblioteca y traían un montón de libros que Caillou quería leer, especialmente uno de piratas. 
Capítulo titulado "Caillou ratón de biblioteca".</p> + <div class="pie"> + <div class="categoria"> + <span>Categoría:</span> + <a href="http://www.quierodibujosanimados.com/cat/caillou/14" title="Caillou" class="categoria">Caillou</a> + </div> + <div class="puntuacion"> + <div class="rating_16 punt_0" data-noticia="954"> + <span>0.5</span> + <span>1</span> + <span>1.5</span> + <span>2</span> + <span>2.5</span> + <span>3</span> + <span>3.5</span> + <span>4</span> + <span>4.5</span> + <span>5</span> + </div> + </div> + </div> + <span class="pico"></span> + </div> + <div class="dibujo"> + <a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca" class="thumb"> + <img src="http://www.quierodibujosanimados.com/i/thm-Caillou-raton-de-biblioteca.jpg" alt="Caillou ratón de biblioteca" width="137" height="174" /> + </a> + <h4><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h4> + </div> + </li> + ''' + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + patron = '<div class="dibujo"[^<]+' + patron += '<a href="([^"]+)" title="([^"]+)"[^<]+' + patron += '<img src="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + title = scrapedtitle.strip() + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + fanart=item.fanart)) + + next_page_url = scrapertools.find_single_match(data, '</span[^<]+<a href="([^"]+)">') + if next_page_url != "": + itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente", + url=urlparse.urljoin(item.url, next_page_url), folder=True, + fanart=item.fanart)) + + return itemlist diff --git a/plugin.video.alfa/channels/renumbertools.py b/plugin.video.alfa/channels/renumbertools.py new file mode 100755 index 00000000..87f1f8eb --- /dev/null +++ b/plugin.video.alfa/channels/renumbertools.py @@ -0,0 +1,990 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# renumeratetools - se encarga de renumerar episodios +# -------------------------------------------------------------------------------- + +import os + +try: + import xbmcgui +except: + xbmcgui = None + +from core import config +from core import jsontools +from core import logger +from core.item import Item +from platformcode import platformtools + +TAG_TVSHOW_RENUMERATE = "TVSHOW_RENUMBER" +TAG_SEASON_EPISODE = "season_episode" +__channel__ = "renumbertools" + + +def access(): + """ + Devuelve si se puede usar o no renumbertools + """ + allow = False + + if config.is_xbmc(): + allow = True + + return allow + + +def context(item): + """ + Para xbmc/kodi que pueden mostrar el menú contextual, se añade un menu para configuración + la opción de renumerar, sólo si es para series. + + @param item: elemento para obtener la información y ver que contexto añadir + @type item: item + @return: lista de opciones a mostrar en el menú contextual + @rtype: list + """ + + # Dependiendo de como sea el contexto lo guardamos y añadimos las opciones de filtertools. 
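+    # A minimal sketch of the normalisation below (the example values are hypothetical):
+    #
+    #     _context = item.context.split("|") if isinstance(item.context, str) else \
+    #         (item.context if isinstance(item.context, list) else [])
+    #     # "buscar|info" -> ["buscar", "info"]; a list passes through; anything else -> []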
+ if type(item.context) == str: + _context = item.context.split("|") + elif type(item.context) == list: + _context = item.context + else: + _context = [] + + if access(): + dict_data = {"title": "RENUMERAR", "action": "config_item", "channel": "renumbertools"} + _context.append(dict_data) + + return _context + + +def show_option(channel, itemlist): + if access(): + itemlist.append(Item(channel=__channel__, title="[COLOR yellow]Configurar renumeración en series...[/COLOR]", + action="load", from_channel=channel)) + + return itemlist + + +def load(item): + return mainlist(channel=item.from_channel) + + +def mainlist(channel): + """ + Muestra una lista de las series renumeradas + + :param channel: nombre del canal para obtener las series renumeradas + :type channel: str + :return: lista de Item + :rtype: list[Item] + """ + logger.info() + itemlist = [] + dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) + + idx = 0 + for tvshow in sorted(dict_series): + tag_color = "0xff008000" + if idx % 2 == 0: + tag_color = "blue" + + idx += 1 + name = tvshow + title = "Configurar [COLOR %s][%s][/COLOR]" % (tag_color, name) + + itemlist.append(Item(channel=__channel__, action="config_item", title=title, show=name, from_channel=channel)) + + if len(itemlist) == 0: + itemlist.append(Item(channel=channel, action="mainlist", + title="No se han encontrado series, busca una serie y pulsa en menú contextual " + "'RENUMERAR'")) + + return itemlist + + +def config_item(item): + """ + muestra una serie renumerada para su configuración + + :param item: item + :type item: Item + """ + logger.info("item %s" % item.tostring("\n")) + + dict_series = jsontools.get_node_from_file(item.from_channel, TAG_TVSHOW_RENUMERATE) + data = dict_series.get(item.show, {}) + + if data: + data = data.get(TAG_SEASON_EPISODE, []) + + ventana = RenumberWindow(show=item.show, channel=item.from_channel, data=data) + del ventana + else: + # tenemos información y devolvemos los datos añadidos para que se muestre en la ventana + if data: + return add_season(data) + # es la primera vez que se añaden datos (usando menú contextual) por lo que no devolvemos nada + # para evitar error al listar los items + else: + data = add_season(data) + write_data(item.from_channel, item.show, data) + + +def numbered_for_tratk(channel, show, season, episode): + """ + Devuelve la temporada y episodio convertido para que se marque correctamente en tratk.tv + + @param channel: Nombre del canal + @type channel: str + @param show: Nombre de la serie a comprobar + @type show: str + @param season: Temporada que devuelve el scrapper + @type season: int + @param episode: Episodio que devuelve el scrapper + @type episode: int + @return: season, episode + @rtype: int, int + """ + logger.info() + + if access(): + show = show.lower() + + new_season = season + new_episode = episode + dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) + + # ponemos en minusculas el key, ya que previamente hemos hecho lo mismo con show. 
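+        # A one-line sketch of the key folding done below, so the lookup matches the
+        # already lower-cased show name:
+        #
+        #     dict_series = dict((k.lower(), v) for k, v in dict_series.items())
+        #     # {"Fairy Tail": {...}} -> {"fairy tail": {...}}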
+ for key in dict_series.keys(): + new_key = key.lower() + if new_key != key: + dict_series[new_key] = dict_series[key] + del dict_series[key] + + if show in dict_series: + logger.debug("ha encontrado algo: %s" % dict_series[show]) + + if len(dict_series[show]['season_episode']) > 1: + for row in dict_series[show]['season_episode']: + + if new_episode > row[1]: + new_episode -= row[1] + new_season = row[0] + break + + else: + new_season = dict_series[show]['season_episode'][0][0] + new_episode += dict_series[show]['season_episode'][0][1] + + logger.debug("%s:%s" % (new_season, new_episode)) + else: + # no se tiene acceso se devuelven los datos. + new_season = season + new_episode = episode + + return new_season, new_episode + + +def borrar(channel, show): + logger.info() + heading = "¿Está seguro que desea eliminar renumeración?" + line1 = "Pulse 'Si' para eliminar la renumeración de [COLOR blue]%s[/COLOR], pulse 'No' o cierre la ventana " \ + "para no hacer nada." % show.strip() + + if platformtools.dialog_yesno(heading, line1) == 1: + dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) + dict_series.pop(show, None) + + result, json_data = jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE) + + if result: + message = "FILTRO ELIMINADO" + else: + message = "Error al guardar en disco" + + heading = show.strip() + platformtools.dialog_notification(heading, message) + + +def add_season(data=None): + logger.debug("data %s" % data) + heading = "Introduzca el número de la temporada" + # default = 2 + # se reordena la lista + list_season_episode = data + if list_season_episode: + list_season_episode.sort(key=lambda el: int(el[0]), reverse=False) + + # if list_season_episode: + # # mostrar temporada + 1 de la lista + # # TODO buscar la primera posicion libre + # default = list_season_episode[0][0]+1 + + season = platformtools.dialog_numeric(0, heading) # , str(default)) + for element in list_season_episode: + if int(season) == element[0]: + platformtools.dialog_notification("No se añade la temporada", "Ya existe, edíte la existente") + return + + # si hemos insertado un valor en la temporada + if season != "" and int(season) > 0: + heading = "Introduzca el número de episodio desde que empieza la temporada" + # default = 0 + # if list_season_episode: + # for e in list_season_episode: + # # mostrar suma episodios de la lista + # # sumar hasta el indice del primer libre encontrado + # default += e[1] + episode = platformtools.dialog_numeric(0, heading) # , str(default)) + + # si hemos insertado un valor en el episodio + if episode != "": + if list_season_episode: + list_season_episode.insert(0, [int(season), int(episode)]) + new_list_season_episode = list_season_episode[:] + return new_list_season_episode + else: + return [[int(season), int(episode)]] + + +def write_data(channel, show, data): + # OBTENEMOS LOS DATOS DEL JSON + dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE) + tvshow = show.strip() + list_season_episode = dict_series.get(tvshow, {}).get(TAG_SEASON_EPISODE, []) + logger.debug("data %s" % list_season_episode) + + if data: + # cambiamos el orden para que se vea en orden descendente y usarse bien en el _data.json + data.sort(key=lambda el: int(el[0]), reverse=True) + dict_renumerate = {TAG_SEASON_EPISODE: data} + + dict_series[tvshow] = dict_renumerate + else: + # hemos borrado todos los elementos, por lo que se borra la serie del fichero + dict_series.pop(tvshow, None) + + result, json_data = 
jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE) + + if result: + if data: + message = "FILTRO GUARDADO" + else: + message = "FILTRO BORRADO" + else: + message = "Error al guardar en disco" + + heading = show.strip() + platformtools.dialog_notification(heading, message) + + +if xbmcgui: + + # Align + ALIGN_LEFT = 0 + ALIGN_RIGHT = 1 + ALIGN_CENTER_X = 2 + ALIGN_CENTER_Y = 4 + ALIGN_CENTER = 6 + ALIGN_TRUNCATED = 8 + ALIGN_JUSTIFY = 10 + + # button ids + ID_BUTTON_CLOSE = 3003 + ID_BUTTON_ADD_SEASON = 3008 + ID_BUTTON_INFO = 3009 + ID_CHECK_UPDATE_INTERNET = 3010 + ID_BUTTON_OK = 3012 + ID_BUTTON_CANCEL = 3013 + ID_BUTTON_DELETE = 3014 + + + class RenumberWindow(xbmcgui.WindowDialog): + def __init__(self, *args, **kwargs): + logger.debug() + + if xbmcgui.__version__ == "1.2": + self.setCoordinateResolution(1) + else: + self.setCoordinateResolution(5) + + self.show = kwargs.get("show") + self.channel = kwargs.get("channel") + self.data = kwargs.get("data") + self.init = True + + self.mediapath = os.path.join(config.get_runtime_path(), 'resources', 'skins', 'Default', 'media') + self.font = "font12" + + window_bg = xbmcgui.ControlImage(320, 130, 600, 440, os.path.join(self.mediapath, 'Windows', 'DialogBack.png')) + self.addControl(window_bg) + + header_bg = xbmcgui.ControlImage(window_bg.getX(), window_bg.getY() + 8, window_bg.getWidth(), 35, + os.path.join(self.mediapath, 'Windows', 'dialogheader.png')) + self.addControl(header_bg) + + btn_close_w = 64 + self.btn_close = xbmcgui.ControlButton(window_bg.getX() + window_bg.getWidth() - btn_close_w - 13, + header_bg.getY() + 6, btn_close_w, 30, '', + focusTexture=os.path.join(self.mediapath, 'Controls', + 'DialogCloseButton-focus.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'DialogCloseButton.png')) + self.addControl(self.btn_close) + + header_title_x = window_bg.getX() + 20 + header_title = xbmcgui.ControlFadeLabel(header_title_x, header_bg.getY() + 5, self.btn_close.getX() - + header_title_x, 30, font="font12_title", textColor="0xFFFFA500", + _alignment=ALIGN_CENTER) + self.addControl(header_title) + header_title.addLabel(self.show) + + self.controls_bg = xbmcgui.ControlImage(window_bg.getX() + 20, header_bg.getY() + header_bg.getHeight() + 6, + 562, 260, os.path.join(self.mediapath, 'Windows', 'BackControls.png')) + self.addControl(self.controls_bg) + + self.scroll_bg = xbmcgui.ControlImage(window_bg.getX() + window_bg.getWidth() - 25, self.controls_bg.getY(), 10, + self.controls_bg.getHeight(), os.path.join(self.mediapath, 'Controls', + 'ScrollBack.png')) + self.addControl(self.scroll_bg) + self.scroll_bg.setVisible(False) + + self.scroll2_bg = xbmcgui.ControlImage(window_bg.getX() + window_bg.getWidth() - 25, self.controls_bg.getY(), + 10, self.controls_bg.getHeight(), os.path.join(self.mediapath, + 'Controls', + 'ScrollBar.png')) + self.addControl(self.scroll2_bg) + self.scroll2_bg.setVisible(False) + + btn_add_season = xbmcgui.ControlButton(window_bg.getX() + 20, self.controls_bg.getY() + + self.controls_bg.getHeight() + 14, 165, 30, 'Añadir Temporada', + font=self.font, focusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKey.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKeyNF.png'), alignment=ALIGN_CENTER) + self.addControl(btn_add_season) + + self.btn_info = xbmcgui.ControlButton(window_bg.getX() + 210, btn_add_season.getY(), 120, 30, 'Información', + font=self.font, focusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKey.png'), + 
noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKeyNF.png'), alignment=ALIGN_CENTER) + self.addControl(self.btn_info) + + check_update_internet_w = 235 + # Versiones antiguas no admite algunas texturas + if xbmcgui.__version__ in ["1.2", "2.0"]: + self.check_update_internet = xbmcgui.ControlRadioButton( + window_bg.getX() + window_bg.getWidth() - check_update_internet_w - 20, btn_add_season.getY() - 3, + check_update_internet_w, 34, "Actualizar desde Internet:", font=self.font, + focusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png')) + else: + self.check_update_internet = xbmcgui.ControlRadioButton( + window_bg.getX() + window_bg.getWidth() - check_update_internet_w - 20, btn_add_season.getY() - 3, + check_update_internet_w, 34, "Actualizar desde Internet:", font=self.font, + focusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemFO.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', 'MenuItemNF.png'), + focusOnTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-focus.png'), + noFocusOnTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-focus.png'), + focusOffTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-nofocus.png'), + noFocusOffTexture=os.path.join(self.mediapath, 'Controls', 'radiobutton-nofocus.png')) + + self.addControl(self.check_update_internet) + self.check_update_internet.setEnabled(False) + + hb_bg = xbmcgui.ControlImage(window_bg.getX() + 20, btn_add_season.getY() + btn_add_season.getHeight() + 13, + window_bg.getWidth() - 40, 2, + os.path.join(self.mediapath, 'Controls', 'ScrollBack.png')) + self.addControl(hb_bg) + + self.btn_ok = xbmcgui.ControlButton(window_bg.getX() + 68, hb_bg.getY() + hb_bg.getHeight() + 13, 120, 30, + 'OK', font=self.font, + focusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKey.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKeyNF.png'), alignment=ALIGN_CENTER) + self.addControl(self.btn_ok) + + self.btn_cancel = xbmcgui.ControlButton(self.btn_info.getX() + 30, self.btn_ok.getY(), 120, 30, 'Cancelar', + font=self.font, focusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKey.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKeyNF.png'), + alignment=ALIGN_CENTER) + self.addControl(self.btn_cancel) + + self.btn_delete = xbmcgui.ControlButton(self.btn_cancel.getX() + self.btn_cancel.getWidth() + 50, + self.btn_ok.getY(), 120, 30, 'Borrar', font=self.font, + focusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKey.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKeyNF.png'), + alignment=ALIGN_CENTER) + self.addControl(self.btn_delete) + + self.controls = [] + self.onInit() + self.setFocus(self.controls[0].edit_season) + self.doModal() + + def onInit(self, *args, **kwargs): + try: + # listado temporada / episodios + pos_y = self.controls_bg.getY() + 10 + + # eliminamos los componentes al repintar la ventana + for linea in self.controls: + self.removeControls(linea.list_elements()) + + # mostramos el scroll si hay más de 5 elementos + if len(self.data) > 5: + self.controls_bg.setWidth(545) + self.scroll_bg.setVisible(True) + self.scroll2_bg.setVisible(True) + else: + self.controls_bg.setWidth(562) + self.scroll_bg.setVisible(False) + self.scroll2_bg.setVisible(False) + + self.controls = [] + # cambiamos el orden para que se vea en orden ascendente + 
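+                # Worked example of this ascending sort, with self.data holding
+                # [season, first_episode] pairs:
+                #
+                #     [[2, 48], [1, 0], [3, 96]]  ->  [[1, 0], [2, 48], [3, 96]]
+                #
+                # write_data() stores the same pairs sorted the other way round.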
self.data.sort(key=lambda el: int(el[0]), reverse=False) + + for index, e in enumerate(self.data): + pos_x = self.controls_bg.getX() + 15 + label_season_w = 100 + label_season = xbmcgui.ControlLabel(pos_x, pos_y + 3, label_season_w, 34, + "Temporada:", font=self.font, textColor="0xFF2E64FE") + self.addControl(label_season) + label_season.setVisible(False) + + pos_x += label_season_w + 5 + + # TODO mirar retro-compatilibidad + # if xbmcgui.ControlEdit == ControlEdit: + # edit_season = xbmcgui.ControlEdit(0, 0, 0, 0, '', font=self.font, isPassword=False, + # textColor='', + # focusTexture=os.path.join(self.mediapath, 'Controls', + # 'MenuItemFO.png'), + # noFocusTexture=os.path.join(self.mediapath, 'Controls', + # 'MenuItemNF.png'), window=self) + # else: + + # control bugeado se tiene que usar metodos sets para que se cree correctamente. + edit_season = xbmcgui.ControlEdit(0, 0, 0, 0, "", self.font, "", '', 4, isPassword=False, + focusTexture=os.path.join(self.mediapath, 'Controls', + 'MenuItemFO.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'MenuItemNF.png')) + self.addControl(edit_season) + edit_season.setText(str(e[0])) + # edit_season.setLabel("Temporada:", font=self.font, textColor="0xFF2E64FE") + edit_season.setPosition(pos_x, pos_y - 2) + edit_season.setWidth(25) + edit_season.setHeight(35) + edit_season.setVisible(False) + + label_episode_w = 90 + pos_x += edit_season.getWidth() + 60 + label_episode = xbmcgui.ControlLabel(pos_x, pos_y + 3, label_episode_w, 34, "Episodios:", + font=self.font, textColor="0xFF2E64FE") + self.addControl(label_episode) + label_episode.setVisible(False) + + pos_x += label_episode_w + 5 + # control bugeado se tiene que usar metodos sets para que se cree correctamente. + edit_episode = xbmcgui.ControlEdit(0, 0, 0, 0, "", self.font, "", '', 4, isPassword=False, + focusTexture=os.path.join(self.mediapath, 'Controls', + 'MenuItemFO.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'MenuItemNF.png')) + self.addControl(edit_episode) + edit_episode.setText(str(e[1])) + # edit_episode.setLabel("Episodios:", font=self.font, textColor="0xFF2E64FE") + edit_episode.setPosition(pos_x, pos_y - 2) + edit_episode.setWidth(40) + edit_episode.setHeight(35) + edit_episode.setVisible(False) + + btn_delete_season_w = 120 + btn_delete_season = xbmcgui.ControlButton(self.controls_bg.getX() + self.controls_bg.getWidth() - + btn_delete_season_w - 14, pos_y, btn_delete_season_w, 30, + 'Eliminar', font=self.font, + focusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKey.png'), + noFocusTexture=os.path.join(self.mediapath, 'Controls', + 'KeyboardKeyNF.png'), + alignment=ALIGN_CENTER) + self.addControl(btn_delete_season) + btn_delete_season.setVisible(False) + + hb_bg = xbmcgui.ControlImage(self.controls_bg.getX() + 10, pos_y + 40, self.controls_bg.getWidth() - 20, + 2, os.path.join(self.mediapath, 'Controls', 'ScrollBack.png')) + self.addControl(hb_bg) + hb_bg.setVisible(False) + + group = ControlGroup(label_season=label_season, edit_season=edit_season, label_episode=label_episode, + edit_episode=edit_episode, btn_delete_season=btn_delete_season, hb=hb_bg) + + pos_y += 50 + + if index < 5: + group.set_visible(True) + + self.controls.append(group) + + if len(self.data) > 5: + self.move_scroll() + + except Exception, Ex: + logger.error("HA HABIDO UNA HOSTIA %s" % Ex) + + # def onClick(self, control_id): + # pass + # + # def onFocus(self, control_id): + # pass + + def onControl(self, control): + # logger.debug("%s" % control.getId()) + 
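+        # onControl receives the activated control; everything below dispatches on
+        # its numeric id, e.g.
+        #
+        #     control.getId() == ID_BUTTON_OK  # -> write_data(...) and close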
control_id = control.getId() + + if control_id == ID_BUTTON_OK: + write_data(self.channel, self.show, self.data) + self.close() + if control_id in [ID_BUTTON_CLOSE, ID_BUTTON_CANCEL]: + self.close() + elif control_id == ID_BUTTON_DELETE: + self.close() + borrar(self.channel, self.show) + elif control_id == ID_BUTTON_ADD_SEASON: + # logger.debug("data que enviamos: {}".format(self.data)) + data = add_season(self.data) + if data: + self.data = data + # logger.debug("data que recibimos: {}".format(self.data)) + self.onInit() + + # si hay más de 5 elementos movemos el scroll + if len(self.data) > 5: + self.scroll(len(self.data) - 2, 1) + self.move_scroll() + + elif control_id == ID_BUTTON_INFO: + self.method_info() + else: + for x, grupo in enumerate(self.controls): + if control_id == self.controls[x].btn_delete_season.getId(): + # logger.debug("A data %s" % self.data) + self.removeControls(self.controls[x].list_elements()) + del self.controls[x] + del self.data[x] + # logger.debug("D data %s" % self.data) + self.onInit() + + return + + def onAction(self, action): + # logger.debug("%s" % action.getId()) + # logger.debug("focus %s" % self.getFocusId()) + # Obtenemos el foco + focus = self.getFocusId() + + action = action.getId() + # Flecha izquierda + if action == xbmcgui.ACTION_MOVE_LEFT: + # Si el foco no está en ninguno de los 6 botones inferiores, y esta en un "list" cambiamos el valor + if focus not in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET, + ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: + + # Localizamos en el listado de controles el control que tiene el focus + # todo mirar tema del cursor en el valor al desplazar lateralmente + for x, linea in enumerate(self.controls): + if focus == linea.edit_season.getId(): + return self.setFocus(self.controls[x].btn_delete_season) + elif focus == linea.edit_episode.getId(): + return self.setFocus(self.controls[x].edit_season) + elif focus == linea.btn_delete_season.getId(): + return self.setFocus(self.controls[x].edit_episode) + + # Si el foco está en alguno de los 6 botones inferiores, movemos al siguiente + else: + if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: + if focus == ID_BUTTON_ADD_SEASON: + self.setFocusId(ID_BUTTON_INFO) + # TODO cambiar cuando se habilite la opcion de actualizar por internet + # self.setFocusId(ID_CHECK_UPDATE_INTERNET) + elif focus == ID_BUTTON_INFO: + self.setFocusId(ID_BUTTON_ADD_SEASON) + elif focus == ID_CHECK_UPDATE_INTERNET: + self.setFocusId(ID_BUTTON_INFO) + + elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: + if focus == ID_BUTTON_OK: + self.setFocusId(ID_BUTTON_DELETE) + elif focus == ID_BUTTON_CANCEL: + self.setFocusId(ID_BUTTON_OK) + elif focus == ID_BUTTON_DELETE: + self.setFocusId(ID_BUTTON_CANCEL) + + # Flecha derecha + elif action == xbmcgui.ACTION_MOVE_RIGHT: + # Si el foco no está en ninguno de los 6 botones inferiores, y esta en un "list" cambiamos el valor + if focus not in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET, + ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: + + # Localizamos en el listado de controles el control que tiene el focus + # todo mirar tema del cursor en el valor al desplazar lateralmente + for x, linea in enumerate(self.controls): + if focus == linea.edit_season.getId(): + return self.setFocus(self.controls[x].edit_episode) + elif focus == linea.edit_episode.getId(): + return self.setFocus(self.controls[x].btn_delete_season) + elif focus == linea.btn_delete_season.getId(): + 
return self.setFocus(self.controls[x].edit_season) + + # Si el foco está en alguno de los 6 botones inferiores, movemos al siguiente + else: + if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: + if focus == ID_BUTTON_ADD_SEASON: + self.setFocusId(ID_BUTTON_INFO) + if focus == ID_BUTTON_INFO: + self.setFocusId(ID_BUTTON_ADD_SEASON) + # TODO cambiar cuando se habilite la opcion de actualizar por internet + # self.setFocusId(ID_CHECK_UPDATE_INTERNET) + if focus == ID_CHECK_UPDATE_INTERNET: + self.setFocusId(ID_BUTTON_OK) + + elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: + if focus == ID_BUTTON_OK: + self.setFocusId(ID_BUTTON_CANCEL) + if focus == ID_BUTTON_CANCEL: + self.setFocusId(ID_BUTTON_DELETE) + if focus == ID_BUTTON_DELETE: + self.setFocusId(ID_BUTTON_OK) + + # Flecha arriba + elif action == xbmcgui.ACTION_MOVE_UP: + self.move_up(focus) + # Flecha abajo + elif action == xbmcgui.ACTION_MOVE_DOWN: + self.move_down(focus) + # scroll up + elif action == xbmcgui.ACTION_MOUSE_WHEEL_UP: + self.move_up(focus) + # scroll down + elif action == xbmcgui.ACTION_MOUSE_WHEEL_DOWN: + self.move_down(focus) + + # ACTION_PAGE_DOWN = 6 + # ACTION_PAGE_UP = 5 + + # Menú previo o Atrás + elif action in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]: + self.close() + + def move_down(self, focus): + # logger.debug("focus " + str(focus)) + # Si el foco está en uno de los tres botones medios, bajamos el foco a la otra linea de botones + if focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: + if focus == ID_BUTTON_ADD_SEASON: + self.setFocusId(ID_BUTTON_OK) + elif focus == ID_BUTTON_INFO: + self.setFocusId(ID_BUTTON_CANCEL) + elif focus == ID_CHECK_UPDATE_INTERNET: + self.setFocusId(ID_BUTTON_DELETE) + # Si el foco está en uno de los tres botones inferiores, subimos el foco al primer control del listado + elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: + first_visible = 0 + for x, linea in enumerate(self.controls): + if linea.get_visible(): + first_visible = x + break + + if focus == ID_BUTTON_OK: + self.setFocus(self.controls[first_visible].edit_season) + elif focus == ID_BUTTON_CANCEL: + self.setFocus(self.controls[first_visible].edit_episode) + elif focus == ID_BUTTON_DELETE: + self.setFocus(self.controls[first_visible].btn_delete_season) + # nos movemos entre los elementos del listado + else: + # Localizamos en el listado de controles el control que tiene el focus + for x, linea in enumerate(self.controls): + if focus == linea.edit_season.getId(): + if x + 1 < len(self.controls): + if not self.controls[x + 1].get_visible(): + self.scroll(x, 1) + + return self.setFocus(self.controls[x + 1].edit_season) + else: + return self.setFocusId(ID_BUTTON_ADD_SEASON) + elif focus == linea.edit_episode.getId(): + if x + 1 < len(self.controls): + if not self.controls[x + 1].get_visible(): + self.scroll(x, 1) + + return self.setFocus(self.controls[x + 1].edit_episode) + else: + self.setFocusId(ID_BUTTON_INFO) + elif focus == linea.btn_delete_season.getId(): + if x + 1 < len(self.controls): + if not self.controls[x + 1].get_visible(): + self.scroll(x, 1) + + return self.setFocus(self.controls[x + 1].btn_delete_season) + else: + return self.setFocusId(ID_BUTTON_INFO) + # TODO cambiar cuando se habilite la opcion de actualizar por internet + # return self.setFocusId(ID_CHECK_UPDATE_INTERNET) + + def move_up(self, focus): + # Si el foco está en uno de los tres botones medios, subimos el foco al último control del listado + if 
focus in [ID_BUTTON_ADD_SEASON, ID_BUTTON_INFO, ID_CHECK_UPDATE_INTERNET]: + last_visible = 0 + for x, linea in reversed(list(enumerate(self.controls))): + if linea.get_visible(): + last_visible = x + break + + if focus == ID_BUTTON_ADD_SEASON: + self.setFocus(self.controls[last_visible].edit_season) + elif focus == ID_BUTTON_INFO: + self.setFocus(self.controls[last_visible].edit_episode) + elif focus == ID_CHECK_UPDATE_INTERNET: + self.setFocus(self.controls[last_visible].btn_delete_season) + # Si el foco está en uno de los tres botones inferiores, subimos el foco a la otra linea de botones + elif focus in [ID_BUTTON_OK, ID_BUTTON_CANCEL, ID_BUTTON_DELETE]: + if focus == ID_BUTTON_OK: + self.setFocusId(ID_BUTTON_ADD_SEASON) + elif focus == ID_BUTTON_CANCEL: + self.setFocusId(ID_BUTTON_INFO) + elif focus == ID_BUTTON_DELETE: + self.setFocusId(ID_BUTTON_INFO) + # TODO cambiar cuando se habilite la opcion de actualizar por internet + # self.setFocusId(ID_CHECK_UPDATE_INTERNET) + # nos movemos entre los elementos del listado + else: + # Localizamos en el listado de controles el control que tiene el focus + for x, linea in enumerate(self.controls): + if focus == linea.edit_season.getId(): + if x > 0: + if not self.controls[x - 1].get_visible(): + self.scroll(x, -1) + + return self.setFocus(self.controls[x - 1].edit_season) + else: + return self.setFocusId(ID_BUTTON_OK) + elif focus == linea.edit_episode.getId(): + if x > 0: + if not self.controls[x - 1].get_visible(): + self.scroll(x, -1) + + return self.setFocus(self.controls[x - 1].edit_episode) + else: + self.setFocusId(ID_BUTTON_CANCEL) + elif focus == linea.btn_delete_season.getId(): + if x > 0: + if not self.controls[x - 1].get_visible(): + self.scroll(x, -1) + + return self.setFocus(self.controls[x - 1].btn_delete_season) + else: + return self.setFocusId(ID_BUTTON_DELETE) + # TODO cambiar cuando se habilite la opcion de actualizar por internet + # return self.setFocusId(ID_CHECK_UPDATE_INTERNET) + + def scroll(self, position, movement): + try: + for index, group in enumerate(self.controls): + # ponemos todos los elementos como no visibles + group.set_visible(False) + + if movement > 0: + pos_fin = position + movement + 1 + pos_inicio = pos_fin - 5 + else: + pos_inicio = position + movement + pos_fin = pos_inicio + 5 + + # logger.debug("position {}, movement {}, pos_inicio{}, pos_fin{}, self.data.length{}". 
+ # format(position, movement, pos_inicio, pos_fin, len(self.data))) + pos_y = self.controls_bg.getY() + 10 + for i in range(pos_inicio, pos_fin): + pos_x = self.controls_bg.getX() + 15 + + self.controls[i].label_season.setPosition(pos_x, pos_y + 3) + + pos_x += self.controls[i].label_season.getWidth() + 5 + self.controls[i].edit_season.setPosition(pos_x, pos_y - 2) + + pos_x += self.controls[i].edit_season.getWidth() + 60 + self.controls[i].label_episode.setPosition(pos_x, pos_y + 3) + + pos_x += self.controls[i].label_episode.getWidth() + 5 + self.controls[i].edit_episode.setPosition(pos_x, pos_y - 2) + + self.controls[i].btn_delete_season.setPosition(self.controls_bg.getX() + self.controls_bg.getWidth() - + self.controls[i].btn_delete_season.getWidth() - 14, + pos_y) + + self.controls[i].hb.setPosition(self.controls_bg.getX() + 10, pos_y + 40) + + pos_y += 50 + + # logger.debug("ponemos como True %s" % i) + self.controls[i].set_visible(True) + + self.move_scroll() + + except Exception, Ex: + logger.error("HA HABIDO UNA HOSTIA %s" % Ex) + + def move_scroll(self): + visible_controls = [group for group in self.controls if group.get_visible()] + hidden_controls = [group for group in self.controls if not group.get_visible()] + scroll_position = self.controls.index(visible_controls[0]) + scrollbar_height = self.scroll_bg.getHeight() - (len(hidden_controls) * 10) + scrollbar_y = self.scroll_bg.getPosition()[1] + (scroll_position * 10) + self.scroll2_bg.setPosition(self.scroll_bg.getPosition()[0], scrollbar_y) + self.scroll2_bg.setHeight(scrollbar_height) + + @staticmethod + def method_info(): + title = "Información" + text = "La primera temporada que se añade siempre empieza en \"0\" episodios, la segunda temporada que se " + text += "añade empieza en el número total de episodios de la primera temporada, la tercera temporada será " + text += "la suma de los episodios de las temporadas previas y así sucesivamente.\n" + text += "[COLOR blue]\nEjemplo de serie dividida en varias temporadas:\n" + text += "\nFairy Tail:\n" + text += " - SEASON 1: EPISODE 48 --> [season 1, episode: 0]\n" + text += " - SEASON 2: EPISODE 48 --> [season 2, episode: 48]\n" + text += " - SEASON 3: EPISODE 54 --> [season 3, episode: 96 ([48=season2] + [48=season1])]\n" + text += " - SEASON 4: EPISODE 175 --> [season 4, episode: 150 ([54=season3] + [48=season2] + [48=season1" \ + "])][/COLOR]\n" + text += "[COLOR green]\nEjemplo de serie que continúa en la temporada de la original:\n" + text += "\nFate/Zero 2nd Season:\n" + text += " - SEASON 1: EPISODE 12 --> [season 1, episode: 13][/COLOR]\n" + + text += "[COLOR blue]\nEjemplo de serie que es la segunda temporada de la original:\n" + text += "\nFate/kaleid liner Prisma☆Illya 2wei!:\n" + text += " - SEASON 1: EPISODE 12 --> [season 2, episode: 0][/COLOR]\n" + + return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text) + + + class ControlGroup: + """ + conjunto de controles, son los elementos que se muestran por línea de una lista.
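+
+        Hypothetical usage, mirroring how onInit() builds each row:
+
+            group = ControlGroup(label_season=label_season, edit_season=edit_season,
+                                 label_episode=label_episode, edit_episode=edit_episode,
+                                 btn_delete_season=btn_delete_season, hb=hb_bg)
+            group.set_visible(True)   # shows or hides the whole row at once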
+ """ + + def __init__(self, label_season, edit_season, label_episode, edit_episode, btn_delete_season, hb): + self.visible = False + self.label_season = label_season + self.edit_season = edit_season + self.label_episode = label_episode + self.edit_episode = edit_episode + self.btn_delete_season = btn_delete_season + self.hb = hb + + def list_elements(self): + return [self.label_season, self.edit_season, self.label_episode, self.edit_episode, self.btn_delete_season, + self.hb] + + def get_visible(self): + return self.visible + + def set_visible(self, visible): + self.visible = visible + self.label_season.setVisible(visible) + self.edit_season.setVisible(visible) + self.label_episode.setVisible(visible) + self.edit_episode.setVisible(visible) + self.btn_delete_season.setVisible(visible) + self.hb.setVisible(visible) + + + class TextBox(xbmcgui.WindowXMLDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.title = kwargs.get('title') + self.text = kwargs.get('text') + self.doModal() + + def onInit(self): + try: + self.getControl(5).setText(self.text) + self.getControl(1).setLabel(self.title) + except: + pass + + def onClick(self, control_id): + pass + + def onFocus(self, control_id): + pass + + def onAction(self, action): + self.close() + + # TODO mirar retro-compatiblidad + # class ControlEdit(xbmcgui.ControlButton): + # def __new__(self, *args, **kwargs): + # del kwargs["isPassword"] + # del kwargs["window"] + # args = list(args) + # return xbmcgui.ControlButton.__new__(self, *args, **kwargs) + # + # def __init__(self, *args, **kwargs): + # self.isPassword = kwargs["isPassword"] + # self.window = kwargs["window"] + # self.label = "" + # self.text = "" + # self.textControl = xbmcgui.ControlLabel(self.getX(), self.getY(), self.getWidth(), self.getHeight(), + # self.text, + # font=kwargs["font"], textColor=kwargs["textColor"], alignment=4 | 1) + # self.window.addControl(self.textControl) + # + # def setLabel(self, val): + # self.label = val + # xbmcgui.ControlButton.setLabel(self, val) + # + # def getX(self): + # return xbmcgui.ControlButton.getPosition(self)[0] + # + # def getY(self): + # return xbmcgui.ControlButton.getPosition(self)[1] + # + # def setEnabled(self, e): + # xbmcgui.ControlButton.setEnabled(self, e) + # self.textControl.setEnabled(e) + # + # def setWidth(self, w): + # xbmcgui.ControlButton.setWidth(self, w) + # self.textControl.setWidth(w / 2) + # + # def setHeight(self, w): + # xbmcgui.ControlButton.setHeight(self, w) + # self.textControl.setHeight(w) + # + # def setPosition(self, x, y): + # xbmcgui.ControlButton.setPosition(self, x, y) + # self.textControl.setPosition(x + self.getWidth() / 2, y) + # + # def setText(self, text): + # self.text = text + # if self.isPassword: + # self.textControl.setLabel("*" * len(self.text)) + # else: + # self.textControl.setLabel(self.text) + # + # def getText(self): + # return self.text + # + # + # if not hasattr(xbmcgui, "ControlEdit"): + # xbmcgui.ControlEdit = ControlEdit diff --git a/plugin.video.alfa/channels/repelis.json b/plugin.video.alfa/channels/repelis.json new file mode 100755 index 00000000..2803277a --- /dev/null +++ b/plugin.video.alfa/channels/repelis.json @@ -0,0 +1,38 @@ +{ + "id": "repelis", + "name": "Repelis", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "repelis.png", + "banner": "repelis.png", + "version": 1, + "changes": [ + { + "date": "18/05/2017", + "description": "Cambiado patrón en búsqueda de videos" + }, + { + "date": "15/03/2017", + 
"description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "movie", + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/repelis.py b/plugin.video.alfa/channels/repelis.py new file mode 100755 index 00000000..17ae1de9 --- /dev/null +++ b/plugin.video.alfa/channels/repelis.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +# Main list manual +def mainlist(item): + logger.info() + itemlist = [] + + item.url = "http://www.repelis.tv/pag/1" + + mifan = "http://www.psicocine.com/wp-content/uploads/2013/08/Bad_Robot_Logo.jpg" + + itemlist.append(Item(channel=item.channel, action="menupelis", title="Peliculas", url="http://www.repelis.tv/pag/1", + thumbnail="http://www.gaceta.es/sites/default/files/styles/668x300/public/metro_goldwyn_mayer_1926-web.png?itok=-lRSR9ZC", + fanart=mifan)) + itemlist.append(Item(channel=item.channel, action="menuestre", title="Estrenos", + url="http://www.repelis.tv/archivos/estrenos/pag/1", + thumbnail="http://t0.gstatic.com/images?q=tbn:ANd9GcS4g68rmeLQFuX7iCrPwd00FI_OlINZXCYXEFrJHTZ0VSHefIIbaw", + fanart=mifan)) + itemlist.append( + Item(channel=item.channel, action="menudesta", title="Destacadas", url="http://www.repelis.tv/pag/1", + thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan)) + itemlist.append(Item(channel=item.channel, action="todaspelis", title="Proximos estrenos", + url="http://www.repelis.tv/archivos/proximos-estrenos/pag/1", + thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on", + fanart=mifan)) + itemlist.append( + Item(channel=item.channel, action="todaspelis", title="Todas las Peliculas", url="http://www.repelis.tv/pag/1", + thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan)) + + if config.get_setting("adult_mode") != 0: + itemlist.append(Item(channel=item.channel, action="todaspelis", title="Eroticas +18", + url="http://www.repelis.tv/genero/eroticas/pag/1", + thumbnail="http://www.topkamisetas.com/catalogo/images/TB0005.gif", + fanart="http://www.topkamisetas.com/catalogo/images/TB0005.gif")) + # Quito la busqueda por año si no esta enabled el adultmode, porque no hay manera de filtrar los enlaces eroticos72 + itemlist.append( + Item(channel=item.channel, action="poranyo", title="Por Año", url="http://www.repelis.tv/anio/2016", + thumbnail="http://t3.gstatic.com/images?q=tbn:ANd9GcSkxiYXdBcI0cvBLsb_nNlz_dWXHRl2Q-ER9dPnP1gNUudhrqlR", + fanart=mifan)) + + # Por categoria si que filtra la categoria de eroticos + itemlist.append(Item(channel=item.channel, action="porcateg", title="Por Categoria", + url="http://www.repelis.tv/genero/accion/pag/1", + thumbnail="http://www.logopro.it/blog/wp-content/uploads/2013/07/categoria-sigaretta-elettronica.png", + fanart=mifan)) + itemlist.append( + Item(channel=item.channel, action="search", title="Buscar...", url="http://www.repelis.tv/search/?s=", + thumbnail="http://thumbs.dreamstime.com/x/buscar-pistas-13159747.jpg", fanart=mifan)) + + return itemlist + + +# Peliculas recien agregadas ( quitamos las de estreno del 
slide-bar en el top +def menupelis(item): + logger.info(item.url) + + itemlist = [] + + data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8') + + patronenlaces = '<h3>Películas Recién Agregadas</h3>.*?>(.*?)</section>' + matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data) + + logger.info("begin ----------") + scrapertools.printMatches(matchesenlaces) + logger.info("end ----------") + + for bloque_enlaces in matchesenlaces: + + patron = '<div class="poster-media-card">.*?' + patron += '<a href="(.*?)".*?title="(.*?)".*?' + patron += '<img src="(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + logger.info("He encontrado el segundo bloque") + title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") + title = title.replace("Online", ""); + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, + thumbnail=thumbnail, fanart=thumbnail)) + + ## Paginación + # <span class="current">2</span><a href="http://www.repelis.tv/page/3" + + # Si falla no muestra ">> Página siguiente" + try: + next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"') + title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]" + itemlist.append( + Item(channel=item.channel, title=title, url=next_page, action="menupelis", thumbnail=item.thumbnail, + fanart=item.fanart, folder=True)) + except: + pass + return itemlist + + +# Todas las peliculas +def todaspelis(item): + logger.info(item.url) + + itemlist = [] + + data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8') + print data + patronenlaces = '<h1>.*?</h1>.*?>(.*?)</section>' + matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data) + + for bloque_enlaces in matchesenlaces: + + # patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"' + + patron = '<div class="poster-media-card">.*?' + patron += '<a href="(.*?)".*?title="(.*?)".*?' 
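+    # Title clean-up applied in the loop below, assuming remove_show_from_title()
+    # strips the leading label, sketched on a typical entry:
+    #
+    #     "Ver Película Abzurdah Online" -> "Abzurdah Online" -> "Abzurdah "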
+ patron += '<img src="(.*?)"' + + matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) + scrapertools.printMatches(matches) + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") + title = title.replace("Online", ""); + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, + thumbnail=thumbnail, fanart=thumbnail)) + + ## Paginación + # <span class="current">2</span><a href="http://www.repelis.tv/page/3" + + # Si falla no muestra ">> Página siguiente" + try: + next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"') + title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]" + itemlist.append(Item(channel=item.channel, title=title, url=next_page, action="todaspelis", folder=True)) + except: + pass + return itemlist + + +# Peliculas Destacadas +def menudesta(item): + logger.info(item.url) + + itemlist = [] + + data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8') + + patronenlaces = '<h3>.*?Destacadas.*?>(.*?)<h3>' + matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data) + + for bloque_enlaces in matchesenlaces: + + # patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"' + + patron = '<div class="poster-media-card">.*?' + patron += '<a href="(.*?)".*?title="(.*?)".*?' + patron += '<img src="(.*?)"' + + matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) + scrapertools.printMatches(matches) + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") + title = title.replace("Online", ""); + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, + thumbnail=thumbnail, fanart=thumbnail)) + + return itemlist + + +# Peliculas de Estreno +def menuestre(item): + logger.info(item.url) + + itemlist = [] + + data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8') + patronenlaces = '<h1>Estrenos</h1>(.*?)</section>' + matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data) + + for bloque_enlaces in matchesenlaces: + + # patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"' + + patron = '<div class="poster-media-card">.*?' + patron += '<a href="(.*?)".*?title="(.*?)".*?' 
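+    # Pagination further below hangs off the "current page" marker; a single match
+    # on the markup is enough, e.g.:
+    #
+    #     <span class="current">2</span><a href="http://www.repelis.tv/page/3"
+    #
+    #     next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"')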
+ patron += '<img src="(.*?)"' + + matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces) + scrapertools.printMatches(matches) + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") + title = title.replace("Online", ""); + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, + thumbnail=thumbnail, fanart=thumbnail)) + + ## Paginación + # <span class="current">2</span><a href="http://www.repelis.tv/page/3" + + # Si falla no muestra ">> Página siguiente" + try: + next_page = scrapertools.get_match(data, '<span class="current">\d+</span><a href="([^"]+)"') + title = "[COLOR red][B]Pagina siguiente »[/B][/COLOR]" + itemlist.append(Item(channel=item.channel, title=title, url=next_page, action="menuestre", folder=True)) + except: + pass + return itemlist + + +def findvideos(item): + logger.info(item.url) + + itemlist = [] + + data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8') + + '''<h2>Sinopsis</2><p>(.*?)</p> + <div id="informacion" class="tab-pane"> + <h2>Titulo en Español</h2> + <p>Abzurdah</p> + <h2>Titulo Original</h2> + <p>Abzurdah</p> + <h2>Año de Lanzamiento</h2> + <p>2015</p> + <h2>Generos</h2> + <p>Romance</p> + <h2>Idioma</h2> + <p>Latino</p> + <h2>Calidad</h2> + <p>DVD-Rip</p> + ''' + + # estos son los datos para plot + patron = '<h2>Sinopsis</h2>.*?<p>(.*?)</p>.*?<div id="informacion".*?</h2>.*?<p>(.*?)</p>' # titulo + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for sinopsis, title in matches: + title = "[COLOR white][B]" + title + "[/B][/COLOR]" + + patron = '<div id="informacion".*?>(.*?)</div>' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedplot in matches: + splot = title + "\n\n" + plot = scrapedplot + plot = re.sub('<h2>', "[COLOR red][B]", plot) + plot = re.sub('</h2>', "[/B][/COLOR] : ", plot) + plot = re.sub('<p>', "[COLOR green]", plot) + plot = re.sub('</p>', "[/COLOR]\n", plot) + plot = re.sub('<[^>]+>', "", plot) + splot += plot + "\n[COLOR red][B] Sinopsis[/B][/COLOR]\n " + sinopsis + + # datos de los enlaces + ''' + <a rel="nofollow" href="(.*?)".*?<td><img.*?</td><td>(.*?)</td><td>(.*?)</td></tr> + + ">Vimple</td> + ''' + + patron = '<tbody>(.*?)</tbody>' + matchesx = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matchesx) + + for bloq in matchesx: + patron = 'href="(.*?)".*?0 0">(.*?)</.*?<td>(.*?)</.*?<td>(.*?)<' + + matches = re.compile(patron, re.DOTALL).findall(bloq) + # scrapertools.printMatches(matches) + + for scrapedurl, scrapedserver, scrapedlang, scrapedquality in matches: + url = urlparse.urljoin(item.url, scrapedurl) + logger.info("Lang:[" + scrapedlang + "] Quality[" + scrapedquality + "] URL[" + url + "]") + patronenlaces = '.*?://(.*?)/' + matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(scrapedurl) + scrapertools.printMatches(matchesenlaces) + scrapedtitle = "" + for scrapedenlace in matchesenlaces: + scrapedtitle = title + " [COLOR white][ [/COLOR]" + "[COLOR green]" + scrapedquality + "[/COLOR]" + "[COLOR white] ][/COLOR]" + " [COLOR red] [" + scrapedlang + "][/COLOR] » " + scrapedserver + itemlist.append( + Item(channel=item.channel, action="play", title=scrapedtitle, extra=title, url=url, fanart=item.thumbnail, + 
thumbnail=item.thumbnail, plot=splot, folder=False)) + + return itemlist + + +def play(item): + logger.info("url=" + item.url) + + # itemlist = servertools.find_video_items(data=item.url) + + url = scrapertools.find_single_match(scrapertools.cache_page(item.url), '<iframe src="([^"]+)"') + itemlist = servertools.find_video_items(data=url) + + return itemlist + + +def search(item, texto): + logger.info(item.url) + texto = texto.replace(" ", "+") + item.url = 'http://www.repelis.tv/buscar/?s=%s' % (texto) + logger.info(item.url) + + data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8') + + logger.info("data: " + data) + + ''' + <div class="col-xs-2"> + <div class="row"> + <a href="http://www.repelis.tv/8973/pelicula/contracted-phase-ii.html" title="Ver Película Contracted: Phase II Online"> + <img src="http://1.bp.blogspot.com/-YWmw6voBipE/VcB91p-EcnI/AAAAAAAAQZs/EhUzWlInmA8/s175/contracted-phase-2.jpg" border="0"> + ''' + + patron = '<div class="col-xs-2">.*?' + patron += '<div class="row">.*?' + patron += '<a href="(.*?)" title="(.*?)">.*?' + patron += '<img src="(.*?)"' + + logger.info(patron) + + matches = re.compile(patron, re.DOTALL).findall(data) + + scrapertools.printMatches(matches) + print "repelis ..................................." + itemlist = [] + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") + title = title.replace("Online", "") + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + logger.info(url) + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail, + fanart=thumbnail)) + + return itemlist + + +# Por año, aquí está difícil filtrar las "eroticas" así que quito la opcion si no esta el adultmode enabled +def poranyo(item): + logger.info(item.url) + + itemlist = [] + + data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8') + + patron = '<option value="([^"]+)">(.*?)</option>' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for scrapedurl, scrapedtitle in matches: + title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") + title = title.replace("Online", "") + url = urlparse.urljoin(item.url, scrapedurl) + itemlist.append( + Item(channel=item.channel, action="todaspelis", title=title, fulltitle=title, url=url, fanart=item.fanart)) + + return itemlist + + +# Aqui si que se filtran las eroticas +def porcateg(item): + logger.info(item.url) + itemlist = [] + + data = scrapertools.cache_page(item.url).decode('iso-8859-1').encode('utf-8') + patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + itemlist = [] + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película") + title = title.replace("Online", "") + url = urlparse.urljoin(item.url, scrapedurl) + logger.info(url) + # si no esta permitidas categoria adultos, la filtramos + erotica = "" + if config.get_setting("adult_mode") == 0: + patron = '.*?/erotic.*?' 
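+    # The filter below relies on scrapertools.get_match() raising when the pattern
+    # does not match (the same try/except idiom used for pagination), e.g.:
+    #
+    #     get_match("http://www.repelis.tv/genero/accion/pag/1", '.*?/erotic.*?')    # no match -> raises -> category kept
+    #     get_match("http://www.repelis.tv/genero/eroticas/pag/1", '.*?/erotic.*?')  # matches  -> category dropped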
+ try: + erotica = scrapertools.get_match(scrapedurl, patron) + except: + itemlist.append( + Item(channel=item.channel, action="todaspelis", fanart=item.fanart, title=title, fulltitle=title, + url=url)) + else: + itemlist.append(Item(channel=item.channel, action="todaspelis", title=title, fulltitle=title, url=url, + fanart=item.fanart)) + + return itemlist diff --git a/plugin.video.alfa/channels/search.json b/plugin.video.alfa/channels/search.json new file mode 100755 index 00000000..e5808ff9 --- /dev/null +++ b/plugin.video.alfa/channels/search.json @@ -0,0 +1,81 @@ +{ + "id": "search", + "name": "Buscador global", + "active": false, + "adult": false, + "language": "es", + "version": 1, + "changes": [ + { + "date": "25/05/17", + "description": "añadido control parental" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "17/02/17", + "description": "Cambios de detalles visuales" + }, + { + "date": "06/12/16", + "description": "Añadir al menu: 'Recordar última búsqueda'" + }, + { + "date": "01/07/16", + "description": "Eliminado código innecesario." + }, + { + "date": "???", + "description": "Multihilos y seleccion de canales" + } + ], + "categories": [ + "movie" + ], + "settings": [ + { + "id": "multithread", + "type": "bool", + "label": "Buscador MultiThread", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "result_mode", + "type": "list", + "label": "Mostrar resultados:", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Por canales", + "Todo junto" + ] + }, + { + "id": "saved_searches_limit", + "type": "list", + "label": "Busquedas guardadas:", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "10", + "20", + "30", + "40" + ] + }, + { + "id": "last_search", + "type": "bool", + "label": "Recordar última búsqueda", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/search.py b/plugin.video.alfa/channels/search.py new file mode 100755 index 00000000..4f2be5d4 --- /dev/null +++ b/plugin.video.alfa/channels/search.py @@ -0,0 +1,485 @@ +# -*- coding: utf-8 -*- + +import glob +import os +import re +import time +from threading import Thread + +from core import channeltools +from core import config +from core import logger +from core.item import Item +from platformcode import platformtools + + +def mainlist(item): + logger.info() + item.channel = "search" + + itemlist = list() + context = [{"title": "Elegir canales incluidos", + "action": "setting_channel", + "channel": item.channel}] + itemlist.append(Item(channel=item.channel, action="search", + title="Buscar por titulo", context=context, + thumbnail=config.get_thumb("thumb_search.png"))) + itemlist.append(Item(channel=item.channel, action="search", + title="Buscar por categorias (búsqueda avanzada)", extra="categorias", + context=context, + thumbnail=config.get_thumb("thumb_search.png"))) + itemlist.append(Item(channel=item.channel, action="opciones", title="Opciones", + thumbnail=config.get_thumb("thumb_search.png"))) + + saved_searches_list = get_saved_searches() + context2 = context[:] + context2.append({"title": "Borrar búsquedas guardadas", + "action": "clear_saved_searches", + "channel": item.channel}) + logger.info("saved_searches_list=%s" % saved_searches_list) + + if saved_searches_list: + itemlist.append(Item(channel=item.channel, action="", + title="Búsquedas guardadas:", context=context2, + thumbnail=config.get_thumb("thumb_search.png"))) + for 
saved_search_text in saved_searches_list: + itemlist.append(Item(channel=item.channel, action="do_search", + title=' "' + saved_search_text + '"', + extra=saved_search_text, context=context2, + category=saved_search_text, + thumbnail=config.get_thumb("thumb_search.png"))) + + return itemlist + + +def opciones(item): + itemlist = list() + itemlist.append(Item(channel=item.channel, action="setting_channel", + title="Elegir canales incluidos en la búsqueda", + folder=False, thumbnail=config.get_thumb("thumb_search.png"))) + itemlist.append(Item(channel=item.channel, action="clear_saved_searches", + title="Borrar búsquedas guardadas", folder=False, + thumbnail=config.get_thumb("thumb_search.png"))) + itemlist.append(Item(channel=item.channel, action="settings", + title="Otros ajustes", folder=False, + thumbnail=config.get_thumb("thumb_search.png"))) + return itemlist + + +def settings(item): + return platformtools.show_channel_settings(caption="configuración -- Buscador") + + +def setting_channel(item): + channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json') + channel_language = config.get_setting("channel_language") + + if channel_language == "": + channel_language = "all" + + list_controls = [] + for infile in sorted(glob.glob(channels_path)): + channel_name = os.path.basename(infile)[:-5] + channel_parameters = channeltools.get_channel_parameters(channel_name) + + # No incluir si es un canal inactivo + if not channel_parameters["active"]: + continue + + # No incluir si es un canal para adultos, y el modo adulto está desactivado + if channel_parameters["adult"] and config.get_setting("adult_mode") == 0: + continue + + # No incluir si el canal es en un idioma filtrado + if channel_language != "all" and channel_parameters["language"] != channel_language: + continue + + # No incluir si en la configuracion del canal no existe "include_in_global_search" + include_in_global_search = channel_parameters["include_in_global_search"] + + if not include_in_global_search: + continue + else: + # Se busca en la configuración del canal el valor guardado + include_in_global_search = config.get_setting("include_in_global_search", channel_name) + + control = {'id': channel_name, + 'type': "bool", + 'label': channel_parameters["title"], + 'default': include_in_global_search, + 'enabled': True, + 'visible': True} + + list_controls.append(control) + + if config.get_setting("custom_button_value", item.channel): + custom_button_label = "Ninguno" + else: + custom_button_label = "Todos" + + return platformtools.show_channel_settings(list_controls=list_controls, + caption="Canales incluidos en la búsqueda global", + callback="save_settings", item=item, + custom_button={'visible': True, + 'function': "cb_custom_button", + 'close': False, + 'label': custom_button_label}) + + +def save_settings(item, dict_values): + progreso = platformtools.dialog_progress("Guardando configuración...", "Espere un momento por favor.") + n = len(dict_values) + for i, v in enumerate(dict_values): + progreso.update((i * 100) / n, "Guardando configuración...") + config.set_setting("include_in_global_search", dict_values[v], v) + + progreso.close() + + +def cb_custom_button(item, dict_values): + value = config.get_setting("custom_button_value", item.channel) + if value == "": + value = False + + for v in dict_values.keys(): + dict_values[v] = not value + + if config.set_setting("custom_button_value", not value, item.channel) == True: + return {"label": "Ninguno"} + else: + return {"label": "Todos"} + + +def 
searchbycat(item): + # Only in xbmc/kodi + # Abre un cuadro de dialogo con las categorías en las que hacer la búsqueda + + categories = ["Películas", "Series", "Anime", "Documentales", "VOS", "Latino"] + categories_id = ["movie", "tvshow", "anime", "documentary", "vos", "latino"] + list_controls = [] + for i, category in enumerate(categories): + control = {'id': categories_id[i], + 'type': "bool", + 'label': category, + 'default': False, + 'enabled': True, + 'visible': True} + + list_controls.append(control) + control = {'id': "separador", + 'type': "label", + 'label': '', + 'default': "", + 'enabled': True, + 'visible': True} + list_controls.append(control) + control = {'id': "torrent", + 'type': "bool", + 'label': 'Incluir en la búsqueda canales Torrent', + 'default': True, + 'enabled': True, + 'visible': True} + list_controls.append(control) + + return platformtools.show_channel_settings(list_controls=list_controls, caption="Elegir categorías", + callback="search_cb", item=item) + + +def search_cb(item, values=""): + cat = [] + for c in values: + if values[c]: + cat.append(c) + + if not len(cat): + return None + else: + logger.info(item.tostring()) + logger.info(str(cat)) + return do_search(item, cat) + + +# Al llamar a esta función, el sistema pedirá primero el texto a buscar +# y lo pasará en el parámetro "tecleado" +def search(item, tecleado): + logger.info() + tecleado = tecleado.replace("+", " ") + item.category = tecleado + + if tecleado != "": + save_search(tecleado) + + if item.extra == "categorias": + item.extra = tecleado + itemlist = searchbycat(item) + else: + item.extra = tecleado + itemlist = do_search(item, []) + + return itemlist + + +def show_result(item): + tecleado = None + if item.adult and config.get_setting("adult_request_password"): + # Solicitar contraseña + tecleado = platformtools.dialog_input("", "Contraseña para canales de adultos", True) + if tecleado is None or tecleado != config.get_setting("adult_pin"): + return [] + + item.channel = item.__dict__.pop('from_channel') + item.action = item.__dict__.pop('from_action') + if item.__dict__.has_key('tecleado'): + tecleado = item.__dict__.pop('tecleado') + + try: + channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) + except: + import traceback + logger.error(traceback.format_exc()) + return [] + + if tecleado: + # Mostrar resultados: agrupados por canales + return channel.search(item, tecleado) + else: + # Mostrar resultados: todos juntos + try: + from platformcode import launcher + launcher.run(item) + except ImportError: + return getattr(channel, item.action)(item) + + +def channel_search(search_results, channel_parameters, tecleado): + try: + exec "from channels import " + channel_parameters["channel"] + " as module" + mainlist = module.mainlist(Item(channel=channel_parameters["channel"])) + search_items = [item for item in mainlist if item.action == "search"] + if not search_items: + search_items = [Item(channel=channel_parameters["channel"], action="search")] + + for item in search_items: + result = module.search(item.clone(), tecleado) + if result is None: + result = [] + if len(result): + if not channel_parameters["title"] in search_results: + search_results[channel_parameters["title"]] = [] + + search_results[channel_parameters["title"]].append({"item": item, + "itemlist": result, + "adult": channel_parameters["adult"]}) + + except: + logger.error("No se puede buscar en: %s" % channel_parameters["title"]) + import traceback + logger.error(traceback.format_exc()) + 
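+
+# channel_search() above resolves each channel module via an exec'd import
+# string. A minimal equivalent sketch using importlib (part of the Python 2.7
+# stdlib that Kodi ships); illustrative helper only, not wired into the flow:
+def _import_channel(channel_name):
+    # e.g. _import_channel("repelis") returns the channels.repelis module
+    import importlib
+    return importlib.import_module("channels.%s" % channel_name)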
+ +# Esta es la función que realmente realiza la búsqueda +def do_search(item, categories=None): + logger.info("blaa categorias %s" % categories) + + if categories is None: + categories = [] + + multithread = config.get_setting("multithread", "search") + result_mode = config.get_setting("result_mode", "search") + + tecleado = item.extra + + itemlist = [] + + channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json') + logger.info("channels_path=%s" % channels_path) + + channel_language = config.get_setting("channel_language") + logger.info("channel_language=%s" % channel_language) + if channel_language == "": + channel_language = "all" + logger.info("channel_language=%s" % channel_language) + + # Para Kodi es necesario esperar antes de cargar el progreso, de lo contrario + # el cuadro de progreso queda "detras" del cuadro "cargando..." y no se le puede dar a cancelar + time.sleep(0.5) + progreso = platformtools.dialog_progress("Buscando '%s'..." % tecleado, "") + channel_files = sorted(glob.glob(channels_path), key=lambda x: os.path.basename(x)) + + import math + # fix float porque la division se hace mal en python 2.x + number_of_channels = float(100) / len(channel_files) + + threads = [] + search_results = {} + start_time = time.time() + + for index, infile in enumerate(channel_files): + try: + percentage = int(math.ceil((index + 1) * number_of_channels)) + + basename = os.path.basename(infile) + basename_without_extension = basename[:-5] + logger.info("%s..." % basename_without_extension) + + channel_parameters = channeltools.get_channel_parameters(basename_without_extension) + + # No busca si es un canal inactivo + if not channel_parameters["active"]: + logger.info("%s -no activo-" % basename_without_extension) + continue + + # En caso de búsqueda por categorias + if categories: + + # Si no se ha seleccionado torrent no se muestra + if "torrent" not in categories: + if "torrent" in channel_parameters["categories"]: + logger.info("%s -torrent-" % basename_without_extension) + continue + + for cat in categories: + if cat not in channel_parameters["categories"]: + logger.info("%s -no en %s-" % (basename_without_extension, cat)) + continue + + # No busca si es un canal para adultos, y el modo adulto está desactivado + if channel_parameters["adult"] and config.get_setting("adult_mode") == 0: + logger.info("%s -adulto-" % basename_without_extension) + continue + + # No busca si el canal es en un idioma filtrado + if channel_language != "all" and channel_parameters["language"] != channel_language: + logger.info("%s -idioma no válido-" % basename_without_extension) + continue + + # No busca si es un canal excluido de la búsqueda global + include_in_global_search = channel_parameters["include_in_global_search"] + if include_in_global_search: + # Buscar en la configuracion del canal + include_in_global_search = config.get_setting("include_in_global_search", basename_without_extension) + + if not include_in_global_search: + logger.info("%s -no incluido en lista a buscar-" % basename_without_extension) + continue + + if progreso.iscanceled(): + progreso.close() + logger.info("Búsqueda cancelada") + return itemlist + + # Modo Multi Thread + if multithread: + t = Thread(target=channel_search, args=[search_results, channel_parameters, tecleado], + name=channel_parameters["title"]) + t.setDaemon(True) + t.start() + threads.append(t) + + # Modo single Thread + else: + logger.info("Intentado búsqueda en %s de %s " % (basename_without_extension, tecleado)) + 
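+                # Single-thread mode runs the same worker inline; in both modes
+                # channel_search() appends into the shared search_results dict,
+                # keyed by channel title, so results merge the same way whether
+                # or not threads are used.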
channel_search(search_results, channel_parameters, tecleado) + + logger.info("%s incluido en la búsqueda" % basename_without_extension) + progreso.update(percentage, + "Buscando en %s..." % channel_parameters["title"]) + + except: + logger.error("No se puede buscar en: %s" % channel_parameters["title"]) + import traceback + logger.error(traceback.format_exc()) + continue + + # Modo Multi Thread + # Usando isAlive() no es necesario try-except, + # ya que esta funcion (a diferencia de is_alive()) + # es compatible tanto con versiones antiguas de python como nuevas + if multithread: + pendent = [a for a in threads if a.isAlive()] + t = float(100) / len(pendent) + while pendent: + index = (len(threads) - len(pendent)) + 1 + percentage = int(math.ceil(index * t)) + + list_pendent_names = [a.getName() for a in pendent] + mensaje = "Buscando en %s" % (", ".join(list_pendent_names)) + progreso.update(percentage, "Finalizado en %d/%d canales..." % (len(threads) - len(pendent), len(threads)), + mensaje) + logger.debug(mensaje) + + if progreso.iscanceled(): + logger.info("Búsqueda cancelada") + break + + time.sleep(0.5) + pendent = [a for a in threads if a.isAlive()] + + total = 0 + + for channel in sorted(search_results.keys()): + for element in search_results[channel]: + total += len(element["itemlist"]) + title = channel + + # resultados agrupados por canales + if result_mode == 0: + if len(search_results[channel]) > 1: + title += " [%s]" % element["item"].title.strip() + title += " (%s)" % len(element["itemlist"]) + + title = re.sub("\[COLOR [^\]]+\]", "", title) + title = re.sub("\[/COLOR]", "", title) + + itemlist.append(Item(title=title, channel="search", action="show_result", url=element["item"].url, + extra=element["item"].extra, folder=True, adult=element["adult"], + from_action="search", from_channel=element["item"].channel, tecleado=tecleado)) + # todos los resultados juntos, en la misma lista + else: + title = " [ Resultados del canal %s ] " % channel + itemlist.append(Item(title=title, channel="search", action="", + folder=False, text_bold=True)) + for i in element["itemlist"]: + if i.action: + title = " " + i.title + itemlist.append(i.clone(title=title, from_action=i.action, from_channel=i.channel, + channel="search", action="show_result", adult=element["adult"])) + + title = "Buscando: '%s' | Encontrado: %d vídeos | Tiempo: %2.f segundos" % (tecleado, total, time.time() - start_time) + itemlist.insert(0, Item(title=title, text_color='yellow')) + + progreso.close() + + return itemlist + + +def save_search(text): + saved_searches_limit = int((10, 20, 30, 40,)[int(config.get_setting("saved_searches_limit", "search"))]) + + current_saved_searches_list = config.get_setting("saved_searches_list", "search") + if current_saved_searches_list is None: + saved_searches_list = [] + else: + saved_searches_list = list(current_saved_searches_list) + + if text in saved_searches_list: + saved_searches_list.remove(text) + + saved_searches_list.insert(0, text) + + config.set_setting("saved_searches_list", saved_searches_list[:saved_searches_limit], "search") + + +def clear_saved_searches(item): + config.set_setting("saved_searches_list", list(), "search") + platformtools.dialog_ok("Buscador", "Búsquedas borradas correctamente") + + +def get_saved_searches(): + current_saved_searches_list = config.get_setting("saved_searches_list", "buscador") + if current_saved_searches_list is None: + saved_searches_list = [] + else: + saved_searches_list = list(current_saved_searches_list) + + return 
saved_searches_list diff --git a/plugin.video.alfa/channels/seodiv.json b/plugin.video.alfa/channels/seodiv.json new file mode 100755 index 00000000..1dcceec1 --- /dev/null +++ b/plugin.video.alfa/channels/seodiv.json @@ -0,0 +1,48 @@ +{ + "id": "seodiv", + "name": "Seodiv", + "compatible": { + "addon_version": "4.3" + }, + "active": true, + "adult": false, + "language": "es", + "thumbnail": "https://s32.postimg.org/gh8lhbkb9/seodiv.png", + "banner": "https://s31.postimg.org/klwjzp7t7/seodiv_banner.png", + "version": 1, + "changes": [ + { + "date": "24/06/2017", + "description": "Cambios para autoplay" + }, + { + "date": "06/06/2017", + "description": "compatibilidad con AutoPlay" + }, + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "09/05/2017", + "description": "Fix temporadas" + } + ], + "categories": [ + "tvshows" + ], + "settings": [ + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seodiv.py b/plugin.video.alfa/channels/seodiv.py new file mode 100755 index 00000000..251ded6b --- /dev/null +++ b/plugin.video.alfa/channels/seodiv.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- + +import re + +from channels import autoplay +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +IDIOMAS = {'latino': 'Latino'} +list_language = IDIOMAS.values() +list_servers = ['openload', + 'okru', + 'myvideo', + 'sendvid' + ] +list_quality = ['default'] + +host = 'http://www.seodiv.com' + + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append( + Item(channel=item.channel, + title="Todos", + action="todas", + url=host, + thumbnail='https://s27.postimg.org/iahczwgrn/series.png', + fanart='https://s27.postimg.org/iahczwgrn/series.png', + language='latino' + )) + autoplay.show_option(item.channel, itemlist) + return itemlist + + +def todas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + patron = '<div class=shortf><div><div class=shortf-img><a href=(.*?)><img src=(.*?) alt=.*?quality>(.*?)<.*?Ver ' \ + 'Serie><span>(.*?)<\/span>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches: + url = host + scrapedurl + calidad = scrapedcalidad + title = scrapedtitle.decode('utf-8') + thumbnail = scrapedthumbnail + fanart = 'https://s32.postimg.org/gh8lhbkb9/seodiv.png' + + itemlist.append( + Item(channel=item.channel, + action="temporadas", + title=title, url=url, + thumbnail=thumbnail, + fanart=fanart, + contentSerieName=title, + extra='', + language=item.language, + quality='default', + context=autoplay.context + )) + + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + data = get_source(item.url) + url_base = item.url + patron = '<li class=item\d+><a href=#>(.*?) 
<\/a>' + matches = re.compile(patron, re.DOTALL).findall(data) + temp = 1 + if matches: + for scrapedtitle in matches: + url = url_base + tempo = re.findall(r'\d+', scrapedtitle) + # if tempo: + # title = 'Temporada' + ' ' + tempo[0] + # else: + title = scrapedtitle + thumbnail = item.thumbnail + plot = item.plot + fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>') + itemlist.append( + Item(channel=item.channel, + action="episodiosxtemp", + title=title, + fulltitle=item.title, + url=url, + thumbnail=thumbnail, + plot=plot, fanart=fanart, + temp=str(temp), + contentSerieName=item.contentSerieName, + language=item.language, + quality=item.quality, + context=item.context + )) + temp = temp + 1 + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra="episodios", + contentSerieName=item.contentSerieName, + extra1=item.extra1, + temp=str(temp) + )) + return itemlist + else: + itemlist = episodiosxtemp(item) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra="episodios", + contentSerieName=item.contentSerieName, + extra1=item.extra1, + temp=str(temp) + )) + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + templist = temporadas(item) + for tempitem in templist: + itemlist += episodiosxtemp(tempitem) + + return itemlist + + +def get_source(url): + logger.info() + data = httptools.downloadpage(url, add_referer=True).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + return data + + +def episodiosxtemp(item): + logger.info() + + logger.info() + itemlist = [] + patron_temp = '<li class=item\d+><a href=#>%s <\/a><ul><!--initiate accordion-->.*?<!--initiate ' \ + 'accordion-->' % item.title + all_data = get_source(item.url) + data = scrapertools.find_single_match(all_data, patron_temp) + tempo = item.title + if 'Temporada' in item.title: + item.title = item.title.replace('Temporada', 'temporada') + item.title = item.title.strip() + item.title = item.title.replace(' ', '-') + + patron = '<li><a href=(.*?)>.*?(Capitulo|Pelicula).*?(\d+).*?<' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtipo, scrapedtitle in matches: + url = host + scrapedurl + plot = item.plot + if scrapedtipo == 'Capitulo' and item.temp != '': + title = item.contentSerieName + ' ' + item.temp + 'x' + scrapedtitle + itemlist.append( + Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=item.thumbnail, + plot=plot, + language=item.language, + quality=item.quality, + contentSerieName=item.contentSerieName, + context=item.context + )) + + if item.title not in scrapedurl and scrapedtipo == 'Capitulo' and item.temp \ + == '': + if item.temp == '': temp = '1' + title = item.contentSerieName + ' ' + temp + 'x' + scrapedtitle + if '#' not in scrapedurl: + itemlist.append( + Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=item.thumbnail, + plot=plot, + contentSerieName=item.contentSerieName, + context=item.context + )) + + if 'temporada' not in item.title and item.title not in scrapedurl and scrapedtipo == 'Pelicula': + title = scrapedtipo + ' ' + scrapedtitle + 
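+            # scrapedtitle is the numeric capture from patron above, so movie
+            # entries end up titled e.g. "Pelicula 1", while the episode items
+            # built earlier follow the "Serie TxN" form (expected output
+            # inferred from the regex, shown here as a sketch).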
itemlist.append( + Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=item.fulltitle, + url=url, + thumbnail=item.thumbnail, + plot=plot, + language=item.language, + contentSerieName=item.contentSerieName, + context=item.context + )) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + video_items = servertools.find_video_items(item) + + for videoitem in video_items: + videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server) + videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span ' + 'class="f-info-text">(.*?)<\/span>') + videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')' + videoitem.quality = 'default' + videoitem.context = item.context + itemlist.append(videoitem) + + # Requerido para FilterTools + + if len(itemlist) > 0 and filtertools.context: + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + return itemlist diff --git a/plugin.video.alfa/channels/seriecanal.json b/plugin.video.alfa/channels/seriecanal.json new file mode 100755 index 00000000..8e14a0d0 --- /dev/null +++ b/plugin.video.alfa/channels/seriecanal.json @@ -0,0 +1,72 @@ +{ + "id": "seriecanal", + "name": "Seriecanal", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/EwMK8Yd.png", + "banner": "seriecanal.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "08/07/2016", + "description": "Corregido el canal, es necesario registrarse, adaptado a la nueva version." + } + ], + "categories": [ + "tvshow", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "user", + "type": "text", + "label": "Usuario", + "color": "0xFFd50b0b", + "enabled": true, + "visible": true + }, + { + "id": "password", + "type": "text", + "label": "Contraseña", + "color": "0xFFd50b0b", + "enabled": true, + "visible": true, + "hidden": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seriecanal.py b/plugin.video.alfa/channels/seriecanal.py new file mode 100755 index 00000000..af7eae18 --- /dev/null +++ b/plugin.video.alfa/channels/seriecanal.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from core import config +from core import logger +from core import scrapertools +from core import servertools + +__modo_grafico__ = config.get_setting('modo_grafico', "seriecanal") +__perfil__ = config.get_setting('perfil', "descargasmix") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] + +URL_BASE = "http://www.seriecanal.com/" + + +def login(): + logger.info() + data = scrapertools.downloadpage(URL_BASE) + if "Cerrar Sesion" in data: + 
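+        # "Cerrar Sesion" only appears in the downloaded page when a session is
+        # already active, so login() can short-circuit here (an assumption based
+        # on this string check; the site markup is what actually decides it).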
return True, "" + + usuario = config.get_setting("user", "seriecanal") + password = config.get_setting("password", "seriecanal") + if usuario == "" or password == "": + return False, 'Regístrate en www.seriecanal.com e introduce tus datos en "Configurar Canal"' + else: + post = urllib.urlencode({'username': usuario, 'password': password}) + data = scrapertools.downloadpage("http://www.seriecanal.com/index.php?page=member&do=login&tarea=acceder", + post=post) + if "Bienvenid@, se ha identificado correctamente en nuestro sistema" in data: + return True, "" + else: + return False, "Error en el login. El usuario y/o la contraseña no son correctos" + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + result, message = login() + if result: + itemlist.append(item.clone(action="series", title="Últimos episodios", url=URL_BASE)) + itemlist.append(item.clone(action="genero", title="Series por género")) + itemlist.append(item.clone(action="alfabetico", title="Series por orden alfabético")) + itemlist.append(item.clone(action="search", title="Buscar...")) + else: + itemlist.append(item.clone(action="", title=message, text_color="red")) + + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + item.url = "http://www.seriecanal.com/index.php?page=portada&do=category&method=post&category_id=0&order=" \ + "C_Create&view=thumb&pgs=1&p2=1" + try: + post = "keyserie=" + texto + item.extra = post + return series(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def genero(item): + logger.info() + itemlist = [] + data = scrapertools.downloadpage(URL_BASE) + data = scrapertools.find_single_match(data, '<ul class="tag-cloud">(.*?)</ul>') + + matches = scrapertools.find_multiple_matches(data, '<a.*?href="([^"]+)">([^"]+)</a>') + for scrapedurl, scrapedtitle in matches: + scrapedtitle = scrapedtitle.capitalize() + url = urlparse.urljoin(URL_BASE, scrapedurl) + itemlist.append(item.clone(action="series", title=scrapedtitle, url=url)) + + return itemlist + + +def alfabetico(item): + logger.info() + itemlist = [] + data = scrapertools.downloadpage(URL_BASE) + data = scrapertools.find_single_match(data, '<ul class="pagination pagination-sm" style="margin:5px 0;">(.*?)</ul>') + + matches = scrapertools.find_multiple_matches(data, '<a.*?href="([^"]+)">([^"]+)</a>') + for scrapedurl, scrapedtitle in matches: + url = urlparse.urljoin(URL_BASE, scrapedurl) + itemlist.append(item.clone(action="series", title=scrapedtitle, url=url)) + return itemlist + + +def series(item): + logger.info() + itemlist = [] + item.infoLabels = {} + item.text_color = color2 + + if item.extra != "": + data = scrapertools.downloadpage(item.url, post=item.extra) + else: + data = scrapertools.downloadpage(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<div class="item-inner" style="margin: 0 20px 0px 0\;"><img src="([^"]+)".*?' \ + 'href="([^"]+)" title="Click para Acceder a la Ficha(?:\|([^"]+)|)".*?' \ + '<strong>([^"]+)</strong></a>.*?<strong>([^"]+)</strong></p>.*?' 
\ + '<p class="text-warning".*?\;">(.*?)</p>' + + matches = scrapertools.find_multiple_matches(data, patron) + + for scrapedthumbnail, scrapedurl, scrapedplot, scrapedtitle, scrapedtemp, scrapedepi in matches: + title = scrapedtitle + " - " + scrapedtemp + " - " + scrapedepi + url = urlparse.urljoin(URL_BASE, scrapedurl) + temporada = scrapertools.find_single_match(scrapedtemp, "(\d+)") + new_item = item.clone() + new_item.contentType = "tvshow" + if temporada != "": + new_item.infoLabels['season'] = temporada + new_item.contentType = "season" + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(new_item.clone(action="findvideos", title=title, fulltitle=scrapedtitle, url=url, + thumbnail=scrapedthumbnail, plot=scrapedplot, contentTitle=scrapedtitle, + context=["buscar_trailer"], show=scrapedtitle)) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + # Extra marca siguiente página + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" (?:onclick="return false;" |)title=' + '"Página Siguiente"') + if next_page != "/": + url = urlparse.urljoin(URL_BASE, next_page) + itemlist.append(item.clone(action="series", title=">> Siguiente", url=url, text_color=color3)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + item.text_color = color3 + + data = scrapertools.downloadpage(item.url) + data = scrapertools.decodeHtmlentities(data) + + # Busca en la seccion descarga/torrent + data_download = scrapertools.find_single_match(data, '<th>Episodio - Enlaces de Descarga</th>(.*?)</table>') + patron = '<p class="item_name".*?<a href="([^"]+)".*?>([^"]+)</a>' + matches = scrapertools.find_multiple_matches(data_download, patron) + for scrapedurl, scrapedepi in matches: + new_item = item.clone() + if "Episodio" not in scrapedepi: + scrapedtitle = "[Torrent] Episodio " + scrapedepi + else: + scrapedtitle = "[Torrent] " + scrapedepi + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + + new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)") + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]") + itemlist.append(new_item.clone(action="play", title=scrapedtitle, url=scrapedurl, server="torrent", + contentType="episode")) + + # Busca en la seccion online + data_online = scrapertools.find_single_match(data, "<th>Enlaces de Visionado Online</th>(.*?)</table>") + patron = '<a href="([^"]+)\\n.*?src="([^"]+)".*?' 
\ + 'title="Enlace de Visionado Online">([^"]+)</a>' + matches = scrapertools.find_multiple_matches(data_online, patron) + + for scrapedurl, scrapedthumb, scrapedtitle in matches: + # Deshecha enlaces de trailers + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + if (scrapedthumb != "images/series/youtube.png") & (scrapedtitle != "Trailer"): + new_item = item.clone() + server = scrapertools.find_single_match(scrapedthumb, "images/series/(.*?).png") + title = "[" + server.capitalize() + "]" + " " + scrapedtitle + + new_item.infoLabels['episode'] = scrapertools.find_single_match(scrapedtitle, "Episodio (\d+)") + itemlist.append(new_item.clone(action="play", title=title, url=scrapedurl, contentType="episode")) + + # Comprueba si hay otras temporadas + if not "No hay disponible ninguna Temporada adicional" in data: + data_temp = scrapertools.find_single_match(data, '<div class="panel panel-success">(.*?)</table>') + data_temp = re.sub(r"\n|\r|\t|\s{2}| ", "", data_temp) + patron = '<tr><td><p class="item_name"><a href="([^"]+)".*?' \ + '<p class="text-success"><strong>([^"]+)</strong>' + matches = scrapertools.find_multiple_matches(data_temp, patron) + for scrapedurl, scrapedtitle in matches: + new_item = item.clone() + url = urlparse.urljoin(URL_BASE, scrapedurl) + scrapedtitle = scrapedtitle.capitalize() + temporada = scrapertools.find_single_match(scrapedtitle, "Temporada (\d+)") + if temporada != "": + new_item.infoLabels['season'] = temporada + new_item.infoLabels['episode'] = "" + itemlist.append(new_item.clone(action="findvideos", title=scrapedtitle, url=url, text_color="red", + contentType="season")) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + new_item = item.clone() + if config.is_xbmc(): + new_item.contextual = True + itemlist.append(new_item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", + text_color="magenta")) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + if item.extra == "torrent": + itemlist.append(item.clone()) + else: + # Extrae url de enlace bit.ly + if item.url.startswith("http://bit.ly/"): + item.url = scrapertools.getLocationHeaderFromResponse(item.url) + video_list = servertools.findvideos(item.url) + if video_list: + url = video_list[0][1] + server = video_list[0][2] + itemlist.append(item.clone(server=server, url=url)) + + return itemlist diff --git a/plugin.video.alfa/channels/seriesadicto.json b/plugin.video.alfa/channels/seriesadicto.json new file mode 100755 index 00000000..4039a308 --- /dev/null +++ b/plugin.video.alfa/channels/seriesadicto.json @@ -0,0 +1,34 @@ +{ + "id": "seriesadicto", + "name": "Seriesadicto", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "seriesadicto.png", + "banner": "seriesadicto.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "tvshow", + "anime" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seriesadicto.py b/plugin.video.alfa/channels/seriesadicto.py new file mode 100755 index 00000000..5fa7c27d --- /dev/null +++ b/plugin.video.alfa/channels/seriesadicto.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, action="letras", title="Todas por orden alfabético", url="http://seriesadicto.com/", + folder=True)) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://seriesadicto.com/buscar/" + texto + + try: + return series(item) + # Se captura la excepci?n, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def letras(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cachePage(item.url) + data = scrapertools.find_single_match(data, '<li class="nav-header">Por inicial</li>(.*?)</ul>') + logger.info("data=" + data) + + patronvideos = '<li><a rel="nofollow" href="([^"]+)">([^<]+)</a>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle + plot = "" + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action='series', title=title, url=url, thumbnail=thumbnail, plot=plot)) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + + ''' + <li class="col-xs-6 col-sm-4 col-md-2"> + <a href="/serie/justicia-ciega-blind-justuce" title="Ver Justicia ciega ( Blind Justuce ) Online" class="thumbnail thumbnail-artist-grid"> + <img style="width: 120px; height: 180px;" src="/img/series/justicia-ciega-blind-justuce-th.jpg" alt="Justicia ciega ( Blind Justuce )"/> + ''' + + data = scrapertools.cachePage(item.url) + logger.info("data=" + data) + + patron = '<li class="col-xs-6[^<]+' + patron += '<a href="([^"]+)"[^<]+' + patron += '<img style="[^"]+" src="([^"]+)" alt="([^"]+)"' + logger.info("patron=" + patron) + + matches = re.compile(patron, re.DOTALL).findall(data) + logger.info("matches=" + repr(matches)) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + title = scrapertools.htmlclean(scrapedtitle.strip()) + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url, thumbnail=thumbnail, + plot=plot, show=title, folder=True)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + ''' + <tr> + <td class="sape"><i class="glyphicon glyphicon-film"></i> <a 
href="/capitulo/saving-hope/1/2/82539" class="color4">Saving Hope 1x02</a></td> + <td><div class="vistodiv" title="82539"><a title="Marcar como Visto"><span class="visto visto-no"></span></a></div></td> + <td> + <img src="/img/3.png" border="0" height="14" width="22" /> <img src="/img/4.png" border="0" height="14" width="22" />  </td> + </tr> + ''' + + data = scrapertools.cachePage(item.url) + + patron = '<tr[^<]+' + patron += '<td class="sape"><i[^<]+</i[^<]+<a href="([^"]+)"[^>]+>([^<]+)</a></td[^<]+' + patron += '<td><div[^<]+<a[^<]+<span[^<]+</span></a></div></td[^<]+' + patron += '<td>(.*?)</td' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitle, bloqueidiomas in matches: + title = scrapedtitle.strip() + " (" + extrae_idiomas(bloqueidiomas) + ")" + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail, + plot=plot, show=item.show, folder=True)) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show)) + itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, + action="download_all_episodes", extra="episodios", show=item.show)) + + return itemlist + + +def extrae_idiomas(bloqueidiomas): + logger.info("idiomas=" + bloqueidiomas) + patronidiomas = '([a-z0-9]+).png"' + idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas) + textoidiomas = "" + for idioma in idiomas: + if idioma == "1": + textoidiomas = textoidiomas + "Español" + "/" + if idioma == "2": + textoidiomas = textoidiomas + "Latino" + "/" + if idioma == "3": + textoidiomas = textoidiomas + "VOS" + "/" + if idioma == "4": + textoidiomas = textoidiomas + "VO" + "/" + + textoidiomas = textoidiomas[:-1] + return textoidiomas + + +def codigo_a_idioma(codigo): + idioma = "" + if codigo == "1": + idioma = "Español" + if codigo == "2": + idioma = "Latino" + if codigo == "3": + idioma = "VOS" + if codigo == "4": + idioma = "VO" + + return idioma + + +def findvideos(item): + logger.info() + itemlist = [] + + ''' + <tr class="lang_3 no-mobile"> + <td><img src="/img/3.png" border="0" height="14" width="22" /></td> + <td>Nowvideo</td> + <td class="enlacevideo" title="82539"><a href="http://www.nowvideo.eu/video/4fdc641896fe8" rel="nofollow" target="_blank" class="btn btn-primary btn-xs bg2"><i class="glyphicon glyphicon-play"></i> Reproducir</a></td> + </td> + </tr> + ''' + # Descarga la pagina + data = scrapertools.cachePage(item.url) + + patron = '<tr class="lang_[^<]+' + patron += '<td><img src="/img/(\d).png"[^<]+</td[^<]+' + patron += '<td>([^<]+)</td[^<]+' + patron += '<td class="enlacevideo"[^<]+<a href="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for idioma, servername, scrapedurl in matches: + title = "Mirror en " + servername + " (" + codigo_a_idioma(idioma) + ")" + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="play", title=title, fulltitle=title, 
url=url, thumbnail=thumbnail, + plot=plot, folder=False)) + + return itemlist + + +def play(item): + logger.info() + + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url( + videoitem.url) + ")" + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/seriesblanco.json b/plugin.video.alfa/channels/seriesblanco.json new file mode 100755 index 00000000..4e1616fb --- /dev/null +++ b/plugin.video.alfa/channels/seriesblanco.json @@ -0,0 +1,78 @@ +{ + "id": "seriesblanco", + "name": "Seriesblanco", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "seriesblanco.png", + "banner": "seriesblanco.png", + "version": 1, + "changes": [ + { + "date": "11/05/2017", + "description": "Fix merge" + }, + { + "date": "06/04/2017", + "description": "Mejoras en filtertools, fix calidades" + }, + { + "date": "01/04/2017", + "description": "Cambiado canal." + } + ], + "categories": [ + "tvshow", + "vos", + "latino" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Español", + "Inglés", + "Latino", + "VO", + "VOS", + "VOSI", + "OVOS" + ] + }, + { + "id": "filterlinks", + "type": "list", + "label": "Mostrar enlaces de tipo...", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Solo Descarga", + "Solo Online", + "No filtrar" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seriesblanco.py b/plugin.video.alfa/channels/seriesblanco.py new file mode 100755 index 00000000..91c3cd92 --- /dev/null +++ b/plugin.video.alfa/channels/seriesblanco.py @@ -0,0 +1,331 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertoolsV2 +from core import servertools +from core.item import Item + +HOST = "http://seriesblanco.com/" +IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'vo': 'VO', 'vos': 'VOS', 'vosi': 'VOSI', 'otro': 'OVOS'} +list_idiomas = IDIOMAS.values() +CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p'] + +CAPITULOS_DE_ESTRENO_STR = "Capítulos de Estreno" + + +def mainlist(item): + logger.info() + + thumb_series = config.get_thumb("thumb_channels_tvshow.png") + thumb_series_az = config.get_thumb("thumb_channels_tvshow_az.png") + thumb_buscar = config.get_thumb("thumb_search.png") + + itemlist = list() + itemlist.append(Item(channel=item.channel, title="Listado alfabético", action="series_listado_alfabetico", + thumbnail=thumb_series_az)) + itemlist.append(Item(channel=item.channel, title="Todas las series", action="series", + url=urlparse.urljoin(HOST, "listado/"), thumbnail=thumb_series)) + itemlist.append( + Item(channel=item.channel, title="Capítulos de estreno", action="home_section", extra=CAPITULOS_DE_ESTRENO_STR, + url=HOST, 
thumbnail=thumb_series)) + itemlist.append( + Item(channel=item.channel, title="Último actualizado", action="home_section", extra="Último Actualizado", + url=HOST, thumbnail=thumb_series)) + itemlist.append(Item(channel=item.channel, title="Series más vistas", action="series", extra="Series Más vistas", + url=urlparse.urljoin(HOST, "listado-visto/"), thumbnail=thumb_series)) + itemlist.append( + Item(channel=item.channel, title="Series menos vistas", action="home_section", extra="Series Menos vistas", + url=HOST, thumbnail=thumb_series)) + itemlist.append(Item(channel=item.channel, title="Últimas fichas creadas", action="series", + url=urlparse.urljoin(HOST, "fichas_creadas/"), thumbnail=thumb_series)) + itemlist.append(Item(channel=item.channel, title="Series por género", action="generos", + url=HOST, thumbnail=thumb_series)) + itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=HOST + "finder.php", + thumbnail=thumb_buscar)) + + itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES) + + return itemlist + + +def home_section(item): + logger.info("section = %s" % item.extra) + + pattern = "['\"]panel-title['\"]>[^/]*%s(.*?)(?:panel-title|\Z)" % item.extra + # logger.debug("pattern = %s" % pattern) + + data = httptools.downloadpage(item.url).data + result = re.search(pattern, data, re.MULTILINE | re.DOTALL) + + if result: + # logger.debug("found section: {0}".format(result.group(1))) + item.extra = 1 + return extract_series_from_data(item, result.group(1)) + + logger.debug("No match") + return [] + + +def extract_series_from_data(item, data): + itemlist = [] + episode_pattern = re.compile('/capitulo-([0-9]+)/') + shows = re.findall("<a.+?href=['\"](?P<url>/serie[^'\"]+)[^<]*<img[^>]*src=['\"](?P<img>http[^'\"]+).*?" + "(?:alt|title)=['\"](?P<name>[^'\"]+)", data) + for url, img, name in shows: + try: + name.decode('utf-8') + except UnicodeError: + name = unicode(name, "iso-8859-1", errors="replace").encode("utf-8") + + # logger.debug("Show found: %s -> %s (%s)" % (name, url, img)) + if not episode_pattern.search(url): + action = "episodios" + else: + action = "findvideos" + + itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url), + action=action, show=name, + thumbnail=img, + context=filtertools.context(item, list_idiomas, CALIDADES))) + + more_pages = re.search('pagina=([0-9]+)">>>', data) + if more_pages: + # logger.debug("Adding next page item") + itemlist.append(item.clone(title="Siguiente >>", extra=item.extra + 1)) + + if item.extra > 1: + # logger.debug("Adding previous page item") + itemlist.append(item.clone(title="<< Anterior", extra=item.extra - 1)) + + return itemlist + + +def series(item): + logger.info() + if not hasattr(item, 'extra') or not isinstance(item.extra, int): + item.extra = 1 + + if '?' in item.url: + merger = '&' + else: + merger = '?' 
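+    # The merger keeps any existing query string intact, e.g. (illustrative
+    # URLs, not taken from the site):
+    #     /listado/            -> /listado/?pagina=2
+    #     /listado/?genero=xyz -> /listado/?genero=xyz&pagina=2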
+ + page_url = "%s%spagina=%s" % (item.url, merger, item.extra) + logger.info("url = %s" % page_url) + + data = scrapertoolsV2.decodeHtmlentities(httptools.downloadpage(page_url).data) + return extract_series_from_data(item, data) + + +def series_listado_alfabetico(item): + logger.info() + + return [item.clone(action="series", title=letra, url=urlparse.urljoin(HOST, "listado-%s/" % letra)) + for letra in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"] + + +def generos(item): + logger.info() + data = httptools.downloadpage(item.url).data + + result = re.findall("href=['\"](?P<url>/listado/[^'\"]+)['\"][^/]+/i>\s*(?P<genero>[^<]+)", data) + return [item.clone(action="series", title=genero, url=urlparse.urljoin(item.url, url)) for url, genero in result] + + +def newest(categoria): + logger.info("categoria: %s" % categoria) + itemlist = [] + try: + if categoria == 'series': + itemlist = home_section(Item(extra=CAPITULOS_DE_ESTRENO_STR, url=HOST)) + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + return itemlist + + +def search(item, texto): + logger.info("%s" % texto) + texto = texto.replace(" ", "+") + + itemlist = [] + + try: + post = "query=%s" % texto + data = httptools.downloadpage(item.url, post=post).data + data = re.sub(r"\n|\r|\t|\s{2}", "", data) + shows = re.findall("<a href=['\"](?P<url>/serie[^'\"]+)['\"].*?<img src=['\"](?P<img>[^'\"]+)['\"].*?" + "id=['\"]q2[1\"] name=['\"]q2['\"] value=['\"](?P<title>.*?)['\"]", data) + + for url, img, title in shows: + itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", show=title, + thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES))) + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + + return itemlist + + +def episodios(item): + logger.info("%s - %s" % (item.title, item.url)) + + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + + fanart = scrapertoolsV2.find_single_match(data, "background-image[^'\"]+['\"]([^'\"]+)") + plot = scrapertoolsV2.find_single_match(data, "id=['\"]profile2['\"]>\s*(.*?)\s*</div>") + + # logger.debug("fanart: %s" % fanart) + # logger.debug("plot: %s" % plot) + + episodes = re.findall("<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>", data, + re.MULTILINE | re.DOTALL) + for url, title, flags in episodes: + idiomas = " ".join(["[%s]" % IDIOMAS.get(language, "OVOS") for language in + re.findall("banderas/([^\.]+)", flags, re.MULTILINE)]) + filter_lang = idiomas.replace("[", "").replace("]", "").split(" ") + display_title = "%s - %s %s" % (item.show, title, idiomas) + # logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url))) + itemlist.append(item.clone(title=display_title, url=urlparse.urljoin(HOST, url), + action="findvideos", plot=plot, fanart=fanart, language=filter_lang)) + + itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios")) + + return itemlist + + +def parse_videos(item, type_str, data): + video_patterns_str = [ + 
'<tr.+?<span>(?P<date>.+?)</span>.*?banderas/(?P<language>[^\.]+).+?href="(?P<link>[^"]+).+?servidores/' + '(?P<server>[^\.]+).*?</td>.*?<td>.*?<span>(?P<uploader>.+?)</span>.*?<span>(?P<quality>.*?)</span>', + '<tr.+?banderas/(?P<language>[^\.]+).+?<td[^>]*>(?P<date>.+?)</td>.+?href=[\'"](?P<link>[^\'"]+)' + '.+?servidores/(?P<server>[^\.]+).*?</td>.*?<td[^>]*>.*?<a[^>]+>(?P<uploader>.+?)</a>.*?</td>.*?<td[^>]*>' + '(?P<quality>.*?)</td>.*?</tr>' + ] + + for v_pat_str in video_patterns_str: + v_patt_iter = re.compile(v_pat_str, re.MULTILINE | re.DOTALL).finditer(data) + + itemlist = [] + + for vMatch in v_patt_iter: + v_fields = vMatch.groupdict() + quality = v_fields.get("quality") + + # FIX para veces que añaden el idioma en los comentarios + regex = re.compile('sub-inglés-?', re.I) + quality = regex.sub("", quality) + # quality = re.sub(r"sub-inglés-?", "", quality, flags=re.IGNORECASE) + + if not quality: + quality = "SD" + + # FIX para los guiones en la calidad y no tener que añadir otra opción en la lista de calidades + if quality.startswith("MicroHD"): + regex = re.compile('microhd', re.I) + quality = regex.sub("Micro-HD-", quality) + # quality = re.sub(r"microhd", "Micro-HD-", quality, flags=re.IGNORECASE) + + title = "%s en %s [%s] [%s] (%s: %s)" % (type_str, v_fields.get("server"), + IDIOMAS.get(v_fields.get("language"), "OVOS"), quality, + v_fields.get("uploader"), v_fields.get("date")) + itemlist.append( + item.clone(title=title, fulltitle=item.title, url=urlparse.urljoin(HOST, v_fields.get("link")), + action="play", language=IDIOMAS.get(v_fields.get("language"), "OVOS"), + quality=quality)) + + if len(itemlist) > 0: + return itemlist + + return [] + + +def extract_videos_section(data): + return re.findall("panel-title(.+?)</div>[^<]*</div>[^<]*</div>", data, re.MULTILINE | re.DOTALL) + + +def findvideos(item): + logger.info("%s = %s" % (item.show, item.url)) + + # Descarga la página + data = httptools.downloadpage(item.url).data + # logger.info(data) + + online = extract_videos_section(data) + + try: + filtro_enlaces = config.get_setting("filterlinks", item.channel) + except: + filtro_enlaces = 2 + + list_links = [] + + if filtro_enlaces != 0: + list_links.extend(parse_videos(item, "Ver", online[0])) + + if filtro_enlaces != 1: + list_links.extend(parse_videos(item, "Descargar", online[1])) + + list_links = filtertools.get_links(list_links, item, list_idiomas, CALIDADES) + + return list_links + + +def play(item): + logger.info("%s - %s = %s" % (item.show, item.title, item.url)) + + if item.url.startswith(HOST): + data = httptools.downloadpage(item.url).data + + ajax_link = re.findall("loadEnlace\((\d+),(\d+),(\d+),(\d+)\)", data) + ajax_data = "" + for serie, temp, cap, linkID in ajax_link: + # logger.debug( + # "Ajax link request: Serie = %s - Temp = %s - Cap = %s - Link = %s" % (serie, temp, cap, linkID)) + ajax_data += httptools.downloadpage( + HOST + '/ajax/load_enlace.php?serie=' + serie + '&temp=' + temp + '&cap=' + cap + '&id=' + linkID).data + + if ajax_data: + data = ajax_data + + patron = "onclick='window.open\(\"([^\"]+)\"\);'/>" + url = scrapertoolsV2.find_single_match(data, patron) + + else: + url = item.url + + itemlist = servertools.find_video_items(data=url) + + titulo = scrapertoolsV2.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$") + if titulo: + titulo += " [%s]" % item.language + + for videoitem in itemlist: + if titulo: + videoitem.title = titulo + else: + videoitem.title = item.title + videoitem.channel = item.channel + + return itemlist diff --git 
diff --git a/plugin.video.alfa/channels/seriesdanko.json b/plugin.video.alfa/channels/seriesdanko.json
new file mode 100755
index 00000000..a8090bb9
--- /dev/null
+++ b/plugin.video.alfa/channels/seriesdanko.json
@@ -0,0 +1,49 @@
+{
+  "id": "seriesdanko",
+  "name": "Seriesdanko",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "seriesdanko.png",
+  "banner": "seriesdanko.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "06/04/2017",
+      "description": "Mejoras en filtertools"
+    },
+    {
+      "date": "27/01/2017",
+      "description": "Fix añadir serie desde la búsqueda."
+    }
+  ],
+  "categories": [
+    "tvshow",
+    "vos"
+  ],
+  "settings": [
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en busqueda global",
+      "default": false,
+      "enabled": true,
+      "visible": true
+    },
+    {
+      "id": "filter_languages",
+      "type": "list",
+      "label": "Mostrar enlaces en idioma...",
+      "default": 0,
+      "enabled": true,
+      "visible": true,
+      "lvalues": [
+        "No filtrar",
+        "Español",
+        "Latino",
+        "VO",
+        "VOS"
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/seriesdanko.py b/plugin.video.alfa/channels/seriesdanko.py
new file mode 100755
index 00000000..287a2bd3
--- /dev/null
+++ b/plugin.video.alfa/channels/seriesdanko.py
@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urlparse
+
+from channels import filtertools
+from core import config
+from core import httptools
+from core import logger
+from core import scrapertools
+from core import servertools
+from core.item import Item
+
+HOST = 'http://seriesdanko.to/'
+IDIOMAS = {'es': 'Español', 'la': 'Latino', 'vos': 'VOS', 'vo': 'VO'}
+list_idiomas = IDIOMAS.values()
+CALIDADES = ['SD', 'MicroHD', 'HD/MKV']
+
+
+def mainlist(item):
+    logger.info()
+
+    itemlist = list()
+    itemlist.append(Item(channel=item.channel, title="Novedades", action="novedades", url=HOST))
+    itemlist.append(Item(channel=item.channel, title="Más vistas", action="mas_vistas", url=HOST))
+    itemlist.append(Item(channel=item.channel, title="Listado Alfabético", action="listado_alfabetico", url=HOST))
+    itemlist.append(Item(channel=item.channel, title="Todas las series", action="listado_completo", url=HOST))
+    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search",
+                         url=urlparse.urljoin(HOST, "all.php")))
+
+    itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
+
+    return itemlist
+
+
+def novedades(item):
+    logger.info()
+
+    itemlist = list()
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
+    data = re.sub(r"<!--.*?-->", "", data)
+
+    patron = '<a title="([^"]+)" href="([^"]+)".*?>'
+    patron += "<img.*?src='([^']+)'"
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedtitle, scrapedurl, scrapedthumb in matches:
+        # patron = "^(.*?)(?:Ya Disponible|Disponible|Disponbile|disponible|\(Actualizada\))$"
+        # match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
+        title = scrapertools.decodeHtmlentities(scrapedtitle)
+        show = scrapertools.find_single_match(title, "^(.+?) \d+[x|X]\d+")
+
+        itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, scrapedurl), show=show,
+                             action="episodios", thumbnail=scrapedthumb,
+                             context=filtertools.context(item, list_idiomas, CALIDADES)))
+
+    return itemlist
+
+
+def mas_vistas(item):
+    logger.info()
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
+    data = re.sub(r"<!--.*?-->", "", data)
+
+    patron = "<div class='widget HTML' id='HTML3'.+?<div class='widget-content'>(.*?)</div>"
+    data = scrapertools.get_match(data, patron)
+
+    return series_seccion(item, data)
+
+
+def listado_completo(item):
+    logger.info()
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
+    data = re.sub(r"<!--.*?-->", "", data)
+    patron = '<div class="widget HTML" id="HTML10".+?<div class="widget-content">(.*?)</div>'
+    data = scrapertools.get_match(data, patron)
+
+    return series_seccion(item, data)
+
+
+def series_seccion(item, data):
+    logger.info()
+
+    itemlist = []
+    patron = "<a href='([^']+)'.*?>(.*?)</a>"
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedtitle in matches:
+        itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, show=scrapedtitle,
+                             url=urlparse.urljoin(HOST, scrapedurl),
+                             context=filtertools.context(item, list_idiomas, CALIDADES)))
+
+    return itemlist
+
+
+def listado_alfabetico(item):
+    logger.info()
+
+    itemlist = []
+
+    for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ':
+        itemlist.append(Item(channel=item.channel, action="series_por_letra", title=letra,
+                             url=urlparse.urljoin(HOST, "series.php?id=%s" % letra)))
+
+    return itemlist
+
+
+def series_por_letra(item):
+    logger.info("letra = {0}".format(item.title))
+    data = httptools.downloadpage(item.url).data
+
+    shows = re.findall("<a href='(?P<url>[^']+)' title='Capitulos de: (?P<title>.+?)'><img.+?src='(?P<img>[^']+)",
+                       data)
+    itemlist = []
+    for url, title, img in shows:
+        itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", thumbnail=img,
+                                   show=title, context=filtertools.context(item, list_idiomas, CALIDADES)))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info("texto=%s" % texto)
+
+    itemlist = []
+
+    try:
+        data = httptools.downloadpage(item.url).data
+        # re.escape keeps regex metacharacters in the search text from breaking the pattern
+        shows = re.findall("<a href='(?P<url>/serie.php\?serie=[0-9]+)'[^>]*>(?P<title>[^<]*{0}[^<]*)".format(
+            re.escape(texto)), data, re.IGNORECASE)
+        for url, title in shows:
+            itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", show=title,
+                                       context=filtertools.context(item, list_idiomas, CALIDADES)))
+
+    # Catch the exception so a failing channel does not break the global search
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+
+    return itemlist
+ patron += "<img src='([^']+)'" + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) > 0: + thumbnail = matches[0] + else: + thumbnail = item.thumbnail + + patron = "<a href='([^']+)'>(.*?)</a><idioma>(.*?)</idioma>" + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedidioma in matches: + idioma = "" + filter_langs = [] + for i in scrapedidioma.split("|"): + idioma += " [" + IDIOMAS.get(i, "OVOS") + "]" + filter_langs.append(IDIOMAS.get(i, "OVOS")) + title = scrapedtitle + idioma + + itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, scrapedurl), + action="findvideos", show=item.show, thumbnail=thumbnail, plot="", language=filter_langs)) + + itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES) + + # Opción "Añadir esta serie a la videoteca de XBMC" + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", + extra="episodios")) + + return itemlist + + +def findvideos(item): + logger.info() + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data) + data = re.sub(r"<!--.*?-->", "", data) + + online = re.findall('<table class=.+? cellpadding=.+? cellspacing=.+?>(.+?)</table>', data, + re.MULTILINE | re.DOTALL) + + itemlist = parse_videos(item, "Ver", online[0]) + itemlist.extend(parse_videos(item, "Descargar", online[1])) + + itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES) + + return itemlist + + +def parse_videos(item, tipo, data): + logger.info() + + itemlist = [] + + pattern = "<td.+?<img src='/assets/img/banderas/([^\.]+).+?</td><td.+?>(.*?)</td><td.+?" 
\ + "<img src='/assets/img/servidores/([^\.]+).+?</td><td.+?href='([^']+)'.+?>.*?</a></td>" \ + "<td.+?>(.*?)</td>" + + links = re.findall(pattern, data, re.MULTILINE | re.DOTALL) + + for language, date, server, link, quality in links: + if quality == "": + quality = "SD" + title = "%s en %s [%s] [%s] (%s)" % (tipo, server, IDIOMAS.get(language, "OVOS"), quality, date) + + itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, link), action="play", + show=item.show, language=IDIOMAS.get(language, "OVOS"), quality=quality, + fulltitle=item.title)) + + return itemlist + + +def play(item): + logger.info("play url=%s" % item.url) + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data) + + patron = '<div id="url2".*?><a href="([^"]+)">.+?</a></div>' + url = scrapertools.find_single_match(data, patron) + + itemlist = servertools.find_video_items(data=url) + titulo = scrapertools.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$") + if titulo: + titulo += " [%s]" % item.language + + for videoitem in itemlist: + if titulo: + videoitem.title = titulo + else: + videoitem.title = item.title + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/serieslan.json b/plugin.video.alfa/channels/serieslan.json new file mode 100755 index 00000000..47e0f9dc --- /dev/null +++ b/plugin.video.alfa/channels/serieslan.json @@ -0,0 +1,20 @@ +{ + "id": "serieslan", + "name": "SeriesLan", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/s6CBxlw.png", + "banner": "http://i.imgur.com/c1YTgNT.png", + "version": 1, + "changes": [ + { + "date": "09/06/2017", + "description": "Primera version de series en Latino" + } + ], + "categories": [ + "tvshow", + "latino" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/serieslan.py b/plugin.video.alfa/channels/serieslan.py new file mode 100755 index 00000000..c3f05a08 --- /dev/null +++ b/plugin.video.alfa/channels/serieslan.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +import re + +from channels import renumbertools +from channelselector import get_thumb +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +host = "https://serieslan.com" + + +def mainlist(item): + logger.info() + thumb_series = get_thumb("thumb_channels_tvshow.png") + + itemlist = list() + + itemlist.append( + Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0)) + itemlist = renumbertools.show_option(item.channel, itemlist) + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<a href="([^"]+)" ' + patron += 'class="link">.+?<img src="([^"]+)".*?' 
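The `episodios()` of seriesdanko above rewrites the flag `<img>` tags into ad-hoc `<idioma>` markers before matching, so a single regex can later capture all languages per episode. A standalone sketch with an invented sample row:

```python
import re

# Invented sample row; the real seriesdanko markup differs in detail.
row = ("<a href='/cap1'>1x01</a> "
      "<img src=/assets/img/banderas/es.png border='0' height='15' width='25' />  <td>")

# Turn the first flag into an opening marker, extra flags into '|' separators,
# and the trailing image attributes into the closing marker.
row = re.sub(r"a> <img src=/assets/img/banderas/", "a><idioma>", row)
row = re.sub(r"<img src=/assets/img/banderas/", "|", row)
row = re.sub(r"\.png border='\d+' height='\d+' width='\d+'[^>]+>\s+<", "</idioma><", row)

print(row)  # <a href='/cap1'>1x01</a><idioma>es</idioma><td>
```

Normalizing the markup first keeps the main extraction pattern short instead of encoding every image-attribute variation into it.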
diff --git a/plugin.video.alfa/channels/serieslan.json b/plugin.video.alfa/channels/serieslan.json
new file mode 100755
index 00000000..47e0f9dc
--- /dev/null
+++ b/plugin.video.alfa/channels/serieslan.json
@@ -0,0 +1,20 @@
+{
+  "id": "serieslan",
+  "name": "SeriesLan",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "http://i.imgur.com/s6CBxlw.png",
+  "banner": "http://i.imgur.com/c1YTgNT.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "09/06/2017",
+      "description": "Primera version de series en Latino"
+    }
+  ],
+  "categories": [
+    "tvshow",
+    "latino"
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/serieslan.py b/plugin.video.alfa/channels/serieslan.py
new file mode 100755
index 00000000..c3f05a08
--- /dev/null
+++ b/plugin.video.alfa/channels/serieslan.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from channels import renumbertools
+from channelselector import get_thumb
+from core import config
+from core import httptools
+from core import logger
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+
+host = "https://serieslan.com"
+
+
+def mainlist(item):
+    logger.info()
+    thumb_series = get_thumb("thumb_channels_tvshow.png")
+
+    itemlist = list()
+
+    itemlist.append(
+        Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0))
+    itemlist = renumbertools.show_option(item.channel, itemlist)
+    return itemlist
+
+
+def lista(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '<a href="([^"]+)" '
+    patron += 'class="link">.+?<img src="([^"]+)".*?'
+    patron += 'title="([^"]+)">'
+
+    matches = scrapertools.find_multiple_matches(data, patron)
+
+    # Pagination: slice the full match list into pages of num_items_x_pagina entries
+    num_items_x_pagina = 30
+    first = item.page * num_items_x_pagina
+    last = first + num_items_x_pagina  # slice ends are exclusive, so no "- 1" here
+
+    for link, img, name in matches[first:last]:
+        title = name
+        url = host + link
+        scrapedthumbnail = host + img
+        itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
+                                   context=renumbertools.context(item)))
+
+    itemlist.append(
+        Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
+
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    # running total of episodes, used to renumber them sequentially
+    total_episode = 0
+
+    patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
+    matches = scrapertools.find_multiple_matches(data, patron_caps)
+    # data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
+    patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
+    scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
+    scrapedthumbnail = host + scrapedthumbnail
+
+    for cap, link, name in matches:
+
+        title = ""
+        pat = "as/sd"
+        # several episodes behind a single link
+        if len(name.split(pat)) > 1:
+            i = 0
+            for pos in name.split(pat):
+                i = i + 1
+                total_episode += 1
+                season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode)
+                if len(name.split(pat)) == i:
+                    title += "{0}x{1:02d} ".format(season, episode)
+                else:
+                    title += "{0}x{1:02d}_".format(season, episode)
+        else:
+            total_episode += 1
+            season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, total_episode)
+
+            title += "{0}x{1:02d} ".format(season, episode)
+
+        url = host + "/" + link
+        if "disponible" in link:
+            title += "No Disponible aún"
+        else:
+            title += name
+        itemlist.append(
+            Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, plot=scrapedplot,
+                 thumbnail=scrapedthumbnail))
+
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
+                             action="add_serie_to_library", extra="episodios", show=show))
+
+    return itemlist
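`lista()` above pages the full match list with a slice. Python slice ends are exclusive, which is why the bounds use `start + page_size` rather than `start + page_size - 1`; a quick standalone check:

```python
# Pages of 30: slice bounds are [start, end) with the end exclusive.
items = list(range(100))  # stand-in for the scraped matches
page_size = 30

for page in range(2):
    start = page * page_size
    end = start + page_size
    print("page %d -> %d items" % (page, len(items[start:end])))
# page 0 -> 30 items / page 1 -> 30 items
# With end = start + page_size - 1 each page would silently drop one item (29).
```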
+
+
+# def getUrlVideo(item):
+def findvideos(item):
+    ## Kodi 17+
+    ## Openload as default server
+
+    import base64
+
+    itemlist = []
+
+    ## Urls
+    urlServer = "https://openload.co/embed/%s/"
+    urlApiGetKey = "https://serieslan.com/idv.php?i=%s"
+
+    ## JS helper ported to Python: RC4 key scheduling followed by the keystream XOR
+    def txc(key, str):
+        s = range(256)
+        j = 0
+        res = ''
+        for i in range(256):
+            j = (j + s[i] + ord(key[i % len(key)])) % 256
+            x = s[i]
+            s[i] = s[j]
+            s[j] = x
+        i = 0
+        j = 0
+        for y in range(len(str)):
+            i = (i + 1) % 256
+            j = (j + s[i]) % 256
+            x = s[i]
+            s[i] = s[j]
+            s[j] = x
+            res += chr(ord(str[y]) ^ s[(s[i] + s[j]) % 256])
+        return res
+
+    data = httptools.downloadpage(item.url).data
+    pattern = '<div id="video" idv="([^"]*)" ide="([^"]*)" ids="[^"]*" class="video">'
+    idv, ide = scrapertools.find_single_match(data, pattern)
+    thumbnail = scrapertools.find_single_match(data,
+                                               '<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
+    show = scrapertools.find_single_match(data, '<span>Episodio: <\/span>([^"]*)<\/p><p><span>Idioma')
+    thumbnail = host + thumbnail
+    data = httptools.downloadpage(urlApiGetKey % idv, headers={'Referer': item.url}).data
+    video_url = urlServer % (txc(ide, base64.decodestring(data)))
+    server = "openload"
+    if " SUB" in item.title or " Sub" in item.title:
+        lang = "VOS"
+    else:
+        lang = "Latino"
+    title = "Enlace encontrado en " + server + " [" + lang + "]"
+    itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
+                         thumbnail=thumbnail, server=server, folder=False))
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+    itemlist = []
+    # Look for the video on the stated server first...
+    devuelve = servertools.findvideosbyserver(item.url, item.server)
+    if not devuelve:
+        # ...and if it is not found there, try every available server
+        devuelve = servertools.findvideos(item.url, skip=True)
+    if devuelve:
+        # logger.debug(devuelve)
+        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
+                             url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
+    return itemlist
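The inline `txc()` helper in `findvideos()` above is plain RC4, so the same call both encrypts and decrypts. A minimal, version-neutral sketch of that symmetry (key and plaintext are made up):

```python
def rc4(key, data):
    # key scheduling
    s = list(range(256))
    j = 0
    for i in range(256):
        j = (j + s[i] + ord(key[i % len(key)])) % 256
        s[i], s[j] = s[j], s[i]
    # keystream generation + XOR
    out = []
    i = j = 0
    for ch in data:
        i = (i + 1) % 256
        j = (j + s[i]) % 256
        s[i], s[j] = s[j], s[i]
        out.append(chr(ord(ch) ^ s[(s[i] + s[j]) % 256]))
    return ''.join(out)

cipher = rc4("ide-key", "some-openload-id")
print(rc4("ide-key", cipher))  # some-openload-id  (RC4 is its own inverse)
```

This is why the channel can recover the Openload id with a single `txc(ide, ...)` call over the base64-decoded API response.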
diff --git a/plugin.video.alfa/channels/serieslatino.json b/plugin.video.alfa/channels/serieslatino.json
new file mode 100755
index 00000000..5572fd74
--- /dev/null
+++ b/plugin.video.alfa/channels/serieslatino.json
@@ -0,0 +1,62 @@
+{
+  "id": "serieslatino",
+  "name": "SeriesLatino",
+  "compatible": {
+    "addon_version": "4.3"
+  },
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "https://s17.postimg.org/rpmc90y9r/serieslatino.png",
+  "banner": "https://s8.postimg.org/ka707waat/serieslatino_banner.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "24/06/2017",
+      "description": "Cambios para autoplay"
+    },
+    {
+      "date": "22/06/2017",
+      "description": "ajustes para AutoPlay"
+    },
+    {
+      "date": "25/05/2017",
+      "description": "cambios esteticos"
+    },
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "04/01/2017",
+      "description": "Release."
+    }
+  ],
+  "categories": [
+    "latino",
+    "tvshow"
+  ],
+  "settings": [
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en busqueda global",
+      "default": false,
+      "enabled": false,
+      "visible": false
+    },
+    {
+      "id": "filter_languages",
+      "type": "list",
+      "label": "Mostrar enlaces en idioma...",
+      "default": 0,
+      "enabled": true,
+      "visible": true,
+      "lvalues": [
+        "No filtrar",
+        "Latino",
+        "VOS"
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/serieslatino.py b/plugin.video.alfa/channels/serieslatino.py
new file mode 100755
index 00000000..9ed34e1e
--- /dev/null
+++ b/plugin.video.alfa/channels/serieslatino.py
@@ -0,0 +1,318 @@
+# -*- coding: utf-8 -*-
+
+
+import re
+
+from channels import autoplay
+from channels import filtertools
+from core import config
+from core import httptools
+from core import logger
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+
+host = 'http://www.serieslatino.tv/'
+
+IDIOMAS = {'Latino': 'Latino', 'Sub Español': 'VOS'}
+list_language = IDIOMAS.values()
+list_quality = []
+list_servers = ['yourupload', 'openload', 'sendvid']
+
+vars = {
+    'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://www.estadepelis.com/',
+    'b48699bb49d4550f27879deeb948d4f7d9c5949a8': 'embed', 'JzewJkLlrvcFnLelj2ikbA': 'php?url=',
+    'p889c6853a117aca83ef9d6523335dc065213ae86': 'player',
+    'e20fb341325556c0fc0145ce10d08a970538987': 'http://yourupload.com/embed/'}
+
+tgenero = {"acción": "https://s3.postimg.org/y6o9puflv/accion.png",
+           "animación": "https://s13.postimg.org/5on877l87/animacion.png",
+           "aventura": "https://s10.postimg.org/6su40czih/aventura.png",
+           "belico": "https://s23.postimg.org/71itp9hcr/belica.png",
+           "ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
+           "comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
+           "comedia romántica": "https://s21.postimg.org/xfsj7ua0n/romantica.png",
+           "cortometrajes": "https://s15.postimg.org/kluxxwg23/cortometraje.png",
+           "crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
+           "cristianas": "https://s7.postimg.org/llo852fwr/religiosa.png",
+           "deportivas": "https://s13.postimg.org/xuxf5h06v/deporte.png",
+           "drama": "https://s16.postimg.org/94sia332d/drama.png",
+           "familiar": "https://s7.postimg.org/6s7vdhqrf/familiar.png",
+           "fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
+           "guerra": "https://s4.postimg.org/n1h2jp2jh/guerra.png",
+           "historia": "https://s15.postimg.org/fmc050h1n/historia.png",
+           "intriga": "https://s27.postimg.org/v9og43u2b/intriga.png",
+           "misterios": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
+           "musical": "https://s29.postimg.org/bbxmdh9c7/musical.png",
+           "romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
+           "suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
+           "terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
+           "thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png"}
+
+
+def mainlist(item):
+    logger.info()
+
+    autoplay.init(item.channel, list_servers, list_quality)
+    itemlist = []
+
+    itemlist.append(item.clone(title="Series", action="lista",
+                               thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
+                               fanart='https://s27.postimg.org/iahczwgrn/series.png', extra='peliculas/',
+                               url=host + 'lista-de-series/'))
+
+    itemlist.append(
+        itemlist[-1].clone(title="Doramas", action="lista", thumbnail='https://s15.postimg.org/sjcthoa6z/doramas.png',
+                           fanart='https://s15.postimg.org/sjcthoa6z/doramas.png', url=host + 'lista-de-doramas/',
+                           extra='/genero'))
+
+    itemlist.append(
+        itemlist[-1].clone(title="Generos", action="generos", thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
+                           fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', url=host, extra='/genero'))
+
+    itemlist.append(itemlist[-1].clone(title="Buscar", action="search", url=host + 'resultados/?q=',
+                                       thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
+                                       fanart='https://s30.postimg.org/pei7txpa9/buscar.png'))
+
+    autoplay.show_option(item.channel, itemlist)
+
+    return itemlist
+
+
+def lista(item):
+    logger.info()
+
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    # logger.debug(data)
+    patron = '<div id=mt-1830 class=item><a href=(.*?)><div class=image><img src=(.*?) alt=(.*?) width=.*? ' \
+             'height=.*?class=player>.*?class=ttx>(.*?)<div class=degradado>.*?class=year>(.*?)<\/span><\/div><\/div>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedyear in matches:
+        url = host + scrapedurl
+        thumbnail = scrapedthumbnail
+        plot = scrapedplot
+        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
+        title = scrapedtitle + ' ' + scrapedyear
+        fanart = ''
+        itemlist.append(
+            Item(channel=item.channel, action='temporadas', title=scrapedtitle, url=url, thumbnail=thumbnail,
+                 plot=plot, fanart=fanart, contentSerieName=scrapedtitle, contentYear=scrapedyear,
+                 infoLabels={'year': scrapedyear}))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    # Pagination
+    if itemlist != []:
+        actual_page_url = item.url
+        next_page = scrapertools.find_single_match(data, '<div class=pag_b><a href=(.*?)>Siguiente<\/a><\/div>')
+        if next_page != '':
+            itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>',
+                                 url=item.url + next_page,
+                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'))
+    return itemlist
+
+
+def temporadas(item):
+    logger.info()
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+
+    patron = '<span class=se-t.*?>(.*?)<\/span>'
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    infoLabels = item.infoLabels
+    for scrapedtitle in matches:
+        contentSeasonNumber = scrapedtitle.strip()
+        title = 'Temporada %s' % scrapedtitle
+        thumbnail = item.thumbnail
+        plot = item.plot
+        fanart = item.fanart
+        infoLabels['season'] = contentSeasonNumber
+
+        itemlist.append(Item(channel=item.channel, action='episodiosxtemp', url=item.url, title=title,
+                             contentSerieName=item.contentSerieName, thumbnail=thumbnail, plot=plot, fanart=fanart,
+                             contentSeasonNumber=contentSeasonNumber, infoLabels=item.infoLabels))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(
+            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
+                 action="add_serie_to_library", extra="episodiosxtemp", contentSerieName=item.contentSerieName,
+                 contentYear=item.contentYear, extra1='library'))
+
+    return itemlist
+
+
+def episodiosxtemp(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = 'class=numerando>(.*?)x(.*?)<\/div><div class=episodiotitle><a href=(.*?)>(.*?)<\/a><span class=date>.*?'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    infoLabels = item.infoLabels
+    for scrapedtemp, scrapedep, scrapedurl, scrapedtitle in matches:
+        url = host + scrapedurl
+        contentEpisodeNumber = scrapedep.strip(' ')
+        temp = scrapedtemp.strip(' ')
+        title = item.contentSerieName + ' %sx%s' % (temp, contentEpisodeNumber)
+        thumbnail = item.thumbnail
+        plot = item.plot
+        fanart = item.fanart
+        infoLabels['episode'] = contentEpisodeNumber
+        logger.debug('Nombre: ' + item.contentSerieName)
+        infoLabels = item.infoLabels
+        if item.extra1 == 'library':
+            itemlist.append(
+                Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url,
+                     thumbnail=item.thumbnail, plot=plot, contentSerieName=item.contentSerieName,
+                     contentSeasonNumber=item.contentSeasonNumber, infoLabels=infoLabels))
+        elif temp == item.contentSeasonNumber:
+            itemlist.append(
+                Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url,
+                     thumbnail=item.thumbnail, plot=plot, contentSerieName=item.contentSerieName,
+                     contentSeasonNumber=item.contentSeasonNumber, infoLabels=infoLabels))
+
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+    return itemlist
+
+
+def generos(item):
+    logger.info()
+
+    itemlist = []
+    norep = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>([^<]+)<\/a>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedtitle in matches:
+
+        url = host + scrapedurl
+        title = scrapedtitle.lower()
+        if title in tgenero:
+            thumbnail = tgenero[title.lower()]
+        else:
+            thumbnail = ''
+
+        itemactual = Item(channel=item.channel, action='lista', title=title, url=url, thumbnail=thumbnail,
+                          extra=item.extra)
+
+        if title not in norep:
+            itemlist.append(itemactual)
+            norep.append(itemactual.title)
+    return itemlist
+
+
+def dec(encurl):
+    logger.info()
+    url = ''
+    encurl = encurl.translate(None, "',(,),;")
+    encurl = encurl.split('+')
+
+    # Replace each known token with its value from the vars table; keep unknown pieces verbatim
+    for cod in encurl:
+        if cod in vars:
+            url = url + vars[cod]
+        else:
+            url = url + cod
+    return url
+
+
+def findvideos(item):
+    logger.info()
+
+    itemlist = []
+    langs = dict()
+
+    data = httptools.downloadpage(item.url).data
+    patron = '<a.*?onclick="return (play\d+).*?;".*?> (.*?) <\/a>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for key, value in matches:
+        langs[key] = value.strip()
+
+    patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    title = item.title
+    enlace = scrapertools.find_single_match(data,
+                                            'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"')
+
+    for scrapedlang, encurl in matches:
+
+        if 'e20fb34' in encurl:
+            url = dec(encurl)
+            url = url + enlace
+
+        else:
+            url = dec(encurl)
+        title = ''
+        server = ''
+        servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk'}
+        server_id = re.sub(r'.*?embed|\.php.*', '', url)
+        if server_id and server_id in servers:
+            server = servers[server_id]
+        logger.debug('server_id: %s' % server_id)
+        logger.debug('langs: %s' % langs)
+        if langs[scrapedlang] in list_language:
+            language = IDIOMAS[langs[scrapedlang]]
+        else:
+            language = 'Latino'
+        if langs[scrapedlang] == 'Latino':
+            idioma = '[COLOR limegreen]LATINO[/COLOR]'
+        elif langs[scrapedlang] == 'Sub Español':
+            idioma = '[COLOR red]SUB[/COLOR]'
+        else:
+            idioma = ''
+
+        title = item.contentSerieName + ' (' + server + ') ' + idioma
+        plot = item.plot
+
+        thumbnail = servertools.guess_server_thumbnail(title)
+
+        if 'player' not in url and 'php' in url:
+            itemlist.append(item.clone(title=title,
+                                       url=url,
+                                       action="play",
+                                       plot=plot,
+                                       thumbnail=thumbnail,
+                                       server=server,
+                                       quality='',
+                                       language=language
+                                       ))
+        logger.debug('url: %s' % url)
+
+    # Required by FilterTools
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+
+    # Required by AutoPlay
+    autoplay.start(itemlist, item)
+
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+    if texto != '':
+        return lista(item)
+    # return an empty list instead of None when there is nothing to search for
+    return []
+
+
+def play(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url, add_referer=True).data
+    if 'your' in item.url:
+        item.url = 'http://www.yourupload.com/embed/' + scrapertools.find_single_match(data, 'src=".*?code=(.*?)"')
+        itemlist.append(item)
+    else:
+        itemlist = servertools.find_video_items(data=data)
+
+    return itemlist
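`dec()` above rebuilds a URL by substituting known tokens from the `vars` table and passing unknown fragments through unchanged. A self-contained sketch with a made-up token table (the channel's real table maps hash-like tokens to URL pieces):

```python
# Hypothetical token table for illustration only.
VARS = {'tok_base': 'http://example.com/', 'tok_embed': 'embed', 'tok_q': 'php?url='}

def dec(encurl):
    # strip the JavaScript punctuation, split on '+', then substitute known tokens
    for ch in "',();":
        encurl = encurl.replace(ch, '')
    return ''.join(VARS.get(cod, cod) for cod in encurl.split('+'))

print(dec("'tok_base'+'tok_embed'+'.'+'tok_q'+'abc123'"))
# http://example.com/embed.php?url=abc123
```

Keeping unknown fragments verbatim is what lets the literal video id survive the substitution pass.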
diff --git a/plugin.video.alfa/channels/seriesmeme.json b/plugin.video.alfa/channels/seriesmeme.json
new file mode 100755
index 00000000..8f0abc0e
--- /dev/null
+++ b/plugin.video.alfa/channels/seriesmeme.json
@@ -0,0 +1,20 @@
+{
+  "id": "seriesmeme",
+  "name": "SeriesMeme",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "seriesmeme.png",
+  "banner": "seriesmeme.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "18/05/2017",
+      "description": "Mejora estética agregado modulo tmdb"
+    }
+  ],
+  "categories": [
+    "tvshow",
+    "latino"
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/seriesmeme.py b/plugin.video.alfa/channels/seriesmeme.py
new file mode 100755
index 00000000..662a2745
--- /dev/null
+++ b/plugin.video.alfa/channels/seriesmeme.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urlparse
+
+from channels import renumbertools
+from channelselector import get_thumb
+from core import config
+from core import httptools
+from core import logger
+from core import scrapertools
+from core import tmdb
+from core.item import Item
+
+host = "https://seriesmeme.com/"
+
+
+def mainlist(item):
+    logger.info()
+
+    thumb_series = get_thumb("thumb_channels_tvshow.png")
+
+    thumb_series_az = get_thumb("thumb_channels_tvshow_az.png")
+
+    itemlist = list()
+
+    itemlist.append(Item(channel=item.channel, action="lista_gen", title="Novedades", url=host,
+                         thumbnail=thumb_series))
+    itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=urlparse.urljoin(host, "/lista"),
+                         thumbnail=thumb_series))
+    itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", url=host,
+                         thumbnail=thumb_series))
+    itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado Alfabetico", url=host,
+                         thumbnail=thumb_series_az))
+    itemlist.append(Item(channel=item.channel, action="top", title="Top Series", url=host,
+                         thumbnail=thumb_series))
+    itemlist = renumbertools.show_option(item.channel, itemlist)
+    return itemlist
+
+
+"""
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ","+")
+    item.url = item.url+texto
+    if texto!='':
+        return lista(item)
+"""
+
+
+def categorias(item):
+    logger.info()
+    dict_gender = {"acción": "accion", "animes": "animacion", "aventuras": "aventura", "dibujos": "animacion",
+                   "ciencia ficción": "ciencia%20ficcion", "intriga": "misterio", "suspenso": "suspense",
+                   "thriller": "suspense", "fantástico": "fantasia"}
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron_cat = '<li id="menu-item-15068" class=".+?"><.+?>.+?<\/a>(.+?)<\/ul><\/li>'
+    categorias = scrapertools.find_single_match(data, patron_cat)
+    patron = '<li id="menu-item-.+?" class=".+?"><a href="([^"]+)">([^"]+)<\/a><\/li>'
+    matches = scrapertools.find_multiple_matches(categorias, patron)
+    for link, name in matches:
+        if 'Género' in name:
+            title = name.replace('Género ', '')
+            url = link
+            thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/azul/%s.png"
+            thumbnail = thumbnail % dict_gender.get(title.lower(), title.lower())
+            itemlist.append(item.clone(title=title, url=url, plot=title, action="lista_gen", thumbnail=thumbnail))
+    return itemlist
+
+
+def alfabetico(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron_alf1 = '<li id="menu-item-15069" class=".+?"><.+?>.+?<\/a>(.+?)<\/ul><\/li>'
+    patron_alf2 = '<li id="menu-item-15099" class=".+?"><.+?>.+?<\/a>(.+?)<\/ul><\/li>'
+    alfabeto1 = scrapertools.find_single_match(data, patron_alf1)
+    alfabeto2 = scrapertools.find_single_match(data, patron_alf2)
+    alfabeto = alfabeto1 + alfabeto2
+    patron = '<li id="menu-item-.+?" class=".+?"><a href="([^"]+)">([^"]+)<\/a><\/li>'
+    matches = scrapertools.find_multiple_matches(alfabeto, patron)
+    for link, name in matches:
+        title = name
+        url = link
+        itemlist.append(item.clone(title=title, url=url, plot=title, action="lista_gen"))
+    return itemlist
+
+
+def top(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron_top = '<li id="menu-item-15087" class=".+?"><.+?>.+?<\/a>(.+?)<\/ul><\/li>'
+    top = scrapertools.find_single_match(data, patron_top)
+    patron = '<a href="([^"]+)">([^"]+)<\/a>'
+    matches = scrapertools.find_multiple_matches(top, patron)
+    for link, name in matches:
+        title = name
+        url = link
+        itemlist.append(item.clone(title=title, url=url, plot=title, action="lista_gen", show=title))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
+
+def lista_gen(item):
+    logger.info()
+
+    itemlist = []
+
+    data1 = httptools.downloadpage(item.url).data
+    data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
+    patron_sec = '<section class="content">.+?<\/section>'
+    data = scrapertools.find_single_match(data1, patron_sec)
+    patron = '<article id=.+? class=.+?><div.+?>'
+    patron += '<a href="([^"]+)" title="([^"]+)'  # scrapedurl, scrapedtitle
+    patron += ' Capítulos Completos ([^"]+)">'  # scrapedlang
+    patron += '<img.+? data-src=.+? data-lazy-src="([^"]+)"'  # scrapedthumbnail
+    matches = scrapertools.find_multiple_matches(data, patron)
+    i = 0
+    for scrapedurl, scrapedtitle, scrapedlang, scrapedthumbnail in matches:
+        i = i + 1
+        if 'HD' in scrapedlang:
+            scrapedlang = scrapedlang.replace('HD', '')
+        title = scrapedtitle + " [ " + scrapedlang + "]"
+        itemlist.append(
+            Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
+                 show=scrapedtitle, context=renumbertools.context(item)))
+    tmdb.set_infoLabels(itemlist)
+
+    # Pagination
+    patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">'
+    next_page_url = scrapertools.find_single_match(data, patron_pag)
+
+    if next_page_url != "" and i != 1:
+        item.url = next_page_url
+        itemlist.append(Item(channel=item.channel, action="lista_gen", title=">> Página siguiente",
+                             url=next_page_url,
+                             thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
+
+    return itemlist
+
+
+def lista(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '<li><strong><a href="([^"]+)">([^"]+)<\/a>'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for link, name in matches:
+        title = name
+        url = link
+        itemlist.append(item.clone(title=title, url=url, plot=title, action="episodios"))
+    return itemlist
"Episodio " + str(episode), date) + # title = cap+" - "+name + url = link + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail, + plot=scrapedplot, show=show)) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + + action="add_serie_to_library", extra="episodios", show=show)) + + return itemlist diff --git a/plugin.video.alfa/channels/seriespapaya.json b/plugin.video.alfa/channels/seriespapaya.json new file mode 100755 index 00000000..ccd7763f --- /dev/null +++ b/plugin.video.alfa/channels/seriespapaya.json @@ -0,0 +1,62 @@ +{ + "id": "seriespapaya", + "name": "SeriesPapaya", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/P1D92cf.png", + "banner": "http://i.imgur.com/OHHvi5z.png", + "version": 1, + "changes": [ + { + "date": "06/04/2017", + "description": "Mejoras en filtertools" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "02/10/2016", + "description": "Primera version" + } + ], + "categories": [ + "tvshow", + "anime" + ], + "settings": [ + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Español", + "Inglés", + "Latino", + "Catalán", + "VOS" + ] + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Episodios de series", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/seriespapaya.py b/plugin.video.alfa/channels/seriespapaya.py new file mode 100755 index 00000000..b03af7b5 --- /dev/null +++ b/plugin.video.alfa/channels/seriespapaya.py @@ -0,0 +1,212 @@ +# -*- coding: utf-8 -*- + +import re +import string +import urllib +import urlparse + +from channels import filtertools +from channelselector import get_thumb +from core import config +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +HOST = "http://www.seriespapaya.com" + +IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOS'} +list_idiomas = IDIOMAS.values() +CALIDADES = ['360p', '480p', '720p HD', '1080p HD'] + + +def mainlist(item): + logger.info() + + thumb_series = get_thumb("thumb_channels_tvshow.png") + thumb_series_az = get_thumb("thumb_channels_tvshow_az.png") + thumb_buscar = get_thumb("thumb_search.png") + + itemlist = [] + itemlist.append( + Item(action="listado_alfabetico", title="Listado Alfabetico", channel=item.channel, thumbnail=thumb_series_az)) + itemlist.append( + Item(action="novedades", title="Capítulos de estreno", channel=item.channel, thumbnail=thumb_series)) + itemlist.append(Item(action="search", title="Buscar", channel=item.channel, thumbnail=thumb_buscar)) + + itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES) + + return itemlist + + +def listado_alfabetico(item): + logger.info() + + itemlist = [item.clone(action="series_por_letra", title="0-9")] + for letra in string.ascii_uppercase: + itemlist.append(item.clone(action="series_por_letra", title=letra)) 
diff --git a/plugin.video.alfa/channels/seriespapaya.json b/plugin.video.alfa/channels/seriespapaya.json
new file mode 100755
index 00000000..ccd7763f
--- /dev/null
+++ b/plugin.video.alfa/channels/seriespapaya.json
@@ -0,0 +1,62 @@
+{
+  "id": "seriespapaya",
+  "name": "SeriesPapaya",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "thumbnail": "http://i.imgur.com/P1D92cf.png",
+  "banner": "http://i.imgur.com/OHHvi5z.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "06/04/2017",
+      "description": "Mejoras en filtertools"
+    },
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "02/10/2016",
+      "description": "Primera version"
+    }
+  ],
+  "categories": [
+    "tvshow",
+    "anime"
+  ],
+  "settings": [
+    {
+      "id": "filter_languages",
+      "type": "list",
+      "label": "Mostrar enlaces en idioma...",
+      "default": 0,
+      "enabled": true,
+      "visible": true,
+      "lvalues": [
+        "No filtrar",
+        "Español",
+        "Inglés",
+        "Latino",
+        "Catalán",
+        "VOS"
+      ]
+    },
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en busqueda global",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    },
+    {
+      "id": "include_in_newest_series",
+      "type": "bool",
+      "label": "Incluir en Novedades - Episodios de series",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    }
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/seriespapaya.py b/plugin.video.alfa/channels/seriespapaya.py
new file mode 100755
index 00000000..b03af7b5
--- /dev/null
+++ b/plugin.video.alfa/channels/seriespapaya.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+
+import re
+import string
+import urllib
+import urlparse
+
+from channels import filtertools
+from channelselector import get_thumb
+from core import config
+from core import httptools
+from core import jsontools
+from core import logger
+from core import scrapertools
+from core import servertools
+from core.item import Item
+
+HOST = "http://www.seriespapaya.com"
+
+IDIOMAS = {'es': 'Español', 'lat': 'Latino', 'in': 'Inglés', 'ca': 'Catalán', 'sub': 'VOS'}
+list_idiomas = IDIOMAS.values()
+CALIDADES = ['360p', '480p', '720p HD', '1080p HD']
+
+
+def mainlist(item):
+    logger.info()
+
+    thumb_series = get_thumb("thumb_channels_tvshow.png")
+    thumb_series_az = get_thumb("thumb_channels_tvshow_az.png")
+    thumb_buscar = get_thumb("thumb_search.png")
+
+    itemlist = []
+    itemlist.append(
+        Item(action="listado_alfabetico", title="Listado Alfabetico", channel=item.channel,
+             thumbnail=thumb_series_az))
+    itemlist.append(
+        Item(action="novedades", title="Capítulos de estreno", channel=item.channel, thumbnail=thumb_series))
+    itemlist.append(Item(action="search", title="Buscar", channel=item.channel, thumbnail=thumb_buscar))
+
+    itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
+
+    return itemlist
+
+
+def listado_alfabetico(item):
+    logger.info()
+
+    itemlist = [item.clone(action="series_por_letra", title="0-9")]
+    for letra in string.ascii_uppercase:
+        itemlist.append(item.clone(action="series_por_letra", title=letra))
+
+    return itemlist
+
+
+def series_por_letra(item):
+    logger.info("letra: {0}".format(item.title))
+    item.letter = item.title.lower()
+    item.extra = 0
+    return series_por_letra_y_grupo(item)
+
+
+def series_por_letra_y_grupo(item):
+    logger.info("letra: {0} - grupo: {1}".format(item.letter, item.extra))
+    itemlist = []
+    url = urlparse.urljoin(HOST, "autoload_process.php")
+
+    # The listing is paged via POST: "group_no" is the page index for the given letter
+    postRequest = {
+        "group_no": item.extra,
+        "letra": item.letter.lower()
+    }
+    data = httptools.downloadpage(url, post=urllib.urlencode(postRequest)).data
+
+    series = re.findall(
+        'list_imagen.+?src="(?P<img>[^"]+).+?<div class="list_titulo"><a[^>]+href="(?P<url>[^"]+)[^>]+>(.*?)</a>',
+        data, re.MULTILINE | re.DOTALL)
+
+    for img, url, name in series:
+        itemlist.append(item.clone(
+            action="episodios",
+            title=name,
+            show=name,
+            url=urlparse.urljoin(HOST, url),
+            thumbnail=urlparse.urljoin(HOST, img),
+            context=filtertools.context(item, list_idiomas, CALIDADES)
+        ))
+
+    # A full page holds 8 entries, so assume there may be another page after it
+    if len(series) == 8:
+        itemlist.append(item.clone(title="Siguiente >>", action="series_por_letra_y_grupo", extra=item.extra + 1))
+
+    if item.extra > 0:
+        itemlist.append(item.clone(title="<< Anterior", action="series_por_letra_y_grupo", extra=item.extra - 1))
+
+    return itemlist
+
+
+def novedades(item):
+    logger.info()
+    data = httptools.downloadpage(HOST).data
+    shows = re.findall('sidebarestdiv[^<]+<a[^<]+title="([^"]*)[^<]+href="([^"]*)[^<]+<img[^<]+src="([^"]+)', data,
+                       re.MULTILINE | re.DOTALL)
+
+    itemlist = []
+
+    for title, url, img in shows:
+        itemlist.append(item.clone(action="findvideos", title=title, url=urlparse.urljoin(HOST, url), thumbnail=img))
+
+    return itemlist
+
+
+def newest(categoria):
+    logger.info("categoria: {0}".format(categoria))
+
+    if categoria != 'series':
+        return []
+
+    try:
+        return novedades(Item())
+
+    # Catch the exception so a failing channel does not break the 'newest' channel
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+
+        return []
+
+
+def episodios(item):
+    logger.info("url: {0}".format(item.url))
+
+    data = httptools.downloadpage(item.url).data
+
+    episodes = re.findall('visco.*?href="(?P<url>[^"]+).+?nbsp; (?P<title>.*?)</a>.+?ucapaudio.?>(?P<langs>.*?)</div>',
+                          data, re.MULTILINE | re.DOTALL)
+
+    itemlist = []
+    for url, title, langs in episodes:
+        logger.debug("langs %s" % langs)
+        languages = " ".join(
+            ["[{0}]".format(IDIOMAS.get(lang, lang)) for lang in re.findall('images/s-([^\.]+)', langs)])
+        filter_lang = languages.replace("[", "").replace("]", "").split(" ")
+        itemlist.append(item.clone(action="findvideos",
+                                   title="{0} {1} {2}".format(item.title, title, languages),
+                                   url=urlparse.urljoin(HOST, url),
+                                   language=filter_lang
+                                   ))
+
+    itemlist = filtertools.get_links(itemlist, item.channel, list_idiomas, CALIDADES)
+
+    # Option "Add this series to the XBMC video library"
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(
+            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
+
+    return itemlist
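`series_por_letra_y_grupo()` above drives the site's POST-paged listing: `group_no` is the page index, and a page shorter than 8 rows is taken to be the last one. A rough Python 2 sketch of that loop (`fetch_page` and its fake rows are stand-ins, not the channel's real API):

```python
import urllib  # Python 2, matching the channel code

def fetch_page(letter, group_no):
    # stand-in for httptools.downloadpage(url, post=...); returns fake rows here
    post = urllib.urlencode({"group_no": group_no, "letra": letter})
    print("POST autoload_process.php with " + post)
    return ["row"] * (8 if group_no < 2 else 3)  # pretend page 2 is the short last page

group_no = 0
while True:
    rows = fetch_page("a", group_no)
    if len(rows) < 8:  # a short page means there is nothing after it
        break
    group_no += 1
```

The channel exposes the same logic interactively instead, emitting "Siguiente >>" / "<< Anterior" items that re-enter the function with `extra` incremented or decremented.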
+
+
+def search(item, texto):
+    logger.info("texto: {0}".format(texto))
+    data = httptools.downloadpage(urlparse.urljoin(HOST, "/buscar.php?term={0}".format(texto))).data
+    jsonResult = jsontools.load(data)
+    tvShows = jsonResult["myData"]
+
+    return [item.clone(action="episodios",
+                       title=show["titulo"],
+                       show=show["titulo"],
+                       url=urlparse.urljoin(HOST, show["urla"]),
+                       thumbnail=urlparse.urljoin(HOST, show["img"]),
+                       context=filtertools.context(item, list_idiomas, CALIDADES)
+                       ) for show in tvShows]
+
+
+def findvideos(item):
+    logger.info("url: {0}".format(item.url))
+
+    data = httptools.downloadpage(item.url).data
+
+    expr = 'mtos' + '.+?' + \
+           '<div.+?images/(?P<lang>[^\.]+)' + '.+?' + \
+           '<div[^>]+>\s+(?P<date>[^\s<]+)' + '.+?' + \
+           '<div.+?img.+?>\s*(?P<server>.+?)</div>' + '.+?' + \
+           '<div.+?href="(?P<url>[^"]+).+?images/(?P<type>[^\.]+)' + '.+?' + \
+           '<div[^>]+>\s*(?P<quality>.*?)</div>' + '.+?' + \
+           '<div.+?<a.+?>(?P<uploader>.*?)</a>'
+
+    links = re.findall(expr, data, re.MULTILINE | re.DOTALL)
+
+    itemlist = [item.clone(
+        action="play",
+        title="{linkType} en {server} [{lang}] [{quality}] ({uploader}: {date})".format(
+            linkType="Ver" if linkType != "descargar" else "Descargar",
+            lang=IDIOMAS.get(lang, lang),
+            date=date,
+            server=server.rstrip(),
+            quality=quality,
+            uploader=uploader),
+        url=urlparse.urljoin(HOST, url),
+        language=IDIOMAS.get(lang, lang),
+        quality=quality,
+    ) for lang, date, server, url, linkType, quality, uploader in links]
+
+    itemlist = filtertools.get_links(itemlist, item.channel, list_idiomas, CALIDADES)
+
+    return itemlist
+
+
+def play(item):
+    logger.info("play: {0}".format(item.url))
+    data = httptools.downloadpage(item.url).data
+    videoURL = scrapertools.find_single_match(data, "location.href='([^']+)")
+    logger.debug("Video URL = {0}".format(videoURL))
+    itemlist = servertools.find_video_items(data=videoURL)
+    return itemlist
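`play()` above pulls the final URL out of a `location.href` redirect with `scrapertools.find_single_match`. A minimal stand-in showing the first-group-or-empty-string contract these channels rely on (assumed behavior, inferred from how the helper is used here):

```python
import re

def find_single_match(data, pattern):
    # minimal stand-in for scrapertools.find_single_match: first group or ''
    match = re.search(pattern, data, re.DOTALL)
    return match.group(1) if match else ''

page = "<script>location.href='http://example.com/video123';</script>"
print(find_single_match(page, "location.href='([^']+)"))   # http://example.com/video123
print(find_single_match(page, "location2.href='([^']+)"))  # '' (no crash on a miss)
```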
"http://www.seriesyonkis.sx/buscar/serie" + url = "http://www.seriesyonkis.sx/buscar/serie" # write ur URL here + post = 'keyword=' + texto[0:18] + '&search_type=serie' + + data = scrapertools.cache_page(url, post=post) + try: + return getsearchresults(item, data, "episodios") + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def getsearchresults(item, data, action): + itemlist = [] + + patron = '_results_wrapper">(.*?)<div id="fixed-footer"' + matches = re.compile(patron, re.DOTALL).findall(data) + for match in matches: + # <li class="nth-child1n"> <figure> <a href="/pelicula/el-moderno-sherlock-holmes-1924" title="El moderno Sherlock Holmes (1924)"><img width="100" height="144" src="http://s.staticyonkis.com/img/peliculas/100x144/el-moderno-sherlock-holmes-1924.jpg" alt=""></a> <figcaption>8.0</figcaption> </figure> <aside> <h2><a href="/pelicula/el-moderno-sherlock-holmes-1924" title="El moderno Sherlock Holmes (1924)">El moderno Sherlock Holmes (1924)</a></h2> <p class="date">1924 | Estados Unidos | votos: 3</p> <div class="content">Película sobre el mundo del cine, Keaton es un proyeccionista que sueña con ser un detective cuando, milagrosamente, se encuentra dentro de la película que está proyectando. Allí intentará salvar a su amada de las garras del villano. Una de...</div> <p class="generos"> <a href="/genero/comedia">Comedia</a> <a class="topic" href="/genero/cine-mudo">Cine mudo</a> <a class="topic" href="/genero/mediometraje">Mediometraje</a> <i>(1 más) <span class="aditional_links"> <span> <a class="topic" href="/genero/sherlock-holmes">Sherlock Holmes</a> </span> </span> </i> </p> </aside> </li> + patron = '<li[^>]+>.*?href="([^"]+)".*?title="([^"]+)".*?src="([^"]+).*?<div class="content">([^<]+)</div>.*?</li>' + results = re.compile(patron, re.DOTALL).findall(match) + for result in results: + scrapedtitle = result[1] + scrapedurl = urlparse.urljoin(item.url, result[0]) + scrapedthumbnail = result[2] + scrapedplot = result[3] + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action=action, title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle)) + + return itemlist + + +def lastepisodes(item): + logger.info() + + data = scrapertools.cache_page(item.url) + + # <li class="thumb-episode "> <a href="/capitulo/strike-back/project-dawn-part-3/200215"><img class="img-shadow" src="/img/series/170x243/strike-back.jpg" height="166" width="115"></a> <div class="transparent"> <a href="/capitulo/strike-back/project-dawn-part-3/200215"><span>2x03</span></a> </div> <strong><a href="/serie/strike-back" title="Strike back">Strike back</a></strong> </li> + matches = re.compile('<li class="thumb-episode ">.*?</li>', re.S).findall(data) + # scrapertools.printMatches(matches) + + itemlist = [] + for match in matches: + + # <li class="thumb-episode "> <a href="/capitulo/strike-back/project-dawn-part-3/200215"><img class="img-shadow" src="/img/series/170x243/strike-back.jpg" height="166" width="115"></a> <div class="transparent"> <a href="/capitulo/strike-back/project-dawn-part-3/200215"><span>2x03</span></a> </div> <strong><a href="/serie/strike-back" title="Strike back">Strike back</a></strong> </li> + datos = re.compile('<a 
href="([^"]+)">.*?src="([^"]+)".*?<span>([^<]+)</span>.*?title="([^"]+)"', re.S).findall( + match) + + for capitulo in datos: + scrapedtitle = capitulo[3] + " " + capitulo[2] + scrapedurl = urlparse.urljoin(item.url, capitulo[0]) + scrapedthumbnail = item.url + capitulo[1] + scrapedplot = "" + + # Depuracion + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, + url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, + fanart=item.fanart)) + + return itemlist + + +def mostviewed(item): + logger.info() + data = scrapertools.cachePage(item.url) + + # <div id="tabs-1"> <h1>Más vistas ayer</h1> + # <ul class="covers-list"> + # <li class="thumb-episode"><a title="Cómo conocí a vuestra madre (2005)" href="/serie/como-conoci-a-vuestra-madre"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/How_I_Met_Your_Mother-19641.JPEG" alt="Cómo conocí a vuestra madre"/></a><strong><a href="/serie/como-conoci-a-vuestra-madre" title"Cómo conocí a vuestra madre (2005)">Cómo conocí a vuestra madre (2005)</a></strong></li><li class="thumb-episode"><a title="The Big Bang Theory (2007)" href="/serie/the-big-bang-theory"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/2/The_Big_Bang_Theory-20285.PNG" alt="The Big Bang Theory"/></a><strong><a href="/serie/the-big-bang-theory" title"The Big Bang Theory (2007)">The Big Bang Theory (2007)</a></strong></li><li class="thumb-episode"><a title="Friends (1994)" href="/serie/friends"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/2/Friends-20013.PNG" alt="Friends"/></a><strong><a href="/serie/friends" title"Friends (1994)">Friends (1994)</a></strong></li><li class="thumb-episode"><a title="The vampire diaries (Crónicas Vampíricas) (2009)" href="/serie/the-vampire-diaries"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/The_Vampire_Diaries-18597.JPEG" alt="The vampire diaries (Crónicas Vampíricas)"/></a><strong><a href="/serie/the-vampire-diaries" title"The vampire diaries (Crónicas Vampíricas) (2009)">The vampire diaries (Crónicas Vampíricas) (2009)</a></strong></li><li class="clear"></li> <li class="thumb-episode"><a title="Breaking Bad (2008)" href="/serie/breaking-bad"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Breaking_Bad-18431.PNG" alt="Breaking Bad"/></a><strong><a href="/serie/breaking-bad" title"Breaking Bad (2008)">Breaking Bad (2008)</a></strong></li><li class="thumb-episode"><a title="Anatomía de Grey (2005)" href="/serie/anatomia-de-grey"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Grey_s_Anatomy-18325.JPEG" alt="Anatomía de Grey"/></a><strong><a href="/serie/anatomia-de-grey" title"Anatomía de Grey (2005)">Anatomía de Grey (2005)</a></strong></li><li class="thumb-episode"><a title="Keeping up with the Kardashians (2007)" href="/serie/keeping-up-with-the-kardashians"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Keeping_Up_with_the_Kardashians-19944.JPEG" alt="Keeping up with the Kardashians"/></a><strong><a href="/serie/keeping-up-with-the-kardashians" title"Keeping up with the Kardashians (2007)">Keeping up with the Kardashians (2007)</a></strong></li><li 
class="thumb-episode"><a title="The Walking Dead (2010)" href="/serie/the-walking-dead-yonkis1"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/The_Walking_Dead-19273.PNG" alt="The Walking Dead"/></a><strong><a href="/serie/the-walking-dead-yonkis1" title"The Walking Dead (2010)">The Walking Dead (2010)</a></strong></li><li class="clear"></li> <li class="thumb-episode"><a title="Pequeñas mentirosas (Pretty Little Liars) (2010)" href="/serie/pretty-little-liars"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Pretty_Little_Liars-18575.PNG" alt="Pequeñas mentirosas (Pretty Little Liars)"/></a><strong><a href="/serie/pretty-little-liars" title"Pequeñas mentirosas (Pretty Little Liars) (2010)">Pequeñas mentirosas (Pretty Little Liars) (2010)</a></strong></li><li class="thumb-episode"><a title="Sobrenatural (Supernatural) (2005)" href="/serie/sobrenatural"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Supernatural-19429.JPEG" alt="Sobrenatural (Supernatural)"/></a><strong><a href="/serie/sobrenatural" title"Sobrenatural (Supernatural) (2005)">Sobrenatural (Supernatural) (2005)</a></strong></li><li class="thumb-episode"><a title="Juego de tronos (2011)" href="/serie/juego-de-tronos-2011"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/2/Game_of_Thrones-22818.PNG" alt="Juego de tronos"/></a><strong><a href="/serie/juego-de-tronos-2011" title"Juego de tronos (2011)">Juego de tronos (2011)</a></strong></li><li class="thumb-episode"><a title="New girl (2011)" href="/serie/new-girl"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/3/New_Girl-40390.JPEG" alt="New girl"/></a><strong><a href="/serie/new-girl" title"New girl (2011)">New girl (2011)</a></strong></li><li class="clear"></li> <li class="thumb-episode"><a title="Modern Family (2009)" href="/serie/modern-family"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Modern_Family-19537.JPEG" alt="Modern Family"/></a><strong><a href="/serie/modern-family" title"Modern Family (2009)">Modern Family (2009)</a></strong></li><li class="thumb-episode"><a title="Padre de Familia (1999)" href="/serie/padre-de-familia"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/2/Family_Guy-21309.PNG" alt="Padre de Familia"/></a><strong><a href="/serie/padre-de-familia" title"Padre de Familia (1999)">Padre de Familia (1999)</a></strong></li><li class="thumb-episode"><a title="Suits (2011)" href="/serie/suits"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/2/Suits-35726.JPEG" alt="Suits"/></a><strong><a href="/serie/suits" title"Suits (2011)">Suits (2011)</a></strong></li><li class="thumb-episode"><a title="Gossip Girl (2007)" href="/serie/gossip-girl-yonkis1"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Gossip_Girl-19209.JPEG" alt="Gossip Girl"/></a><strong><a href="/serie/gossip-girl-yonkis1" title"Gossip Girl (2007)">Gossip Girl (2007)</a></strong></li><li class="clear"></li> <li class="thumb-episode"><a title="Los Simpsons (1989)" href="/serie/los-simpsons"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/The_Simpsons-19237.PNG" alt="Los Simpsons"/></a><strong><a href="/serie/los-simpsons" title"Los Simpsons (1989)">Los Simpsons (1989)</a></strong></li><li 
class="thumb-episode"><a title="Dos Hombres y Medio (Two and a Half Men) (2003)" href="/serie/dos-hombres-y-medio-two-and-a-half-men"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Two_and_a_Half_Men-19450.JPEG" alt="Dos Hombres y Medio (Two and a Half Men)"/></a><strong><a href="/serie/dos-hombres-y-medio-two-and-a-half-men" title"Dos Hombres y Medio (Two and a Half Men) (2003)">Dos Hombres y Medio (Two and a Half Men) (2003)</a></strong></li><li class="thumb-episode"><a title="Revenge (2011)" href="/serie/revenge"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/3/Revenge-40394.JPEG" alt="Revenge"/></a><strong><a href="/serie/revenge" title"Revenge (2011)">Revenge (2011)</a></strong></li><li class="thumb-episode"><a title="Glee (2009)" href="/serie/glee"><img width="100" height="144" class="img-shadow" src="http://static2.seriesyonkis.sx/90/1/Glee-19136.JPEG" alt="Glee"/></a><strong><a href="/serie/glee" title"Glee (2009)">Glee (2009)</a></strong></li><li class="clear"></li> + # </ul> + # </div> + matches = re.compile('<div id="tabs-1">(.*?)</div>', re.S).findall(data) + if len(matches) <= 0: + return [] + data = matches[0] + + # <li class="thumb-episode"> <a href="/serie/como-conoci-a-vuestra-madre" title="Cómo conocí a vuestra madre"><img class="img-shadow" src="/img/series/170x243/como-conoci-a-vuestra-madre.jpg" height="166" width="115"></a> <strong><a href="/serie/como-conoci-a-vuestra-madre" title="Cómo conocí a vuestra madre">Cómo conocí a vuestra madre</a></strong> </li> + matches = re.compile('<a title="([^"]+)" href="([^"]+)".*?src="([^"]+)".*?</a>', re.S).findall(data) + # scrapertools.printMatches(matches) + itemlist = [] + for match in matches: + scrapedtitle = match[0] + scrapedurl = urlparse.urljoin(item.url, match[1]) + scrapedthumbnail = urlparse.urljoin(item.url, match[2]) + scrapedplot = "" + + # Depuracion + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, + fanart=item.fanart)) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + + data = scrapertools.cachePage(item.url) + + # Paginador + # <div class="paginator">  <a href="/lista-de-series/C/"><</a> <a href="/lista-de-series/C/">1</a> <strong>2</strong> <a href="/lista-de-series/C/200">3</a> <a href="/lista-de-series/C/200">></a>  </div> + matches = re.compile('<a href="([^"]+)">></a>', re.S).findall(data) + # matches = re.compile('<div class="paginator">.*?<a href="([^"]+)".*?</div>', re.S).findall(data) + if len(matches) > 0: + paginador = Item(channel=item.channel, action="series", title="!Página siguiente", + url=urlparse.urljoin(item.url, matches[0]), thumbnail=item.thumbnail, plot="", extra="", + show=item.show, fanart=item.fanart) + else: + paginador = None + + if paginador is not None: + itemlist.append(paginador) + + # <div id="main-section" class="lista-series">.*?</div> + # matches = re.compile('<div id="main-section" class="lista-series">.*?</div>', re.S).findall(data) + matches = re.compile('<ul id="list-container".*?</ul>', re.S).findall(data) + # scrapertools.printMatches(matches) + for match in matches: + data = match + break + + # <li><a href="/serie/al-descubierto" title="Al descubierto">Al descubierto</a></li> + # matches = 
re.compile('<li>.*?href="([^"]+)".*?title="([^"]+)".*?</li>', re.S).findall(data)
+    matches = re.compile('title="([^"]+)" href="([^"]+)"', re.S).findall(data)
+    # scrapertools.printMatches(matches)
+
+    for match in matches:
+        itemlist.append(Item(channel=item.channel, action="episodios", title=match[0], fulltitle=match[0],
+                             url=urlparse.urljoin(item.url, match[1]), thumbnail="", plot="", extra="", show=match[0],
+                             fanart=item.fanart))
+
+    # "!=" instead of the old "<>" spelling, which Python 2 deprecates and Python 3 removes
+    if len(itemlist) > 0 and config.get_platform() in ("wiimc", "rss") and item.channel != "wiideoteca":
+        itemlist.append(
+            Item(channel=item.channel, action="add_serie_to_wiideoteca", title=">> Agregar Serie a Wiideoteca <<",
+                 fulltitle=item.fulltitle, url=item.url, thumbnail="", plot="", extra=""))
+
+    if paginador is not None:
+        itemlist.append(paginador)
+
+    return itemlist
+
+
+def detalle_programa(item, data=""):
+    # http://www.seriesyonkis.sx/serie/gungrave
+    # http://www.seriesyonkis.sx/ficha/serie/gungrave
+    url = item.url
+    if "seriesyonkis.com/serie/" in url:
+        url = url.replace("seriesyonkis.com/serie/", "seriesyonkis.com/ficha/serie/")
+
+    # Download the page
+    if data == "":
+        data = scrapertools.cache_page(url)
+
+    # Extract the thumbnail
+    try:
+        item.thumbnail = scrapertools.get_match(data, '<div class="profile-info"[^<]+<a[^<]+<img src="([^"]+)"')
+    except:
+        pass
+
+    try:
+        item.plot = scrapertools.htmlclean(scrapertools.get_match(data, '<div class="details">(.*?)</div>'))
+    except:
+        pass
+    logger.info("plot=" + item.plot)
+
+    try:
+        item.title = scrapertools.get_match(data, '<h1 class="underline"[^>]+>([^<]+)</h1>').strip()
+    except:
+        pass
+
+    return item
+
+
+def episodios(item):
+    logger.info()
+
+    # Download the page
+    data = scrapertools.cache_page(item.url)
+    item = detalle_programa(item)
+
+    # <h2 class="header-subtitle">Capítulos</h2> <ul class="menu">
+    # <h2 class="header-subtitle">Cap.*?</h2> <ul class="menu">.*?</ul>
+    matches = re.compile('<h2 class="header-subtitle">Cap.*?</h2> <ul class="menu">.*?</ul>', re.S).findall(data)
+    if len(matches) > 0:
+        data = matches[0]
+    # <li.*?
+ matches = re.compile('<li.*?</li>', re.S).findall(data) + # scrapertools.printMatches(matches) + + itemlist = [] + + No = 0 + for match in matches: + itemlist.extend(addChapters( + Item(channel=item.channel, url=item.url, extra=match, thumbnail=item.thumbnail, show=item.show, + plot=item.plot, fulltitle=item.title))) + ''' + if(len(matches)==1): + itemlist = addChapters(Item(url=match, thumbnail=thumbnail)) + else: + # Añade al listado de XBMC + No = No + 1 + title = "Temporada "+str(No) + itemlist.append( Item(channel=item.channel, action="season" , title= title, url=match, thumbnail=thumbnail, plot="", show = title, folder=True)) + ''' + + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, + fanart=item.fanart)) + itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, + action="download_all_episodes", extra="episodios", show=item.show, + fanart=item.fanart)) + + return itemlist + + +def addChapters(item): + # <tr > <td class="episode-title"> <span class="downloads allkind" title="Disponibles enlaces a descarga directa y visualizaciones"></span> + # <a href="/capitulo/bones/capitulo-2/2870"> <strong> 1x02 </strong> - El hombre en la unidad especial de victimas </a> </td> <td> 18/08/2007 </td> <td class="episode-lang"> <span class="flags_peq spa" title="Español"></span> </td> <td class="score"> 8 </td> </tr> + matches = re.compile('<a class="episodeLink p1" href="([^"]+)"[^<]+<strong>([^<]+)</strong>(.*?)</a>(.*?)</tr>', + re.S).findall(item.extra) + scrapertools.printMatches(matches) + + itemlist = [] + for match in matches: + url = urlparse.urljoin(item.url, match[0]) + title = match[1].strip() + match[2] + + patron = '<span class="flags[^"]+" title="([^"]+)">' + flags = re.compile(patron, re.DOTALL).findall(match[3]) + for flag in flags: + title = title + " (" + flag + ")" + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle + " " + title, + url=url, thumbnail=item.thumbnail, plot=item.plot, show=item.show, context="4", folder=True, + fanart=item.fanart)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + try: + Nro = 0 + fmt = id = "" + + # Acota la zona de búsqueda + data = scrapertools.cache_page(item.url) + data = scrapertools.get_match(data, '<div id="section-content"(.*?)</table>') + + # Procesa línea por línea + matches = re.compile('<tr>.*?</tr>', re.S).findall(data) + + for match in matches: + logger.info(match) + # <tr> <td class="episode-server"> <a href="/s/ngo/2/0/0/4/967" title="Reproducir No estamos solos 2x1" target="_blank"><img src="http://s.staticyonkis.com/img/veronline.png" height="22" width="22"> Reproducir</a> </td> <td class="episode-server-img"><a href="/s/ngo/2/0/0/4/967" title="Reproducir No estamos solos 2x1" target="_blank"><span class="server megavideo"></span></a></td> <td class="episode-lang"><span class="flags esp" title="Español">esp</span></td> <td class="center"><span class="flags no_sub" title="Sin subtítulo o desconocido">no</span></td> <td> <span class="episode-quality-icon" title="Calidad del episodio"> <i class="sprite quality5"></i> </span> </td> <td class="episode-notes"><span class="icon-info"></span> <div class="tip hidden"> <h3>Información vídeo</h3> <div class="arrow-tip-right-dark sprite"></div> <ul> <li>Calidad: 6, Duración: 85.8 min, Peso: 
405.79 MB, Resolución: 640x368</li> </ul> </div> </td> <td class="episode-uploader">lksomg</td> <td class="center"><a href="#" class="errorlink" data-id="2004967"><img src="http://s.staticyonkis.com/img/icons/bug.png" alt="" /></a></td> </tr> + # <tr> <td class="episode-server" data-value="0"> <a href="/s/ngo/5/5/9/8/737" title="Descargar Capítulo 514 1x514 de rapidgator" target="_blank"><img src="http://s.staticyonkis.com/img/descargadirecta.png" height="22" width="22" alt="descarga directa" /> Descargar</a> <span class="public_sprite like_green vote_link_positive user_not_logged" data-id="5598737" data-type="+" title="Voto positivo">[positivo]</span> <span class="public_sprite dislike_red vote_link_negative user_not_logged" data-id="5598737" data-type="-" title="Voto negativo">[negativo]</span> </td> <td class="episode-server-img"><a href="/s/ngo/5/5/9/8/737" title="Descargar Capítulo 514 1x514" target="_blank"><span class="server rapidgator"></span></a></td> <td class="episode-lang"><span class="flags spa" title="Español">spa</span></td> <td class="episode-subtitle subtitles center"><span class="flags no_sub" title="Sin información">no_sub</span></td> <td class="episode-notes"> <span class="icon-info"></span> <div class="tip hidden"> <h3>Información vídeo</h3> <div class="arrow-tip-right-dark sprite"></div> <ul> <li>hdtv</li> </ul> </div> </td> <td class="episode-uploader"> <span title="repomen77">repomen77</span> </td> <td class="episode-error bug center"><a href="#" class="errorlink" data-id="5598737"><img src="http://s.staticyonkis.com/img/icons/bug.png" alt="error" /></a></td> </tr> + # <a href="/s/ngo/5/5/9/8/737" + # <span class="server rapidgator"></span></a></td> <td class="episode-lang"> + # <span class="flags spa" title="Español">spa</span></td> <td class="episode-subtitle subtitles center"> + # <span class="flags no_sub" title="Sin información">no_sub</span></td> <td class="episode-notes"> <span class="icon-info"></span> <div class="tip hidden"> <h3>Información vídeo</h3> + # <div class="arrow-tip-right-dark sprite"></div> <ul> <li>hdtv</li> </ul> </div> </td> <td class="episode-uploader"> <span title="repomen77">repomen77</span> </td> <td class="episode-error bug center"><a href="#" class="errorlink" data-id="5598737"><img src="http://s.staticyonkis.com/img/icons/bug.png" alt="error" /></a></td> </tr> + patron = '<a href="([^"]+)".*?</td>.*?' + patron += 'alt="([^"]+)".*?' 
+ patron += 'class="episode-lang">.*?title="([^"]+)"' + datos = re.compile(patron, re.S).findall(match) + for info in datos: + id = info[0] + servidor = info[1] + Nro = Nro + 1 + audio = "Audio:" + info[2] + url = "http://www.seriesyonkis.sx" + info[0] + scraptedtitle = "%02d) [%s] - [%s] " % (Nro, audio, servidor) + # El plot va vacío porque necesita menos memoria, y en realidad es el de la serie y no el del episodio :) + itemlist.append( + Item(channel=item.channel, action="play", title=scraptedtitle, fulltitle=item.fulltitle, url=url, + thumbnail=item.thumbnail, plot="", folder=False, + fanart=item.fanart)) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + videos = servertools.findvideos(data) + + if (len(videos) > 0): + url = videos[0][1] + server = videos[0][2] + itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=url, + thumbnail=item.thumbnail, plot=item.plot, server=server, extra=item.extra, folder=False)) + else: + patron = '<ul class="form-login">(.*?)</ul' + matches = re.compile(patron, re.S).findall(data) + if (len(matches) > 0): + if "xbmc" in config.get_platform(): + data = matches[0] + # buscamos la public key + patron = 'src="http://www.google.com/recaptcha/api/noscript\?k=([^"]+)"' + pkeys = re.compile(patron, re.S).findall(data) + if (len(pkeys) > 0): + pkey = pkeys[0] + # buscamos el id de challenge + data = scrapertools.cache_page("http://www.google.com/recaptcha/api/challenge?k=" + pkey) + patron = "challenge.*?'([^']+)'" + challenges = re.compile(patron, re.S).findall(data) + if (len(challenges) > 0): + challenge = challenges[0] + image = "http://www.google.com/recaptcha/api/image?c=" + challenge + + # CAPTCHA + exec "import platformcode.captcha as plugin" + tbd = plugin.Keyboard("", "", image) + tbd.doModal() + confirmed = tbd.isConfirmed() + if (confirmed): + tecleado = tbd.getText() + logger.info("tecleado=" + tecleado) + sendcaptcha(playurl, challenge, tecleado) + del tbd + # tbd ya no existe + if (confirmed and tecleado != ""): + itemlist = play(item) + else: + itemlist.append(Item(channel=item.channel, action="error", title="El sitio web te requiere un captcha")) + + logger.info("len(itemlist)=%s" % len(itemlist)) + return itemlist + + +def sendcaptcha(url, challenge, text): + values = {'recaptcha_challenge_field': challenge, + 'recaptcha_response_field': text} + form_data = urllib.urlencode(values) + url = url.replace("seriesyonkis", "seriescoco") + url = url.replace("peliculasyonkis", "peliculascoco") + logger.info("url=" + url + ", form_data=" + form_data) + request = urllib2.Request(url, form_data) + request.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)') + response = urllib2.urlopen(request) + html = response.read() + logger.info("response=" + html) + response.close() + return html + + +# Pone todas las series del listado alfabético juntas, para no tener que ir entrando una por una +def completo(item): + logger.info() + itemlist = [] + + # Carga el menú "Alfabético" de series + item = Item(channel=item.channel, action="listalfabetico") + items_letras = listalfabetico(item) + + # Para cada letra + for item_letra in items_letras: + # print item_letra.title + + # Lee las series + items_programas = series(item_letra) + + salir = False + while not salir: + + # Saca la URL de la siguiente página + ultimo_item = 
+
+
+# Puts every series from the alphabetical listing together, so you don't have to open each letter one by one
+def completo(item):
+    logger.info()
+    itemlist = []
+
+    # Load the "Alphabetical" series menu
+    item = Item(channel=item.channel, action="listalfabetico")
+    items_letras = listalfabetico(item)
+
+    # For each letter
+    for item_letra in items_letras:
+        # print item_letra.title
+
+        # Read the series
+        items_programas = series(item_letra)
+
+        salir = False
+        while not salir:
+
+            # Get the URL of the next page
+            ultimo_item = items_programas[len(items_programas) - 1]
+
+            # Intermediate pages
+            if ultimo_item.action == "series":
+                # print ultimo_item.url
+                # Drop the "Next page" element
+                ultimo_item = items_programas.pop()
+
+                # Add this page's series to the full list
+                itemlist.extend(items_programas)
+
+                # Load the next page
+                items_programas = series(ultimo_item)
+
+            # Last page
+            else:
+                # Add to the full list and stop
+                itemlist.extend(items_programas)
+                salir = True
+
+    return itemlist
+
+
+def listalfabetico(item):
+    logger.info()
+
+    itemlist = []
+
+    itemlist.append(
+        Item(channel=item.channel, action="series", title="0-9", url="http://www.seriesyonkis.sx/lista-de-series/0-9",
+             fanart=item.fanart))
+    for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
+                  'U', 'V', 'W', 'X', 'Y', 'Z']:
+        itemlist.append(Item(channel=item.channel, action="series", title=letra,
+                             url="http://www.seriesyonkis.sx/lista-de-series/" + letra,
+                             fanart=item.fanart))
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/serviporno.json b/plugin.video.alfa/channels/serviporno.json
new file mode 100755
index 00000000..9255e43f
--- /dev/null
+++ b/plugin.video.alfa/channels/serviporno.json
@@ -0,0 +1,33 @@
+{
+  "id": "serviporno",
+  "name": "Serviporno",
+  "active": true,
+  "adult": true,
+  "language": "es",
+  "thumbnail": "serviporno.png",
+  "banner": "serviporno.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "05/08/2016",
+      "description": "Eliminado de sección películas."
+    }
+  ],
+  "categories": [
+    "adult"
+  ],
+  "settings": [
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en busqueda global",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    }
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/serviporno.py b/plugin.video.alfa/channels/serviporno.py
new file mode 100755
index 00000000..b17034ae
--- /dev/null
+++ b/plugin.video.alfa/channels/serviporno.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urlparse
+
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+    itemlist.append(
+        Item(channel=item.channel, action="videos", title="Últimos videos", url="http://www.serviporno.com/"))
+    itemlist.append(
+        Item(channel=item.channel, action="videos", title="Más vistos", url="http://www.serviporno.com/mas-vistos/"))
+    itemlist.append(
+        Item(channel=item.channel, action="videos", title="Más votados", url="http://www.serviporno.com/mas-votados/"))
+    itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias",
+                         url="http://www.serviporno.com/categorias/"))
+    itemlist.append(
+        Item(channel=item.channel, action="chicas", title="Chicas", url="http://www.serviporno.com/pornstars/"))
+    itemlist.append(
+        Item(channel=item.channel, action="search", title="Buscar", url="http://www.serviporno.com/search/?q="))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+    try:
+        return videos(item)
+    # The exception is caught so a failing channel does not break the global search
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
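Every channel in this patch repeats the try/except shape seen in search() above, because the global search calls search() across dozens of channels and one bad page must not abort the whole run (see the same pattern again in submityouflicks.py below). Purely as an illustration, the convention could be factored into a decorator; safe_search is a hypothetical helper, not something in the repo:

    import traceback

    def safe_search(search_fn):
        # Wraps a channel's search() so any scraping error degrades to "no results".
        def wrapper(item, texto):
            try:
                return search_fn(item, texto)
            except:
                logger.error(traceback.format_exc())
                return []
        return wrapper

A channel would then put safe_search above its def search(item, texto) as a decorator and drop the inline boilerplate.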
class="wrap-box-escena">.*?' + patron += '<div class="box-escena">.*?' + patron += '<a href="([^"]+)" data-stats-video-id="[^"]+" data-stats-video-name="([^"]+)" data-stats-video-category="[^"]*" data-stats-list-name="[^"]*" data-stats-list-pos="[^"]*">.*?' + patron += '<img src="([^"]+)" data-src="[^"]+" alt="[^"]+" id=\'[^\']+\' class="thumbs-changer" data-thumbs-prefix="[^"]+" height="150px" width="175px" border=0 />' + + matches = re.compile(patron, re.DOTALL).findall(data) + logger.info(str(matches)) + for url, title, thumbnail in matches: + url = urlparse.urljoin(item.url, url) + itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail)) + + # Paginador + patron = '<a href="([^<]+)">Siguiente »</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) > 0: + url = "http://www.serviporno.com" + matches[0] + itemlist.append( + Item(channel=item.channel, action="videos", title="Página Siguiente", url=url, thumbnail="", folder=True)) + + return itemlist + + +def chicas(item): + logger.info() + itemlist = [] + data = scrapertools.downloadpage(item.url) + + patron = '<div class="box-chica">.*?' + patron += '<a href="([^"]+)" title="">.*?' + patron += '<img class="img" src=\'([^"]+)\' width="175" height="150" border=\'0\' alt="[^"]+"/>.*?' + patron += '</a>[^<]{1}<h4><a href="[^"]+" title="">([^"]+)</a></h4>.*?' + patron += '<a class="total-videos" href="[^"]+" title="">([^<]+)</a>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for url, thumbnail, title, videos in matches: + url = urlparse.urljoin("http://www.serviporno.com", url) + title = title + " (" + videos + ")" + itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, thumbnail=thumbnail, plot="")) + return itemlist + + +def categorias(item): + logger.info() + itemlist = [] + data = scrapertools.downloadpage(item.url) + + patron = '<div class="wrap-box-escena">.*?' + patron += '<div class="cat box-escena">.*?' + patron += '<a href="([^"]+)"><img src="([^"]+)" alt="Webcam" height="150" width="175" border=0 /></a>.*?' 
+ patron += '<h4><a href="[^"]+">([^<]+)</a></h4>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for url, thumbnail, title in matches: + url = urlparse.urljoin(item.url, url) + itemlist.append(Item(channel=item.channel, action='videos', title=title, url=url, thumbnail=thumbnail, plot="")) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + data = scrapertools.downloadpage(item.url) + url = scrapertools.get_match(data, "url: '([^']+)',\s*framesURL:") + itemlist.append( + Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail, + plot=item.plot, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/setting.py b/plugin.video.alfa/channels/setting.py new file mode 100755 index 00000000..0f34156f --- /dev/null +++ b/plugin.video.alfa/channels/setting.py @@ -0,0 +1,861 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Configuracion +# ------------------------------------------------------------ + +import os + +from core import config +from core import filetools +from core import logger +from core import servertools +from core.item import Item +from platformcode import platformtools + +CHANNELNAME = "setting" + +# todo revisar elementos de update +def mainlist(item): + logger.info() + + itemlist = list() + itemlist.append(Item(channel=CHANNELNAME, title="Preferencias", action="settings", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + # if config.get_setting("plugin_updates_available") == 0: + # nuevas = "" + # elif config.get_setting("plugin_updates_available") == 1: + # nuevas = " (1 nueva)" + # else: + # nuevas = " (%s nuevas)" % config.get_setting("plugin_updates_available") + # + # thumb_configuracion = "thumb_setting_%s.png" % config.get_setting("plugin_updates_available") + # + # itemlist.append(Item(channel=CHANNELNAME, title="Descargar e instalar otras versiones" + nuevas, + # action="get_all_versions", folder=True, + # thumbnail=config.get_thumb(thumb_configuracion))) + + itemlist.append(Item(channel=CHANNELNAME, title="", action="", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + itemlist.append(Item(channel=CHANNELNAME, title="Ajustes especiales", action="", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + itemlist.append(Item(channel=CHANNELNAME, title=" Ajustes de Canales", action="menu_channels", + folder=True, thumbnail=config.get_thumb("thumb_channels.png"))) + itemlist.append(Item(channel=CHANNELNAME, title=" Ajustes de Servidores", action="menu_servers", + folder=True, thumbnail=config.get_thumb("thumb_channels.png"))) + itemlist.append(Item(channel="news", title=" Ajustes de la sección 'Novedades'", action="menu_opciones", + folder=True, thumbnail=config.get_thumb("thumb_news.png"))) + itemlist.append(Item(channel="search", title=" Ajustes del buscador global", action="opciones", folder=True, + thumbnail=config.get_thumb("thumb_search.png"))) + itemlist.append(Item(channel=CHANNELNAME, title=" Ajustes de descargas", action="channel_config", + config="downloads", folder=True, thumbnail=config.get_thumb("thumb_downloads.png"))) + + if config.get_videolibrary_support(): + itemlist.append(Item(channel="videolibrary", title=" Ajustes de la videoteca", + action="channel_config", folder=True, + thumbnail=config.get_thumb("thumb_videolibrary.png"))) + + # itemlist.append(Item(channel=CHANNELNAME, title=" Añadir o Actualizar canal/conector desde una URL", + # 
action="menu_addchannels")) + itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + itemlist.append(Item(channel=CHANNELNAME, title="Otras herramientas", action="submenu_tools", + folder=True, thumbnail=config.get_thumb("thumb_setting_0.png"))) + + return itemlist + + +def menu_channels(item): + logger.info() + itemlist = list() + + itemlist.append(Item(channel=CHANNELNAME, title="Activar/desactivar canales", + action="conf_tools", folder=False, extra="channels_onoff", + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + itemlist.append(Item(channel=CHANNELNAME, title="Ajustes por canales", + action="", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + # Inicio - Canales configurables + import channelselector + from core import channeltools + + channel_list = channelselector.filterchannels("all") + + for channel in channel_list: + channel_parameters = channeltools.get_channel_parameters(channel.channel) + + if channel_parameters["has_settings"]: + itemlist.append(Item(channel=CHANNELNAME, title=" Configuración del canal '%s'" % channel.title, + action="channel_config", config=channel.channel, folder=False, + thumbnail=channel.thumbnail)) + # Fin - Canales configurables + + itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + itemlist.append(Item(channel=CHANNELNAME, title="Herramientas de canales", action="", + folder=False, thumbnail=config.get_thumb("thumb_channels.png"))) + itemlist.append(Item(channel=CHANNELNAME, title=" Comprobar archivos *_data.json", + action="conf_tools", folder=True, extra="lib_check_datajson", + thumbnail=config.get_thumb("thumb_channels.png"))) + + return itemlist + + +def channel_config(item): + return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "channels", + item.config)) + + +def menu_servers(item): + logger.info() + itemlist = list() + + itemlist.append(Item(channel=CHANNELNAME, title="Sevidores bloqueados", + action="servers_blacklist", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + itemlist.append(Item(channel=CHANNELNAME, title="Servidores favoritos", + action="servers_favorites", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + itemlist.append(Item(channel=CHANNELNAME, title="Ajustes de debriders:", + action="", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + # Inicio - Servidores configurables + + server_list = servertools.get_debriders_list().keys() + for server in server_list: + server_parameters = servertools.get_server_parameters(server) + if server_parameters["has_settings"]: + itemlist.append( + Item(channel=CHANNELNAME, title=" Configuración del servidor '%s'" % server_parameters["name"], + action="server_config", config=server, folder=False, + thumbnail="")) + + itemlist.append(Item(channel=CHANNELNAME, title="Ajustes de servidores", + action="", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + + server_list = servertools.get_servers_list().keys() + + for server in sorted(server_list): + server_parameters = servertools.get_server_parameters(server) + logger.info(server_parameters) + if server_parameters["has_settings"] and filter(lambda x: x["id"] not in ["black_list", "white_list"], + server_parameters["settings"]): + itemlist.append( + Item(channel=CHANNELNAME, title=" Configuración del servidor '%s'" % 
server_parameters["name"], + action="server_config", config=server, folder=False, + thumbnail="")) + + # Fin - Servidores configurables + + return itemlist + + +def server_config(item): + return platformtools.show_channel_settings(channelpath=filetools.join(config.get_runtime_path(), "servers", + item.config)) + + +def servers_blacklist(item): + server_list = servertools.get_servers_list() + dict_values = {} + + list_controls = [{'id': 'filter_servers', + 'type': "bool", + 'label': "@30068", + 'default': False, + 'enabled': True, + 'visible': True}] + dict_values['filter_servers'] = config.get_setting('filter_servers') + + for i, server in enumerate(sorted(server_list.keys())): + server_parameters = server_list[server] + controls, defaults = servertools.get_server_controls_settings(server) + dict_values[server] = config.get_setting("black_list", server=server) + + control = {'id': server, + 'type': "bool", + 'label': ' %s' % server_parameters["name"], + 'default': defaults.get("black_list", False), + 'enabled': "eq(-%s,True)" % (i + 1), + 'visible': True} + list_controls.append(control) + + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Servidores bloqueados", + callback="cb_servers_blacklist") + + +def cb_servers_blacklist(item, dict_values): + f = False + progreso = platformtools.dialog_progress("Guardando configuración...", "Espere un momento por favor.") + n = len(dict_values) + i = 1 + for k, v in dict_values.items(): + if k == 'filter_servers': + config.set_setting('filter_servers', v) + else: + config.set_setting("black_list", v, server=k) + if v: # Si el servidor esta en la lista negra no puede estar en la de favoritos + config.set_setting("favorites_servers_list", 100, server=k) + f = True + progreso.update((i * 100) / n, "Guardando configuración...%s" % k) + i += 1 + + if not f: # Si no hay ningun servidor en la lista, desactivarla + config.set_setting('filter_servers', False) + + progreso.close() + + +def servers_favorites(item): + server_list = servertools.get_servers_list() + dict_values = {} + + list_controls = [{'id': 'favorites_servers', + 'type': "bool", + 'label': "Ordenar servidores", + 'default': False, + 'enabled': True, + 'visible': True}] + dict_values['favorites_servers'] = config.get_setting('favorites_servers') + + server_names = ['Ninguno'] + + for server in sorted(server_list.keys()): + if config.get_setting("black_list", server=server): + continue + + server_names.append(server_list[server]['name']) + + orden = config.get_setting("favorites_servers_list", server=server) + + if orden > 0: + dict_values[orden] = len(server_names) - 1 + + for x in range(1, 6): + control = {'id': x, + 'type': "list", + 'label': " Servidor #%s" % (x), + 'lvalues': server_names, + 'default': 0, + 'enabled': "eq(-%s,True)" % x, + 'visible': True} + list_controls.append(control) + + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + item=server_names, + caption="Servidores favoritos", + callback="cb_servers_favorites") + + +def cb_servers_favorites(server_names, dict_values): + dict_name = {} + progreso = platformtools.dialog_progress("Guardando configuración...", "Espere un momento por favor.") + + for i, v in dict_values.items(): + if i == "favorites_servers": + config.set_setting("favorites_servers", v) + elif int(v) > 0: + dict_name[server_names[v]] = int(i) + + servers_list = servertools.get_servers_list().items() + n = len(servers_list) + i = 1 + for server, 
+
+
+def cb_servers_favorites(server_names, dict_values):
+    dict_name = {}
+    progreso = platformtools.dialog_progress("Guardando configuración...", "Espere un momento por favor.")
+
+    for i, v in dict_values.items():
+        if i == "favorites_servers":
+            config.set_setting("favorites_servers", v)
+        elif int(v) > 0:
+            dict_name[server_names[v]] = int(i)
+
+    servers_list = servertools.get_servers_list().items()
+    n = len(servers_list)
+    i = 1
+    for server, server_parameters in servers_list:
+        if server_parameters['name'] in dict_name.keys():
+            config.set_setting("favorites_servers_list", dict_name[server_parameters['name']], server=server)
+        else:
+            config.set_setting("favorites_servers_list", 0, server=server)
+        progreso.update((i * 100) / n, "Guardando configuración...%s" % server_parameters['name'])
+        i += 1
+
+    if not dict_name:  # If no server is left on the list, disable the feature
+        config.set_setting("favorites_servers", False)
+
+    progreso.close()
+
+
+def get_all_versions(item):
+    logger.info()
+
+    itemlist = []
+
+    # Read the local version
+    from core import versiontools
+
+    # Download the list of versions
+    from core import api
+    api_response = api.plugins_get_all_packages()
+
+    if api_response["error"]:
+        platformtools.dialog_ok("Error", "Se ha producido un error al descargar la lista de versiones")
+        return
+
+    for entry in api_response["body"]:
+
+        if entry["package"] == "plugin":
+            title = "alfa " + entry["tag"] + " (Publicada " + entry["date"] + ")"
+            local_version_number = versiontools.get_current_plugin_version()
+        elif entry["package"] == "channels":
+            title = "Canales (Publicada " + entry["date"] + ")"
+            local_version_number = versiontools.get_current_channels_version()
+        elif entry["package"] == "servers":
+            title = "Servidores (Publicada " + entry["date"] + ")"
+            local_version_number = versiontools.get_current_servers_version()
+        else:
+            title = entry["package"] + " (Publicada " + entry["date"] + ")"
+            local_version_number = None
+
+        title_color = ""
+
+        if local_version_number is None:
+            title = title
+
+        elif entry["version"] == local_version_number:
+            title += " ACTUAL"
+
+        elif entry["version"] > local_version_number:
+            title_color = "yellow"
+
+        else:
+            title_color = "0xFF666666"
+
+        itemlist.append(Item(channel=CHANNELNAME, title=title, url=entry["url"],
+                             filename=entry["filename"], package=entry["package"],
+                             version=str(entry["version"]), text_color=title_color,
+                             action="download_and_install_package", folder=False))
+
+    return itemlist
+
+
+def download_and_install_package(item):
+    logger.info()
+
+    from core import updater
+    from core import versiontools
+
+    if item.package == "plugin":
+        if int(item.version) < versiontools.get_current_plugin_version():
+            if not platformtools.dialog_yesno("Instalando versión anterior",
+                                              "¿Seguro que quieres instalar una versión anterior?"):
+                return
+        elif int(item.version) == versiontools.get_current_plugin_version():
+            if not platformtools.dialog_yesno("Reinstalando versión actual",
+                                              "¿Seguro que quieres reinstalar la misma versión que ya tienes?"):
+                return
+        elif int(item.version) > versiontools.get_current_plugin_version():
+            if not platformtools.dialog_yesno("Instalando nueva versión",
+                                              "¿Seguro que quieres instalar esta nueva versión?"):
+                return
+    else:
+        if not platformtools.dialog_yesno("Instalando paquete", "¿Seguro que quieres instalar este paquete?"):
+            return
+
+    local_file_name = os.path.join(config.get_data_path(), item.filename)
+    updater.download_and_install(item.url, local_file_name)
+
+    if config.is_xbmc():
+        import xbmc
+        xbmc.executebuiltin("Container.Refresh")
+
+
+def settings(item):
+    config.open_settings()
+
+
+def menu_addchannels(item):
+    logger.info()
+    itemlist = list()
+    itemlist.append(Item(channel=CHANNELNAME, title="# Copia de seguridad automática en caso de sobrescritura",
+                         action="", text_color="green"))
+    itemlist.append(Item(channel=CHANNELNAME, title="Añadir o actualizar canal", action="addchannel", folder=False))
+
itemlist.append(Item(channel=CHANNELNAME, title="Añadir o actualizar conector", action="addchannel", folder=False)) + itemlist.append(Item(channel=CHANNELNAME, title="Mostrar ruta de carpeta para copias de seguridad", + action="backups", folder=False)) + itemlist.append(Item(channel=CHANNELNAME, title="Eliminar copias de seguridad guardadas", action="backups", + folder=False)) + + return itemlist + + +def addchannel(item): + import os + import time + logger.info() + + tecleado = platformtools.dialog_input("", "Introduzca la URL") + if not tecleado: + return + logger.info("url=%s" % tecleado) + + local_folder = config.get_runtime_path() + if "canal" in item.title: + local_folder = filetools.join(local_folder, 'channels') + folder_to_extract = "channels" + info_accion = "canal" + else: + local_folder = filetools.join(local_folder, 'servers') + folder_to_extract = "servers" + info_accion = "conector" + + # Detecta si es un enlace a un .py o .xml (pensado sobre todo para enlaces de github) + try: + extension = tecleado.rsplit(".", 1)[1] + except: + extension = "" + + files = [] + zip = False + if extension == "py" or extension == "xml": + filename = tecleado.rsplit("/", 1)[1] + localfilename = filetools.join(local_folder, filename) + files.append([tecleado, localfilename, filename]) + else: + import re + from core import scrapertools + # Comprueba si la url apunta a una carpeta completa (channels o servers) de github + if re.search(r'https://github.com/[^\s]+/' + folder_to_extract, tecleado): + try: + data = scrapertools.downloadpage(tecleado) + matches = scrapertools.find_multiple_matches(data, + '<td class="content">.*?href="([^"]+)".*?title="([^"]+)"') + for url, filename in matches: + url = "https://raw.githubusercontent.com" + url.replace("/blob/", "/") + localfilename = filetools.join(local_folder, filename) + files.append([url, localfilename, filename]) + except: + import traceback + logger.error("Detalle del error: %s" % traceback.format_exc()) + platformtools.dialog_ok("Error", "La url no es correcta o no está disponible") + return + else: + filename = 'new%s.zip' % info_accion + localfilename = filetools.join(config.get_data_path(), filename) + files.append([tecleado, localfilename, filename]) + zip = True + + logger.info("localfilename=%s" % localfilename) + logger.info("descarga fichero...") + + try: + if len(files) > 1: + lista_opciones = ["No", "Sí", "Sí (Sobrescribir todos)"] + overwrite_all = False + from core import downloadtools + for url, localfilename, filename in files: + result = downloadtools.downloadfile(url, localfilename, continuar=False, resumir=False) + if result == -3: + if len(files) == 1: + dyesno = platformtools.dialog_yesno("El archivo ya existe", "Ya existe el %s %s. " + "¿Desea sobrescribirlo?" % + (info_accion, filename)) + else: + if not overwrite_all: + dyesno = platformtools.dialog_select("El archivo %s ya existe, ¿desea sobrescribirlo?" 
+ % filename, lista_opciones) + else: + dyesno = 1 + # Diálogo cancelado + if dyesno == -1: + return + # Caso de carpeta github, opción sobrescribir todos + elif dyesno == 2: + overwrite_all = True + elif dyesno: + hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime()) + backup = filetools.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract) + if not filetools.exists(backup): + os.makedirs(backup) + import shutil + shutil.copy2(localfilename, filetools.join(backup, filename)) + downloadtools.downloadfile(url, localfilename, continuar=True, resumir=False) + else: + if len(files) == 1: + return + else: + continue + except: + import traceback + logger.error("Detalle del error: %s" % traceback.format_exc()) + return + + if zip: + try: + # Lo descomprime + logger.info("descomprime fichero...") + from core import ziptools + unzipper = ziptools.ziptools() + logger.info("destpathname=%s" % local_folder) + unzipper.extract(localfilename, local_folder, folder_to_extract, True, True) + except: + import traceback + logger.error("Detalle del error: %s" % traceback.format_exc()) + # Borra el zip descargado + filetools.remove(localfilename) + platformtools.dialog_ok("Error", "Se ha producido un error extrayendo el archivo") + return + + # Borra el zip descargado + logger.info("borra fichero...") + filetools.remove(localfilename) + logger.info("...fichero borrado") + + platformtools.dialog_ok("Éxito", "Actualización/Instalación realizada correctamente") + + +def backups(item): + logger.info() + + ruta = filetools.join(config.get_data_path(), 'backups') + ruta_split = "" + if "ruta" in item.title: + heading = "Ruta de copias de seguridad" + if not filetools.exists(ruta): + folders = "Carpeta no creada" + else: + folders = str(len(filetools.listdir(ruta))) + " copia/s de seguridad guardadas" + if len(ruta) > 55: + ruta_split = ruta[55:] + ruta = ruta[:55] + platformtools.dialog_ok(heading, ruta, ruta_split, folders) + else: + if not filetools.exists(ruta): + platformtools.dialog_ok("La carpeta no existe", "No hay copias de seguridad guardadas") + else: + dyesno = platformtools.dialog_yesno("Las copias de seguridad se eliminarán", "¿Está seguro?") + if dyesno: + import shutil + shutil.rmtree(ruta, ignore_errors=True) + + +def submenu_tools(item): + logger.info() + itemlist = [] + + itemlist.append(Item(channel=CHANNELNAME, title="Herramientas de canales", action="", + folder=False, thumbnail=config.get_thumb("thumb_channels.png"))) + itemlist.append(Item(channel=CHANNELNAME, title=" Comprobar archivos *_data.json", + action="conf_tools", folder=True, extra="lib_check_datajson", + thumbnail=config.get_thumb("thumb_channels.png"))) + + if config.get_videolibrary_support(): + itemlist.append(Item(channel=CHANNELNAME, action="", title="", folder=False, + thumbnail=config.get_thumb("thumb_setting_0.png"))) + itemlist.append(Item(channel=CHANNELNAME, title="Herramientas de videoteca", action="", + folder=False, thumbnail=config.get_thumb("thumb_videolibrary.png"))) + itemlist.append(Item(channel=CHANNELNAME, action="overwrite_tools", folder=False, + thumbnail=config.get_thumb("thumb_videolibrary.png"), + title=" Sobreescribir toda la videoteca (strm, nfo y json)")) + itemlist.append(Item(channel="videolibrary", action="update_videolibrary", folder=False, + thumbnail=config.get_thumb("thumb_videolibrary.png"), + title=" Buscar nuevos episodios y actualizar videoteca")) + + return itemlist + + +def conf_tools(item): + logger.info() + + # Activar o desactivar canales 
+ if item.extra == "channels_onoff": + import channelselector + from core import channeltools + + channel_list = channelselector.filterchannels("allchannelstatus") + + excluded_channels = ['url', + 'search', + 'videolibrary', + 'setting', + 'news', + # 'help', + 'downloads'] + + list_controls = [] + try: + list_controls.append({'id': "all_channels", + 'type': "list", + 'label': "Todos los canales", + 'default': 0, + 'enabled': True, + 'visible': True, + 'lvalues': ['', + 'Activar todos', + 'Desactivar todos', + 'Establecer estado por defecto']}) + + for channel in channel_list: + # Si el canal esta en la lista de exclusiones lo saltamos + if channel.channel not in excluded_channels: + + channel_parameters = channeltools.get_channel_parameters(channel.channel) + + status_control = "" + status = config.get_setting("enabled", channel.channel) + # si status no existe es que NO HAY valor en _data.json + if status is None: + status = channel_parameters["active"] + logger.debug("%s | Status (XML): %s" % (channel.channel, status)) + if not status: + status_control = " [COLOR grey](Desactivado por defecto)[/COLOR]" + else: + logger.debug("%s | Status: %s" % (channel.channel, status)) + + control = {'id': channel.channel, + 'type': "bool", + 'label': channel_parameters["title"] + status_control, + 'default': status, + 'enabled': True, + 'visible': True} + list_controls.append(control) + + else: + continue + + except: + import traceback + logger.error("Error: %s" % traceback.format_exc()) + else: + return platformtools.show_channel_settings(list_controls=list_controls, + item=item.clone(channel_list=channel_list), + caption="Canales", + callback="channel_status", + custom_button={"visible": False}) + + # Comprobacion de archivos channel_data.json + elif item.extra == "lib_check_datajson": + itemlist = [] + import channelselector + from core import channeltools + channel_list = channelselector.filterchannels("allchannelstatus") + + # Tener una lista de exclusion no tiene mucho sentido por que se comprueba si channel.json tiene "settings", + # pero por si acaso se deja + excluded_channels = ['url', + 'setting', + 'help'] + + try: + import os + from core import jsontools + for channel in channel_list: + + list_status = None + default_settings = None + + # Se comprueba si el canal esta en la lista de exclusiones + if channel.channel not in excluded_channels: + # Se comprueba que tenga "settings", sino se salta + list_controls, dict_settings = channeltools.get_channel_controls_settings(channel.channel) + + if not list_controls: + itemlist.append(Item(channel=CHANNELNAME, + title=channel.title + " - No tiene ajustes por defecto", + action="", folder=False, + thumbnail=channel.thumbnail)) + continue + # logger.info(channel.channel + " SALTADO!") + + # Se cargan los ajustes del archivo json del canal + file_settings = os.path.join(config.get_data_path(), "settings_channels", + channel.channel + "_data.json") + dict_settings = {} + dict_file = {} + if filetools.exists(file_settings): + # logger.info(channel.channel + " Tiene archivo _data.json") + channeljson_exists = True + # Obtenemos configuracion guardada de ../settings/channel_data.json + try: + dict_file = jsontools.load(open(file_settings, "rb").read()) + if isinstance(dict_file, dict) and 'settings' in dict_file: + dict_settings = dict_file['settings'] + except EnvironmentError: + logger.error("ERROR al leer el archivo: %s" % file_settings) + else: + # logger.info(channel.channel + " No tiene archivo _data.json") + channeljson_exists = False + + if 
channeljson_exists == True: + try: + datajson_size = filetools.getsize(file_settings) + except: + import traceback + logger.error(channel.title + " | Detalle del error: %s" % traceback.format_exc()) + else: + datajson_size = None + + # Si el _data.json esta vacio o no existe... + if (len(dict_settings) and datajson_size) == 0 or channeljson_exists == False: + # Obtenemos controles del archivo ../channels/channel.json + needsfix = True + try: + # Se cargan los ajustes por defecto + list_controls, default_settings = channeltools.get_channel_controls_settings( + channel.channel) + # logger.info(channel.title + " | Default: %s" % default_settings) + except: + import traceback + logger.error(channel.title + " | Detalle del error: %s" % traceback.format_exc()) + # default_settings = {} + + # Si _data.json necesita ser reparado o no existe... + if needsfix == True or channeljson_exists == False: + if default_settings is not None: + # Creamos el channel_data.json + default_settings.update(dict_settings) + dict_settings = default_settings + dict_file['settings'] = dict_settings + # Creamos el archivo ../settings/channel_data.json + json_data = jsontools.dump(dict_file) + try: + open(file_settings, "wb").write(json_data) + # logger.info(channel.channel + " - Archivo _data.json GUARDADO!") + # El channel_data.json se ha creado/modificado + list_status = " - [COLOR red] CORREGIDO!![/COLOR]" + except EnvironmentError: + logger.error("ERROR al salvar el archivo: %s" % file_settings) + else: + if default_settings is None: + list_status = " - [COLOR red] Imposible cargar los ajustes por defecto![/COLOR]" + + else: + # logger.info(channel.channel + " - NO necesita correccion!") + needsfix = False + + # Si se ha establecido el estado del canal se añade a la lista + if needsfix is not None: + if needsfix == True: + if channeljson_exists == False: + list_status = " - Ajustes creados" + list_colour = "red" + else: + list_status = " - No necesita corrección" + list_colour = "green" + else: + # Si "needsfix" es "false" y "datjson_size" es None habra + # ocurrido algun error + if datajson_size is None: + list_status = " - Ha ocurrido algun error" + list_colour = "red" + else: + list_status = " - No necesita corrección" + list_colour = "green" + + if list_status is not None: + itemlist.append(Item(channel=CHANNELNAME, + title=channel.title + list_status, + action="", folder=False, + thumbnail=channel.thumbnail, + text_color=list_colour)) + else: + logger.error("Algo va mal con el canal %s" % channel.channel) + + # Si el canal esta en la lista de exclusiones lo saltamos + else: + continue + except: + import traceback + logger.error("Error: %s" % traceback.format_exc()) + + return itemlist + + +def channel_status(item, dict_values): + try: + for k in dict_values: + + if k == "all_channels": + logger.info("Todos los canales | Estado seleccionado: %s" % dict_values[k]) + if dict_values[k] != 0: + excluded_channels = ['url', 'search', + 'videolibrary', 'setting', + 'news', + 'help', + 'downloads'] + + for channel in item.channel_list: + if channel.channel not in excluded_channels: + from core import channeltools + channel_parameters = channeltools.get_channel_parameters(channel.channel) + new_status_all = None + new_status_all_default = channel_parameters["active"] + + # Opcion Activar todos + if dict_values[k] == 1: + new_status_all = True + + # Opcion Desactivar todos + if dict_values[k] == 2: + new_status_all = False + + # Opcion Recuperar estado por defecto + if dict_values[k] == 3: + # Si tiene "enabled" en el 
_data.json es porque el estado no es el del channel.json
+                            if config.get_setting("enabled", channel.channel):
+                                new_status_all = new_status_all_default
+
+                            # If the channel has no "enabled" entry in _data.json there is nothing to save; skip it
+                            else:
+                                continue
+
+                        # Save the channel's new status
+                        if new_status_all is not None:
+                            config.set_setting("enabled", new_status_all, channel.channel)
+                break
+            else:
+                continue
+
+        else:
+            logger.info("Canal: %s | Estado: %s" % (k, dict_values[k]))
+            config.set_setting("enabled", dict_values[k], k)
+            logger.info("el valor esta como %s " % config.get_setting("enabled", k))
+
+        platformtools.itemlist_update(Item(channel=CHANNELNAME, action="mainlist"))
+
+    except:
+        import traceback
+        logger.error("Detalle del error: %s" % traceback.format_exc())
+        platformtools.dialog_notification("Error", "Se ha producido un error al guardar")
+
+
+def overwrite_tools(item):
+    import videolibrary_service
+    from core import videolibrarytools
+
+    seleccion = platformtools.dialog_yesno("Sobrescribir toda la videoteca",
+                                           "Esto puede llevar algun tiempo.",
+                                           "¿Desea continuar?")
+    if seleccion == 1:
+        heading = 'Sobrescribiendo videoteca....'
+        p_dialog = platformtools.dialog_progress_bg('alfa', heading)
+        p_dialog.update(0, '')
+
+        import glob
+        # No leading slash in the pattern: joining an absolute component would discard TVSHOWS_PATH entirely
+        show_list = glob.glob(filetools.join(videolibrarytools.TVSHOWS_PATH, u'*/tvshow.nfo'))
+
+        if show_list:
+            t = float(100) / len(show_list)
+
+            for i, tvshow_file in enumerate(show_list):
+                head_nfo, serie = videolibrarytools.read_nfo(tvshow_file)
+                path = filetools.dirname(tvshow_file)
+
+                if not serie.active:
+                    # skip series that are not active
+                    continue
+
+                # Remove the folder holding the series ...
+                filetools.rmdirtree(path)
+
+                # ... and add it back again
+                videolibrary_service.update(path, p_dialog, i, t, serie, 3)
+
+        p_dialog.close()
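One pitfall worth spelling out here: assuming filetools.join delegates to os.path.join (the usual arrangement for these core wrappers), the stdlib drops everything before an absolute component, so a glob pattern that starts with a slash silently ignores the base directory. A minimal demonstration, with a made-up library path:

    import glob
    import os.path

    # os.path.join discards everything before an absolute component:
    os.path.join("/storage/tvshows", "/*/tvshow.nfo")   # -> "/*/tvshow.nfo"
    os.path.join("/storage/tvshows", "*/tvshow.nfo")    # -> "/storage/tvshows/*/tvshow.nfo"

    # With the leading slash the scan would run against the filesystem root
    # instead of the video library:
    glob.glob(os.path.join("/storage/tvshows", "*/tvshow.nfo"))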
"/peliculas-recomendadas/")) + itemlist.append(item.clone(title="Categorias", action="menuseccion", url=host, extra="/online/")) + itemlist.append(item.clone(title="Año", action="menuseccion", url=host, extra="/estrenos-gratis/")) + itemlist.append(item.clone(title="Alfabetico", action="alfabetica", url=host + '/mirar/')) + itemlist.append(item.clone(title="Buscar", action="search", url=host + "/ver/")) + + return itemlist + + +def alfabetica(item): + logger.info() + itemlist = [] + for letra in "1abcdefghijklmnopqrstuvwxyz": + itemlist.append(item.clone(title=letra.upper(), url=item.url + letra, action="lista")) + + return itemlist + + +def menuseccion(item): + logger.info() + itemlist = [] + seccion = item.extra + data = httptools.downloadpage(item.url).data + + if seccion == '/online/': + data = scrapertools.find_single_match(data, + '<h2 class="[^"]+"><i class="[^"]+"></i>Películas por géneros<u class="[^"]+"></u></h2>(.*?)<ul class="abc">') + patron = '<li ><a href="([^"]+)" title="[^"]+"><i class="[^"]+"></i>([^<]+)</a></li>' + elif seccion == '/estrenos-gratis/': + data = scrapertools.find_single_match(data, '<ul class="lista-anio" id="lista-anio">(.*?)</ul>') + patron = '<li ><a href="([^"]+)" title="[^"]+">([^<]+)</a></li>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, extra in matches: + itemlist.append(Item(channel=item.channel, action='lista', title=extra, url=scrapedurl)) + return itemlist + + +def lista(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + # data = re.sub(r'"|\n|\r|\t| |<br>', "", data) + + listado = scrapertools.find_single_match(data, + '<div id="sipeliculas" class="borde"><div class="izquierda">(.*?)<div class="derecha"><h2') + logger.info('vergas' + listado) + patron = '<li class="[^"]+"><a class="[^"]+" href="([^"]+)" title="Ver Película([^"]+)"><i></i><img.*?src="([^"]+)" alt="[^"]+"/>(.*?)</li>' + matches = re.compile(patron, re.DOTALL).findall(listado) + for scrapedurl, scrapedtitle, scrapedthumbnail, dataplot in matches: + dataplot = scrapertools.find_single_match(data, '<div class="ttip"><h5>[^<]+</h5><p><span>([^<]+)</span>') + itemlist.append(Item(channel=item.channel, action='findvideos', title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=dataplot, contentTitle=scrapedtitle, extra=item.extra)) + + # Paginacion + if itemlist != []: + patron = '<li[^<]+<a href="([^"]+)" title="[^"]+">Siguiente[^<]+</a></li>' + matches = re.compile(patron, re.DOTALL).findall(data) + if matches: + itemlist.append( + item.clone(title="Pagina Siguiente", action='lista', url=urlparse.urljoin(host, matches[0]))) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "-") + item.url = item.url + texto + if texto != '': + return lista(item) + else: + return [] + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + # data = re.sub(r"'|\n|\r|\t| |<br>", "", data) + + listado1 = scrapertools.find_single_match(data, + '<div class="links" id="ver-mas-opciones"><h2 class="h2"><i class="[^"]+"></i>[^<]+</h2><ul class="opciones">(.*?)</ul>') + patron1 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><span class="opcion"><i class="[^"]+"></i><u>[^<]+</u>[^<]+</span><span class="ico"><img src="[^"]+" alt="[^"]+"/>[^<]+</span><span>([^"]+)</span><span>([^"]+)</span></a></li>' + matches = matches = re.compile(patron1, re.DOTALL).findall(listado1) + for vidId, vidUrl, vidServer, 
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "-")
+    item.url = item.url + texto
+    if texto != '':
+        return lista(item)
+    else:
+        return []
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    # data = re.sub(r"'|\n|\r|\t| |<br>", "", data)
+
+    listado1 = scrapertools.find_single_match(data,
+                                              '<div class="links" id="ver-mas-opciones"><h2 class="h2"><i class="[^"]+"></i>[^<]+</h2><ul class="opciones">(.*?)</ul>')
+    patron1 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><span class="opcion"><i class="[^"]+"></i><u>[^<]+</u>[^<]+</span><span class="ico"><img src="[^"]+" alt="[^"]+"/>[^<]+</span><span>([^"]+)</span><span>([^"]+)</span></a></li>'
+    matches = re.compile(patron1, re.DOTALL).findall(listado1)
+    for vidId, vidUrl, vidServer, idioma, calidad in matches:
+        itemlist.append(Item(channel=item.channel, action='play', url=vidUrl, extra=vidId,
+                             title='Ver en ' + vidServer + ' | ' + idioma + ' | ' + calidad, thumbnail=item.thumbnail))
+
+    listado2 = scrapertools.find_single_match(data, '<ul class="opciones-tab">(.*?)</ul>')
+    patron2 = '<li ><a id="([^"]+)" rel="nofollow" href="([^"]+)" title="[^"]+" alt="([^"]+)"><img src="[^"]+" alt="[^"]+"/>[^<]+</a></li>'
+    matches = re.compile(patron2, re.DOTALL).findall(listado2)
+    for vidId, vidUrl, vidServer in matches:
+        itemlist.append(Item(channel=item.channel, action='play', url=vidUrl, extra=vidId, title='Ver en ' + vidServer,
+                             thumbnail=item.thumbnail))
+
+    for videoitem in itemlist:
+        videoitem.fulltitle = item.title
+        videoitem.folder = False
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+    itemlist = []
+
+    video = httptools.downloadpage(host + '/ajax.public.php', 'acc=ver_opc&f=' + item.extra).data
+    logger.info("video=" + video)
+    enlaces = servertools.findvideos(video)
+    if enlaces:
+        logger.info("server=" + enlaces[0][2])
+        thumbnail = servertools.guess_server_thumbnail(video)
+        # Add to the XBMC listing
+        itemlist.append(
+            Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=enlaces[0][1],
+                 server=enlaces[0][2], thumbnail=thumbnail, folder=False))
+
+    return itemlist
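play() above leans on servertools.findvideos(), which scans an arbitrary blob of HTML for known hoster URL patterns and returns a list of matches; as the channels in this patch use it, element [1] of each match is the playable URL and element [2] the server id. A minimal usage sketch (the iframe URL is invented for illustration):

    html = '<iframe src="http://powvideo.net/abcd1234xyz0"></iframe>'
    enlaces = servertools.findvideos(html)
    if enlaces:
        video_url = enlaces[0][1]   # hoster URL to hand to the player
        server_id = enlaces[0][2]   # e.g. "powvideo", used to pick the resolver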
+ } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/submityouflicks.py b/plugin.video.alfa/channels/submityouflicks.py new file mode 100755 index 00000000..04b9bf3d --- /dev/null +++ b/plugin.video.alfa/channels/submityouflicks.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append( + Item(channel=item.channel, action="videos", title="Últimos videos", url="http://www.submityourflicks.com/", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar", + url="http://www.submityourflicks.com/index.php?mode=search&q=%s&submit=Search")) + + return itemlist + + +def search(item, texto): + logger.info() + tecleado = texto.replace(" ", "+") + item.url = item.url % tecleado + try: + return videos(item) + # Catch the exception so one failing channel does not break the global search + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def videos(item): + logger.info() + itemlist = [] + + ''' + <div class="item-block item-normal col" > + <div class="inner-block"> + <a href="http://www.submityourflicks.com/1846642-my-hot-wife-bending-over-and-getting-her-cunt-reamed.html" title="My hot wife bending over and getting her cunt reamed.."> + <span class="image"> + <script type='text/javascript'>stat['56982c566d05c'] = 0; + pic['56982c566d05c'] = new Array(); + pics['56982c566d05c'] = new Array(1, 1, 1, 1, 1, 1, 1, 1, 1, 1);</script> + <img src=" + ''' + + data = scrapertools.downloadpageGzip(item.url) + patron = '<div class="item-block[^<]+' + patron += '<div class="inner-block[^<]+' + patron += '<a href="([^"]+)" title="([^"]+)"[^<]+' + patron += '<span class="image".*?'
+ patron += '<img src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + title = scrapedtitle + url = scrapedurl + thumbnail = scrapedthumbnail.replace(" ", "%20") + plot = "" + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, + folder=False)) + + next_page_url = scrapertools.find_single_match(data, "<a href='([^']+)' class=\"next\">NEXT</a>") + if next_page_url != "": + url = urlparse.urljoin(item.url, next_page_url) + itemlist.append(Item(channel=item.channel, action="videos", title=">> Página siguiente", url=url, folder=True, + viewmode="movie")) + + return itemlist + + +def play(item): + logger.info() + + data = scrapertools.cache_page(item.url) + + media_url = scrapertools.find_single_match(data, 'file\:\s*"([^"]+)"') + itemlist = [] + itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url, + thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/teledocumentales.json b/plugin.video.alfa/channels/teledocumentales.json new file mode 100755 index 00000000..d2cb95eb --- /dev/null +++ b/plugin.video.alfa/channels/teledocumentales.json @@ -0,0 +1,23 @@ +{ + "id": "teledocumentales", + "name": "Teledocumentales", + "active": true, + "adult": false, + "language": "es", + "banner": "teledocumentales.png", + "thumbnail": "teledocumentales.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "documentary" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/teledocumentales.py b/plugin.video.alfa/channels/teledocumentales.py new file mode 100755 index 00000000..d84952bc --- /dev/null +++ b/plugin.video.alfa/channels/teledocumentales.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="ultimo", title="Últimos Documentales", + url="http://www.teledocumentales.com/", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, action="ListaCat", title="Listado por Género", + url="http://www.teledocumentales.com/")) + + return itemlist + + +def ultimo(item): + logger.info() + itemlist = [] + + data = scrapertools.cachePage(item.url) + + # Extract the entries + patron = '<div class="imagen"(.*?)<div style="clear.both">' + matches = re.compile(patron, re.DOTALL).findall(data) + logger.debug(matches) + + for match in matches: + scrapedtitle = scrapertools.get_match(match, '<img src="[^"]+" alt="([^"]+)"') + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + scrapedurl = scrapertools.get_match(match, '<a href="([^"]+)"') + scrapedthumbnail = scrapertools.get_match(match, '<img src="([^"]+)" alt="[^"]+"') + scrapedplot = scrapertools.get_match(match, '<div class="excerpt">([^<]+)</div>') + itemlist.append( + Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + plot=scrapedplot, fanart=scrapedthumbnail)) + + # Extract the next-page link (viewmode belongs to the Item, not to urljoin) + try: + next_page = scrapertools.get_match(data, '<a class="next" href="([^"]+)">') + itemlist.append(Item(channel=item.channel, action="ultimo", title=">> Página siguiente", + url=urlparse.urljoin(item.url, next_page), viewmode="movie_with_plot")) + except: + pass + + return itemlist + + +def ListaCat(item): + logger.info() + + url = item.url + + data = scrapertools.cachePage(url) + + # Extract the entries (folders) + + # <div class="slidethumb"> + # <a href="http://www.cine-adicto.com/transformers-dark-of-the-moon.html"><img src="http://www.cine-adicto.com/wp-content/uploads/2011/09/Transformers-Dark-of-the-moon-wallpaper.jpg" width="638" alt="Transformers: Dark of the Moon 2011" /></a> + # </div> + + patron = '<div id="menu_horizontal">(.*?)<div class="cuerpo">' + matches = re.compile(patron, re.DOTALL).findall(data) + logger.info("found %d matches" % len(matches)) + + itemlist = [] + for match in matches: + data2 = match + patron = '<li class="cat-item cat-item-.*?<a href="(.*?)".*?>(.*?)</a>.*?</li>' + matches2 = re.compile(patron, re.DOTALL).findall(data2) + logger.info("found %d matches2" % len(matches2)) + + for match2 in matches2: + scrapedtitle = match2[1].replace("–", "-").replace("&amp;", "&").strip() + scrapedurl = match2[0] + scrapedthumbnail = match2[0].replace(" ", "%20") + scrapedplot = "" + + itemlist.append(Item(channel=item.channel, action="ultimo", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, fanart=scrapedthumbnail, + viewmode="movie_with_plot")) + + return itemlist + + +def play(item): + logger.info() + + data = scrapertools.cachePage(item.url) + + urlvideo = scrapertools.get_match(data, '<!-- end navigation -->.*?<iframe src="([^"]+)"') + data = scrapertools.cachePage(urlvideo) + url = scrapertools.get_match(data,
'iframe src="([^"]+)"') + + itemlist = servertools.find_video_items(data=url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/torrentlocura.json b/plugin.video.alfa/channels/torrentlocura.json new file mode 100755 index 00000000..1dc0d89c --- /dev/null +++ b/plugin.video.alfa/channels/torrentlocura.json @@ -0,0 +1,43 @@ +{ + "id": "torrentlocura", + "name": "Torrentlocura", + "active": true, + "adult": false, + "language": "es", + "banner": "torrentlocura.png", + "thumbnail": "http://imgur.com/EWmLS3d.png", + "version": 1, + "changes": [ + { + "date": "31/12/2016", + "description": "Release" + }, + { + "date": "13/01/2017", + "description": "Añadida info a cápitulos en bloque" + }, + { + "date": "04/04/2017", + "description": "Reparación cambios web" + }, + { + "date": "28/06/2017", + "description": "Corrección código y algunas mejoras" + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/torrentlocura.py b/plugin.video.alfa/channels/torrentlocura.py new file mode 100755 index 00000000..fc983d44 --- /dev/null +++ b/plugin.video.alfa/channels/torrentlocura.py @@ -0,0 +1,1199 @@ +# -*- coding: utf-8 -*- + +import os +import re +import urllib + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + + +# Para la busqueda en bing evitando baneos + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) 
+ # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open the requested page + r = br.open(url) + response = r.read() + # If bing serves the blocked/stripped page, retry through the ssl proxy + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(item.clone(title="[COLOR crimson][B]Películas[/B][/COLOR]", action="scraper", + url="http://torrentlocura.com/peliculas/", thumbnail="http://imgur.com/RfZjMBi.png", + fanart="http://imgur.com/V7QZLAL.jpg", contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B] Películas HD[/B][/COLOR]", action="scraper", + url="http://torrentlocura.com/peliculas-hd/", + thumbnail="http://imgur.com/RfZjMBi.png", fanart="http://imgur.com/V7QZLAL.jpg", + contentType="movie")) + itemlist.append(itemlist[-1].clone(title=" [COLOR crimson][B]Estrenos[/B][/COLOR]", action="scraper", + url="http://torrentlocura.com/estrenos-de-cine/", + thumbnail="http://imgur.com/RfZjMBi.png", fanart="http://imgur.com/V7QZLAL.jpg", + contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B] Películas 3D[/B][/COLOR]", action="scraper", + url="http://torrentlocura.com/peliculas-3d/", + thumbnail="http://imgur.com/RfZjMBi.png", fanart="http://imgur.com/V7QZLAL.jpg", + contentType="movie")) + itemlist.append( + itemlist[-1].clone(title=" [COLOR crimson][B]Películas subtituladas[/B][/COLOR]", action="scraper", + url="http://torrentlocura.com/peliculas-vo/", thumbnail="http://imgur.com/RfZjMBi.png", + fanart="http://imgur.com/V7QZLAL.jpg", contentType="movie")) + itemlist.append( + itemlist[-1].clone(title="[COLOR crimson][B] Películas Audio Latino[/B][/COLOR]", action="scraper", + url="http://torrentlocura.com/peliculas-latino/", thumbnail="http://imgur.com/RfZjMBi.png", + fanart="http://imgur.com/V7QZLAL.jpg", contentType="movie")) + itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Series[/B][/COLOR]", action="scraper", + url="http://torrentlocura.com/series/", thumbnail="http://imgur.com/vX2dUYl.png", + contentType="tvshow")) + itemlist.append(itemlist[-1].clone(title=" [COLOR crimson][B]Series HD[/B][/COLOR]", action="scraper", + url="http://torrentlocura.com/series-hd/", + thumbnail="http://imgur.com/vX2dUYl.png", fanart="http://imgur.com/V7QZLAL.jpg", + contentType="tvshow")) + itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Buscar[/B][/COLOR]", action="search", url="", + thumbnail="http://imgur.com/rSttk79.png", fanart="http://imgur.com/V7QZLAL.jpg")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://torrentlocura.com/buscar" + item.extra = urllib.urlencode({'q': texto}) + try: + return buscador(item) + # Catch the exception so one failing channel does not break the global search + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscador(item): + logger.info() +
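# --- Editor's aside (illustrative sketch, not part of the original patch) ---
# buscador() below filters duplicate results with "title_check in str(check_item)",
# a substring test against the string form of a list, which can match unintended
# prefixes of longer titles. A set gives an exact membership test instead; the
# helper name below is hypothetical.
seen_titles = set()

def is_new_title(title_check):
    # Exact-match dedup: returns True only the first time a title is seen.
    if title_check in seen_titles:
        return False
    seen_titles.add(title_check)
    return True
# -----------------------------------------------------------------------------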
itemlist = [] + data = httptools.downloadpage(item.url, post=item.extra, ).data + data = unicode(data, "latin1").encode("utf8") + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + check_item = [] + bloque_enlaces = scrapertools.find_single_match(data, 'Resultados(.*?)end .page-box') + result_0 = scrapertools.find_multiple_matches(bloque_enlaces, + 'a href="([^"]+)" title="Descargar (.*?) ([^<]+)"><img src="([^"]+)".*?Descargar</a>') + for url, tipo, title, thumb in result_0: + try: + year = scrapertools.find_single_match(title, '(\d\d\d\d)') + except: + year = "" + if tipo == "Serie": + contentType = "tv" + title = re.sub(r'-.*', '', title) + title_check = title.strip() + else: + contentType = "movie" + # tipo="Pelicula" + title = re.sub(r'de Cine', 'Screener', title) + title = title.replace("RIP", "HdRip") + title_check = (title + " " + tipo).strip() + if "pc" in tipo or "PC" in tipo or "XBOX" in tipo or "Nintendo" in tipo or "Windows" in tipo or "varios" in url or "juego" in url: + continue + + if title_check in str(check_item): + continue + check_item.append([title_check]) + if "�" in title: + title = title.replace("�", "ñ") + title_fan = title + title_fan = re.sub( + r"\(.*?\)|-Remastered|Black And Chrome Edition|V.extendidaHD|1080p|Screener|V.O|HdRip|.*?--|\(\d+\)|\d\d\d\d|HD", + "", title_fan) + itemlist.append(Item(channel=item.channel, + title="[COLOR firebrick][B]" + tipo + "[/B][/COLOR]--" + "[COLOR red][B]" + title + "[/B][/COLOR]", + url=url, action="fanart", thumbnail=thumb, fanart="", contentType=contentType, + extra=title_fan + "|" + "[COLOR red][B]" + title_fan + "[/B][/COLOR]" + "|" + year, + folder=True)) + return itemlist + + +def scraper(item): + logger.info() + itemlist = [] + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + bloque_enlaces = scrapertools.find_single_match(data, '<ul class="pelilist">(.*?)end .page-box') + if item.contentType != "movie": + matches = scrapertools.find_multiple_matches(bloque_enlaces, + '<a href="([^"]+)".*?src="([^"]+)".*?28px;">([^<]+)<\/h2><span>([^<]+)<\/span>') + else: + matches = scrapertools.find_multiple_matches(bloque_enlaces, + '<a href="([^"]+)".*?src="([^"]+)".*?Descargar ([^<]+) gratis">.*?<\/h2><span>([^<]+)<\/span>') + for url, thumb, title, quality in matches: + try: + year = scrapertools.find_single_match(title, '(\d\d\d\d)') + except: + year = "" + title = unicode(title, "latin1").encode("utf8") + if "�" in title: + title = title.replace("�", "ñ") + title = re.sub(r'\(\d+\)|\d\d\d\d', '', title) + title_fan = title + title_item = "[COLOR red][B]" + title + "[/B][/COLOR]" + if "HD" in item.title and item.contentType != "movie": + title = "[COLOR red][B]" + title + "[/B][/COLOR]" + else: + title = "[COLOR red][B]" + title + "[/B][/COLOR]" + "[COLOR floralwhite] " + quality + "[/COLOR]" + itemlist.append( + Item(channel=item.channel, title=title, url=url, action="fanart", thumbnail=thumb, fanart=item.fanart, + extra=title_fan + "|" + title_item + "|" + year, contentType=item.contentType, folder=True)) + ## Paginación + next = scrapertools.find_single_match(data, 'href="([^"]+)">Next<\/a>') + if len(next) > 0: + url = next + itemlist.append( + Item(channel=item.channel, action="scraper", title="[COLOR darkred][B]siguiente[/B][/COLOR]", url=url, + thumbnail="http://imgur.com/D4ZgFri.png", fanart=item.fanart, extra=item.extra, + contentType=item.contentType, folder=True)) + + return itemlist + + +def fanart(item): + logger.info() + itemlist = [] + year = 
item.extra.split("|")[2] + if item.contentType != "movie": + tipo_ps = "tv" + else: + tipo_ps = "movie" + url = item.url + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + title = item.extra.split("|")[0] + title_o = scrapertools.find_single_match(data, '<meta name="description"[^<]+original(.*?)&') + item.title = item.extra.split("|")[1] + title_imdb = re.sub(r'\[.*?\]', '', item.extra.split("|")[1]) + title = re.sub( + r"\(.*?\)|-Remastered|Black And Chrome Edition|V.extendida|Version Extendida|V.Extendida|HEVC|X\d+|x\d+|LINE|HD|1080p|Screeener|V.O|Hdrip|.*?--|3D|SBS|HOU", + "", title) + + sinopsis = scrapertools.find_single_match(data, 'Sinopsis<br \/>(.*?)<\/div>') + if sinopsis == "": + try: + sinopsis = scrapertools.find_single_match(data, 'sinopsis\'>(.*?)<\/div>') + except: + sinopsis = "" + if "Miniserie" in sinopsis: + tipo_ps = "tv" + year = scrapertools.find_single_match(sinopsis, 'de TV \((\d+)\)') + if year == "": + if item.contentType != "movie": + try: + year = scrapertools.find_single_match(data, '<strong>Estreno:<\/strong>(\d+)<\/span>') + except: + year = "" + else: + year = scrapertools.find_single_match(data, '<br \/>A.*?(\d+)<br \/>') + if year == "": + year = scrapertools.find_single_match(data, 'Estreno.*?\d+/\d+/(\d+)') + if year == "": + year = scrapertools.find_single_match(data, + '<div class=\'descripcion_top\'>.*?Año<br />.*?(\d\d\d\d)') + if year == "": + year = scrapertools.find_single_match(data, + '<meta name="description"[^<]+Año[^<]+\d\d\d\d') + if year == "": + year = scrapertools.find_single_match(data, '<h1><strong>.*?(\d\d\d\d).*?<') + if year == "": + year = " " + + infoLabels = {'title': title, 'sinopsis': sinopsis, 'year': year} + critica, rating_filma, year_f, sinopsis_f = filmaffinity(item, infoLabels) + if sinopsis == "": + sinopsis = sinopsis_f + if year == "": + year = year_f + otmdb = tmdb.Tmdb(texto_buscado=title, year=year, tipo=tipo_ps) + id = otmdb.result.get("id") + posterdb = otmdb.result.get("poster_path") + if posterdb == None: + otmdb = tmdb.Tmdb(texto_buscado=title, tipo=tipo_ps) + id = otmdb.result.get("id") + posterdb = otmdb.result.get("poster_path") + if posterdb == None: + if item.contentType != "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title_imdb.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (title_imdb.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es") + id = otmdb.result.get("id") + posterdb = otmdb.result.get("poster_path") + if not posterdb: + if "(" in title_imdb: + title = scrapertools.find_single_match(title_imdb, '\(.*?\)') + if item.contentType != "movie": + urlbing_imdb = 
"http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title_imdb.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + title_imdb.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, + idioma_busqueda="es") + id = otmdb.result.get("id") + posterdb = otmdb.result.get("poster_path") + if not posterdb: + id = tiw = rating = tagline = id_tvdb = "" + fanart_4 = fanart_2 = fanart_3 = item.fanart + rating = "Sin Puntuación" + posterdb = tvf = item.thumbnail + fanart_info = item.fanart + thumbnail_art = item.thumbnail + extra = str(fanart_2) + "|" + str(fanart_3) + "|" + str(fanart_4) + "|" + str(id) + "|" + str( + tvf) + "|" + str(id_tvdb) + "|" + str(tiw) + "|" + str(rating) + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, folder=True)) + else: + if tipo_ps != "movie": + action = "findvideos" + else: + action = "findvideos_enlaces" + id = tiw = rating = tagline = id_tvdb = "" + fanart_4 = fanart_2 = fanart_3 = item.fanart + rating = "Sin Puntuación" + posterdb = tvf = item.thumbnail + fanart_info = item.fanart + thumbnail_art = item.thumbnail + extra = str(fanart_2) + "|" + str(fanart_3) + "|" + str(fanart_4) + "|" + str(id) + "|" + str( + tvf) + "|" + str(id_tvdb) + "|" + str(tiw) + "|" + str(rating) + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action=action, + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, + contentType=item.contentType, folder=True)) + if posterdb != item.thumbnail: + if not "null" in posterdb: + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + else: + posterdb = item.thumbnail + + if otmdb.result.get("backdrop_path"): + fanart = "https://image.tmdb.org/t/p/original" + otmdb.result.get("backdrop_path") + else: + fanart = item.fanart + if sinopsis == "": + if otmdb.result.get("'overview'"): + sinopsis = otmdb.result.get("'overview'") + else: + sinopsis = "" + if otmdb.result.get("vote_average"): + rating = otmdb.result.get("vote_average") + else: + rating = "Sin puntuacíon" + imagenes = [] + itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps) + images = itmdb.result.get("images") + for key, value in images.iteritems(): + for detail in value: + imagenes.append('https://image.tmdb.org/t/p/original' + detail["file_path"]) + if item.contentType != "movie": + if itmdb.result.get("number_of_seasons"): + season_number = itmdb.result.get("number_of_seasons") + else: + season_episode = "" + if itmdb.result.get("number_of_episodes"): + season_episode = itmdb.result.get("number_of_episodes") + else: + season_episode = "" + if itmdb.result.get("status"): + status = itmdb.result.get("status") + else: + status = "" + if 
status == "Ended": + status = "Finalizada" + else: + status = "En emisión" + tagline = str(status) + " (Temporadas:" + str(season_number) + ",Episodios:" + str(season_episode) + ")" + if itmdb.result.get("external_ids").get("tvdb_id"): + id_tvdb = itmdb.result.get("external_ids").get("tvdb_id") + else: + id_tvdb = "" + else: + id_tvdb = "" + if itmdb.result.get("tagline"): + tagline = itmdb.result.get("tagline") + else: + tagline = "" + if len(imagenes) >= 5: + fanart_info = imagenes[1] + fanart_2 = imagenes[2] + fanart_3 = imagenes[3] + fanart_4 = imagenes[4] + if fanart == item.fanart: + fanart = fanart_info + elif len(imagenes) == 4: + fanart_info = imagenes[1] + fanart_2 = imagenes[2] + fanart_3 = imagenes[3] + fanart_4 = imagenes[1] + if fanart == item.fanart: + fanart = fanart_info + elif len(imagenes) == 3: + fanart_info = imagenes[1] + fanart_2 = imagenes[2] + fanart_3 = imagenes[1] + fanart_4 = imagenes[0] + if fanart == item.fanart: + fanart = fanart_info + elif len(imagenes) == 2: + fanart_info = imagenes[1] + fanart_2 = imagenes[0] + fanart_3 = imagenes[1] + fanart_4 = imagenes[1] + if fanart == item.fanart: + fanart = fanart_info + else: + fanart_info = fanart + fanart_2 = fanart + fanart_3 = fanart + fanart_4 = fanart + images_fanarttv = fanartv(item, id_tvdb, id) + if item.contentType != "movie": + action = "findvideos" + if images_fanarttv: + try: + thumbnail_art = images_fanarttv.get("hdtvlogo")[0].get("url") + except: + try: + thumbnail_art = images_fanarttv.get("clearlogo")[0].get("url") + except: + thumbnail_art = posterdb + if images_fanarttv.get("tvbanner"): + tvf = images_fanarttv.get("tvbanner")[0].get("url") + elif images_fanarttv.get("tvthumb"): + tvf = images_fanarttv.get("tvthumb")[0].get("url") + elif images_fanarttv.get("tvposter"): + tvf = images_fanarttv.get("tvposter")[0].get("url") + else: + tvf = posterdb + if images_fanarttv.get("tvthumb"): + thumb_info = images_fanarttv.get("tvthumb")[0].get("url") + else: + thumb_info = thumbnail_art + + if images_fanarttv.get("hdclearart"): + tiw = images_fanarttv.get("hdclearart")[0].get("url") + elif images_fanarttv.get("characterart"): + tiw = images_fanarttv.get("characterart")[0].get("url") + elif images_fanarttv.get("hdtvlogo"): + tiw = images_fanarttv.get("hdtvlogo")[0].get("url") + else: + tiw = "" + else: + tiw = "" + tvf = thumbnail_info = thumbnail_art = posterdb + else: + action = "findvideos_enlaces" + if images_fanarttv: + if images_fanarttv.get("hdmovielogo"): + thumbnail_art = images_fanarttv.get("hdmovielogo")[0].get("url") + elif images_fanarttv.get("moviethumb"): + thumbnail_art = images_fanarttv.get("moviethumb")[0].get("url") + elif images_fanarttv.get("moviebanner"): + thumbnail_art = images_fanarttv.get("moviebanner")[0].get("url") + else: + thumbnail_art = posterdb + if images_fanarttv.get("moviedisc"): + tvf = images_fanarttv.get("moviedisc")[0].get("url") + elif images_fanarttv.get("hdmovielogo"): + tvf = images_fanarttv.get("hdmovielogo")[0].get("url") + else: + tvf = posterdb + if images_fanarttv.get("hdmovieclearart"): + tiw = images_fanarttv.get("hdmovieclearart")[0].get("url") + elif images_fanarttv.get("hdmovielogo"): + tiw = images_fanarttv.get("hdmovielogo")[0].get("url") + else: + tiw = "" + else: + tiw = "" + tvf = thumbnail_art = posterdb + extra = str(fanart_2) + "|" + str(fanart_3) + "|" + str(fanart_4) + "|" + str(id) + "|" + str(tvf) + "|" + str( + id_tvdb) + "|" + str(tiw) + "|" + str(rating) + "|" + tipo_ps + itemlist.append( + Item(channel=item.channel, title=item.title, 
url=item.url, action=action, thumbnail=thumbnail_art, + fanart=fanart, extra=extra, contentType=item.contentType, folder=True)) + title_info = "[COLOR indianred][B]Info[/B][/COLOR]" + extra = str(rating) + "|" + str(rating_filma) + "|" + str(id) + "|" + str(item.title) + "|" + str( + id_tvdb) + "|" + str(tagline) + "|" + str(sinopsis) + "|" + str(critica) + "|" + str(thumbnail_art) + "|" + str( + fanart_4) + itemlist.append(Item(channel=item.channel, action="info", title=title_info, url=item.url, thumbnail=posterdb, + fanart=fanart_info, extra=extra, contentType=item.contentType, folder=False)) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + fanart = "" + data = httptools.downloadpage(item.url).data + if item.contentType != "movie": + itmdb = tmdb.Tmdb(id_Tmdb=item.extra.split("|")[3], tipo=item.extra.split("|")[8]) + season = itmdb.result.get("seasons") + check = "no" + bloque_enlaces = scrapertools.find_single_match(data, + '<ul class="buscar-list">(.*?)<\/ul><!-- end \.buscar-list -->') + if check == "no": + check_temp = scrapertools.find_single_match(bloque_enlaces, 'Temporada (\d+)') + if check_temp == "": + check_temp = 1 + if len(check_temp) == 1: + try: + check_temp = scrapertools.find_single_match(bloque_enlaces, 'Temporada (\d+) Capitulos') + check_temp = None + except: + pass + thumbnail = "" + if season: + for detail in season: + if str(detail["season_number"]) == check_temp: + if detail["poster_path"]: + thumbnail = "https://image.tmdb.org/t/p/original" + detail["poster_path"] + images_fanarttv = fanartv(item, item.extra.split("|")[5], item.extra.split("|")[3]) + if images_fanarttv: + season_f = images_fanarttv.get("showbackground") + if season_f: + for detail in season_f: + if str(detail["season"]) == check_temp: + if detail["url"]: + fanart = detail["url"] + if fanart == "": + fanart = item.extra.split("|")[0] + if thumbnail == "": + thumbnail = item.thumbnail + if check_temp: + itemlist.append( + Item(channel=item.channel, title="[COLOR red][B]Temporada " + check_temp + "[/B][/COLOR]", url="", + action="", thumbnail=thumbnail, fanart=fanart, folder=False)) + temp_bloque = scrapertools.find_multiple_matches(bloque_enlaces, + 'href="([^"]+).*?" 
title=".*?Temporada (\d+) Capitulo (\d+).*?Serie <strong style="color:red;background:none;">(.*?)<\/strong>.*?Calidad <span style="color:red;background:none;">(\[.*?\])<\/span>.*?<span>.*?<span>(.*?)<\/span>.*?Descargar') + if temp_bloque != "": + for url, temp, capi, check_capi, calidad, peso in temp_bloque: + if "Capitulos" in check_capi: + extra = item.extra + "|" + check_capi + "|" + temp + title = scrapertools.find_single_match(check_capi, '-.*?(Capitulos.*)') + title = " [COLOR red][B]" + title + "[/B][/COLOR]" + else: + extra = item.extra + "|" + "Nocapi" + "|" + temp + "|" + capi + title = " [COLOR red][B]Capítulo " + capi + "[/B][/COLOR]" + if temp != check_temp: + check_temp = temp + check = "yes" + for detail in season: + if detail["season_number"]: + if str(detail["season_number"]) == temp: + if detail["poster_path"]: + thumbnail = "https://image.tmdb.org/t/p/original" + detail["poster_path"] + else: + thumbnail = "" + else: + thumbail = "" + if images_fanarttv: + season_f = images_fanarttv.get("showbackground") + if season_f: + for detail in season_f: + if str(detail["season"]) == check_temp: + if detail["season"]: + fanart = detail["url"] + if fanart == "": + fanart = item.extra.split("|")[0] + if thumbnail == "": + thumbnail = item.thumbnail + itemlist.append( + Item(channel=item.channel, title="[COLOR red][B]Temporada " + temp + "[/B][/COLOR]", url="", + action="", thumbnail=thumbnail, fanart=fanart, folder=False)) + + itemlist.append( + Item(channel=item.channel, title=title, url=url, action="findvideos_enlaces", thumbnail=thumbnail, + fanart=item.extra.split("|")[0], extra=extra, contentType=item.contentType, folder=True)) + else: + temp_bloque = scrapertools.find_multiple_matches(bloque_enlaces, + 'href="([^"]+).*?Temporada (\d+) Capitulo (\d+).*?Calidad.*?\[(.*?)\]<\/span>.*?<span>.*?<span>(.*?)<\/span>') + for url, capi, calidad, peso in temp_bloque: + itemlist.append( + Item(channel=item.channel, title=" [COLOR red][B]Capítulo " + capi + "[/B][/COLOR]", + url="", action="findvideos_enlaces", thumbnail=item.extra.split("|")[4], + fanart=item.extra.split("|")[0], folder=True)) + + ## Paginación + next = scrapertools.find_single_match(data, 'href="([^"]+)">Next<\/a>') + if len(next) > 0: + url = next + + itemlist.append( + Item(channel=item.channel, action="findvideos", title="[COLOR darkred][B]siguiente[/B][/COLOR]", + url=url, thumbnail="http://imgur.com/D4ZgFri.png", fanart=item.fanart, extra=item.extra, + contentType=item.contentType, folder=True)) + return itemlist + + +def findvideos_enlaces(item): + logger.info() + itemlist = [] + check_epi2 = "" + data = httptools.downloadpage(item.url).data + + url = scrapertools.find_single_match(data, 'window.location.href = "([^"]+)"').strip() + + try: + + if not url.endswith(".torrent"): + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + + if not url.endswith(".torrent"): + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + else: + url = httptools.downloadpage(url, follow_redirects=False) + url = url.headers.get("location") + torrents_path = config.get_videolibrary_path() + '/torrents' + + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) + try: + urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0' + urllib.urlretrieve(url, torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + if "used CloudFlare" in pepe: + 
try: + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), + torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + torrent = decode(pepe) + + try: + name = torrent["info"]["name"] + sizet = torrent["info"]['length'] + sizet = convert_size(sizet) + except: + name = "no disponible" + try: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}") + + size = max([int(i) for i in check_video]) + + for file in torrent["info"]["files"]: + manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) + if str(size) in manolo: + video = manolo + size = convert_size(size) + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = sizet + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", name) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = "en estos momentos..." + ext_v = "no disponible" + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + if item.contentType != "movie": + fanart = item.extra.split("|")[1] + else: + fanart = item.extra.split("|")[0] + itemlist.append(Item(channel=item.channel, + title="[COLOR orangered][B]Torrent[/B][/COLOR] " + "[COLOR lemonchiffon]( Video [/COLOR]" + "[COLOR lemonchiffon]" + ext_v + "--" + size + " )[/COLOR]", + url=url, action="play", server="torrent", thumbnail=item.extra.split("|")[4], fanart=fanart, + folder=False)) + + if item.contentType != "movie": + if "Capitulos" in item.extra.split("|")[9]: + epis = scrapertools.find_multiple_matches(item.extra.split("|")[9], 'Capitulos (\d+) al (\d+)') + for epi1, epi2 in epis: + len_epis = int(epi2) - int(epi1) + if len_epis == 1: + extra = item.extra + "|" + epi1 + check_epi2 = "ok" + title_info = " Info Cap." + epi1 + title_info = "[COLOR indianred]" + title_info + "[/COLOR]" + itemlist.append(Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url, + thumbnail=item.extra.split("|")[6], fanart=item.extra.split("|")[1], + extra=extra, folder=False)) + else: + check_epi2 = "" + epis_len = range(int(epi1), int(epi2) + 1) + extra = item.extra + "|" + str(epis_len) + title_info = " Info Capítulos" + title_info = "[COLOR indianred]" + title_info + "[/COLOR]" + itemlist.append(Item(channel=item.channel, action="capitulos", title=title_info, url=item.url, + thumbnail=item.extra.split("|")[6], fanart=item.extra.split("|")[1], + extra=extra, folder=True)) + else: + title_info = " Info" + title_info = "[COLOR indianred]" + title_info + "[/COLOR]" + itemlist.append(Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url, + thumbnail=item.extra.split("|")[6], fanart=item.extra.split("|")[1], extra=item.extra, + folder=False)) + if check_epi2 == "ok": + extra = item.extra + "|" + epi2 + title_info = " Info Cap." 
+ epi2 + title_info = "[COLOR indianred]" + title_info + "[/COLOR]" + itemlist.append(Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url, + thumbnail=item.extra.split("|")[6], fanart=item.extra.split("|")[1], extra=extra, + folder=False)) + dd = scrapertools.find_single_match(data, 'DESCARGAS DIRECTA(.*?)VER ONLINE') + if dd: + extra = item.extra + "|" + dd + itemlist.append(Item(channel=item.channel, title="[COLOR floralwhite][B]Descarga directa y online[/B][/COLOR]", + url=item.url, action="dd_y_o", thumbnail="http://imgur.com/as7Ie6p.png", + fanart=item.extra.split("|")[1], contentType=item.contentType, extra=extra, folder=True)) + return itemlist + + +def dd_y_o(item): + logger.info() + itemlist = [] + if item.contentType == "movie": + data = item.extra.split("|")[9] + else: + data = item.extra.split("|")[12] + enlaces = scrapertools.find_multiple_matches(data, + "class=\"box1\"><img src='([^']+)'.*?<div class=\"box2\">([^<]+)<\/div>.*?>([^<]+)<\/div>.*?>([^<]+)<\/div>.*?><a href='([^']+)'.*?Des") + for thumb, server_name, idioma, calidad, url_d in enlaces: + videolist = servertools.find_video_items(data=url_d) + for video in videolist: + itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, + title="[COLOR floralwhite][B]" + server_name + "[/B][/COLOR]", thumbnail=thumb, + fanart=item.extra.split("|")[2], action="play", folder=False)) + return itemlist + + +def capitulos(item): + logger.info() + itemlist = [] + url = item.url + Join_extras = "|".join(item.extra.split("|")[0:11]) + capis = item.extra.split("|")[11] + capis = re.sub(r'\[|\]', '', capis) + capis = [int(k) for k in capis.split(',')] + for i in capis: + extra = Join_extras + "|" + str(i) + itemlist.append(Item(channel=item.channel, action="info_capitulos", + title="[COLOR indianred]Info Cap." + str(i) + "[/COLOR]", url=item.url, + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, folder=False)) + return itemlist + + +def info(item): + logger.info() + itemlist = [] + url = item.url + rating_tmdba_tvdb = item.extra.split("|")[0] + if item.extra.split("|")[6] == "": + rating_tmdba_tvdb = "Sin puntuación" + rating_filma = item.extra.split("|")[1] + filma = "http://s6.postimg.org/6yhe5fgy9/filma.png" + title = item.extra.split("|")[3] + title = title.replace("%20", " ") + try: + if "." in rating_tmdba_tvdb: + check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).') + else: + check_rat_tmdba = rating_tmdba_tvdb + if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8: + rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10: + rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + else: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + print "lolaymaue" + except: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + try: + check_rat_filma = scrapertools.get_match(rating_filma, '(\d)') + print "paco" + print check_rat_filma + if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8: + print "dios" + print check_rat_filma + rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]" + elif int(check_rat_filma) >= 8: + + print check_rat_filma + rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]" + else: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + print "rojo??" 
+ print check_rat_filma + except: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + plot = item.extra.split("|")[6] + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\|<br />", "", plot) + if item.extra.split("|")[5] != "": + tagline = item.extra.split("|")[5] + if tagline == "\"\"": + tagline = " " + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + else: + tagline = "" + if item.contentType != "movie": + icon = "http://s6.postimg.org/hzcjag975/tvdb.png" + else: + icon = "http://imgur.com/SenkyxF.png" + + foto = item.extra.split("|")[9] + if not "tmdb" in foto: + foto = "" + if item.extra.split("|")[7] != "": + critica = item.extra.split("|")[7] + else: + critica = "Esta serie no tiene críticas..." + + photo = item.extra.split("|")[8].replace(" ", "%20") + if ".jpg" in photo: + photo = "" + # Tambien te puede interesar + peliculas = [] + if item.contentType != "movie": + url_tpi = "http://api.themoviedb.org/3/tv/" + item.extra.split("|")[ + 2] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = httptools.downloadpage(url_tpi).data + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),"popularity"') + else: + url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 2] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = httptools.downloadpage(url_tpi).data + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),"popularity"') + + for idp, peli, thumb in tpi: + + thumb = re.sub(r'"|}', '', thumb) + if "null" in thumb: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + else: + thumb = "https://image.tmdb.org/t/p/original" + thumb + peliculas.append([idp, peli, thumb]) + + extra = "" + "|" + item.extra.split("|")[2] + "|" + item.extra.split("|")[2] + "|" + item.extra.split("|")[ + 6] + "|" + "" + infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline, + 'rating': rating} + item_info = item.clone(info=infoLabels, icon=icon, extra=extra, rating=rating, rating_filma=rating_filma, + critica=critica, contentType=item.contentType, thumb_busqueda="http://imgur.com/j0A9lnu.png") + from channels import infoplus + infoplus.start(item_info, peliculas) + + +def info_capitulos(item, images={}): + logger.info() + url = "https://api.themoviedb.org/3/tv/" + item.extra.split("|")[3] + "/season/" + item.extra.split("|")[ + 10] + "/episode/" + item.extra.split("|")[11] + "?api_key=" + api_key + "&language=es" + if "/0" in url: + url = url.replace("/0", "/") + from core import jsontools + data = jsontools.load(scrapertools.downloadpage(url)) + foto = item.extra.split("|")[6] + if not ".png" in foto: + foto = "http://imgur.com/j0A9lnu.png" + if data: + if data.get("name"): + title = data.get("name") + else: + title = "" + title = "[COLOR red][B]" + title + "[/B][/COLOR]" + if data.get("still_path"): + image = "https://image.tmdb.org/t/p/original" + data.get("still_path") + else: + image = "http://imgur.com/ZiEAVOD.png" + if data.get("overview"): + plot = data.get("overview") + else: + plot = "Sin informacion del capítulo aún..." 
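# --- Editor's aside (illustrative sketch, not part of the original patch) ---
# info_capitulos() above queries the TMDB v3 episode endpoint
# /tv/{id}/season/{n}/episode/{n}?api_key=...&language=es and reads the
# "name", "still_path", "overview" and "vote_average" fields of the JSON
# reply. A minimal standalone fetch of the same endpoint (Python 2; the
# channel itself goes through scrapertools/jsontools, and the function name
# here is hypothetical):
import json
import urllib2

def tmdb_episode(tv_id, season, episode, api_key):
    # Returns the decoded episode dict, e.g. tmdb_episode(...).get("overview")
    url = ("https://api.themoviedb.org/3/tv/%s/season/%s/episode/%s"
           "?api_key=%s&language=es" % (tv_id, season, episode, api_key))
    return json.loads(urllib2.urlopen(url).read())
# -----------------------------------------------------------------------------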
+ plot = "[COLOR floralwhite][B]" + plot + "[/B][/COLOR]" + if data.get("vote_average"): + rating = data.get("vote_average") + else: + rating = 0 + try: + + if rating >= 5 and rating < 8: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]" + elif rating >= 8 and rating < 10: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]" + elif rating == 10: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]" + else: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" + except: + rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + + + else: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." + plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/sDp4M2R.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.rating) + 
self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): + i = 0 + while i < len(text): + m = match(text, i) + s = m.group(m.lastindex) + i = m.end() + if m.lastindex == 2: + yield "s" + yield text[i:i + int(s)] + i = i + int(s) + else: + yield s + + +def decode_item(next, token): + if token == "i": + # integer: "i" value "e" + data = int(next()) + if next() != "e": + raise ValueError + elif token == "s": + # string: "s" value (virtual tokens) + data = next() + elif token == "l" or token == "d": + # container: "l" (or "d") values "e" + data = [] + tok = next() + while tok != "e": + data.append(decode_item(next, tok)) + tok = next() + if token == "d": + data = dict(zip(data[0::2], data[1::2])) + else: + raise ValueError + return data + + +def decode(text): + try: + src = tokenize(text) + data = decode_item(src.next, src.next()) + for token in src: # look for more tokens + raise SyntaxError("trailing junk") + except (AttributeError, ValueError, StopIteration): + try: + data = data + except: + data = src + + return data + + +def convert_size(size): + import math + if (size == 0): + return '0B' + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return '%s %s' % (s, size_name[i]) + + +def fanartv(item, id_tvdb, id, images={}): + headers = [['Content-Type', 'application/json']] + from core import jsontools + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % id + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb + try: + data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) + if data and not "error message" in data: + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + images[key] = value + else: + images = [] + + except: + images = [] + return images + + +def filmaffinity(item, infoLabels): + title = infoLabels["title"].replace(" ", "+") + try: + year = infoLabels["year"] + except: + year = "" + sinopsis = infoLabels["sinopsis"] + + if year == "": + if item.contentType != "movie": + tipo = "serie" + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title + else: + tipo = "película" + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title + try: + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + 
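# --- Editor's aside (illustrative sketch, not part of the original patch) ---
# A quick sanity check for the bencode decoder defined above: a .torrent file
# is a bencoded dict ("d...e") of length-prefixed strings and "i...e" integers,
# which is exactly what findvideos_enlaces() parses to report the video file
# name and size. The sample value below is made up.
sample = "d4:name8:demo.mkv6:lengthi1048576ee"
info = decode(sample)
print info["name"], convert_size(info["length"])  # -> demo.mkv 1.0 MB
# -----------------------------------------------------------------------------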
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + try: + data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data + except: + data = httptools.downloadpage("http://" + url_filma, cookies=False, timeout=1).data + else: + try: + data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data + except: + data = httptools.downloadpage(url_filma, cookies=False, timeout=1).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + except: + pass + else: + tipo = "Pelicula" + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url, cookies=False).data + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf, cookies=False).data + else: + if item.contentType != "movie": + tipo = "serie" + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title + else: + tipo = "película" + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title + try: + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/.*?/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma, cookies=False).data + else: + data = httptools.downloadpage(url_filma, cookies=False).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + except: + pass + sinopsis_f = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis_f = sinopsis_f.replace("<br><br />", "\n") + sinopsis_f = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis_f) + try: + year_f = scrapertools.get_match(data, '<dt>Año</dt>.*?>(\d+)</dd>') + except: + year_f = "" + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta %s no tiene críticas todavía...[/B][/COLOR]" % tipo + + return critica, rating_filma, year_f, sinopsis_f diff --git a/plugin.video.alfa/channels/trailertools.py 
b/plugin.video.alfa/channels/trailertools.py new file mode 100755 index 00000000..7dcde1bc --- /dev/null +++ b/plugin.video.alfa/channels/trailertools.py @@ -0,0 +1,560 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Search trailers from youtube, filmaffinity, abandomoviez, vimeo, etc... +# -------------------------------------------------------------------------------- + +import re +import urllib +import urlparse + +from core import config +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item +from platformcode import platformtools + +result = None +window_select = [] +# Para habilitar o no la opción de búsqueda manual +if config.get_platform() != "plex": + keyboard = True +else: + keyboard = False + + +def buscartrailer(item, trailers=[]): + logger.info() + + # Lista de acciones si se ejecuta desde el menú contextual + if item.action == "manual_search" and item.contextual: + itemlist = manual_search(item) + item.contentTitle = itemlist[0].contentTitle + elif 'search' in item.action and item.contextual: + itemlist = globals()[item.action](item) + else: + # Se elimina la opción de Buscar Trailer del menú contextual para evitar redundancias + if type(item.context) is str and "buscar_trailer" in item.context: + item.context = item.context.replace("buscar_trailer", "") + elif type(item.context) is list and "buscar_trailer" in item.context: + item.context.remove("buscar_trailer") + + item.text_color = "" + + itemlist = [] + if item.contentTitle != "": + item.contentTitle = item.contentTitle.strip() + elif keyboard: + fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip()) + item.contentTitle = platformtools.dialog_input(default=fulltitle, heading="Introduce el título a buscar") + if item.contentTitle is None: + item.contentTitle = fulltitle + else: + item.contentTitle = item.contentTitle.strip() + else: + fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip()) + item.contentTitle = fulltitle + + item.year = item.infoLabels['year'] + + logger.info("Búsqueda: %s" % item.contentTitle) + logger.info("Año: %s" % item.year) + if item.infoLabels['trailer'] and not trailers: + url = item.infoLabels['trailer'] + if "youtube" in url: + url = url.replace("embed/", "watch?v=") + titulo, url, server = servertools.findvideos(url)[0] + title = "Trailer por defecto [" + server + "]" + itemlist.append(item.clone(title=title, url=url, server=server, action="play")) + if item.show or item.infoLabels['tvshowtitle'] or item.contentType != "movie": + tipo = "tv" + else: + tipo = "movie" + try: + if not trailers: + itemlist.extend(tmdb_trailers(item, tipo)) + else: + for trailer in trailers: + title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en", "ING") \ + .replace("es", "ESP") + ") [tmdb/youtube]" + itemlist.append(item.clone(action="play", title=title, url=trailer['url'], server="youtube")) + except: + import traceback + logger.error(traceback.format_exc()) + + if item.contextual: + title = "[COLOR green]%s[/COLOR]" + else: + title = "%s" + itemlist.append(item.clone(title=title % "Búsqueda en Youtube", action="youtube_search", + text_color="green")) + itemlist.append(item.clone(title=title % "Búsqueda en Filmaffinity", + action="filmaffinity_search", text_color="green")) + # Si se trata de una serie, no se incluye la opción de buscar en Abandomoviez + if not item.show and not 
item.infoLabels['tvshowtitle']: + itemlist.append(item.clone(title=title % "Búsqueda en Abandomoviez", + action="abandomoviez_search", text_color="green")) + itemlist.append(item.clone(title=title % "Búsqueda en Jayhap (Youtube, Vimeo & Dailymotion)", + action="jayhap_search", text_color="green")) + + if item.contextual: + global window_select, result + select = Select("DialogSelect.xml", config.get_runtime_path(), item=item, itemlist=itemlist, + caption="Buscando: " + item.contentTitle) + window_select.append(select) + select.doModal() + + if item.windowed: + return result, window_select + else: + return itemlist + + +def manual_search(item): + logger.info() + texto = platformtools.dialog_input(default=item.contentTitle, heading=config.get_localized_string(30112)) + if texto is not None: + if item.extra == "abandomoviez": + return abandomoviez_search(item.clone(contentTitle=texto, page="", year="")) + elif item.extra == "youtube": + return youtube_search(item.clone(contentTitle=texto, page="")) + elif item.extra == "filmaffinity": + return filmaffinity_search(item.clone(contentTitle=texto, page="", year="")) + elif item.extra == "jayhap": + return jayhap_search(item.clone(contentTitle=texto)) + + +def tmdb_trailers(item, tipo="movie"): + logger.info() + + from core.tmdb import Tmdb + itemlist = [] + tmdb_search = None + if item.infoLabels['tmdb_id']: + tmdb_search = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo, idioma_busqueda='es') + elif item.infoLabels['year']: + tmdb_search = Tmdb(texto_buscado=item.contentTitle, tipo=tipo, year=item.infoLabels['year']) + + if tmdb_search: + for result in tmdb_search.get_videos(): + title = result['name'] + " [" + result['size'] + "p] (" + result['language'].replace("en", "ING") \ + .replace("es", "ESP") + ") [tmdb/youtube]" + itemlist.append(item.clone(action="play", title=title, url=result['url'], server="youtube")) + + return itemlist + + +def youtube_search(item): + logger.info() + itemlist = [] + + titulo = item.contentTitle + if item.extra != "youtube": + titulo += " trailer" + # Comprueba si es una búsqueda de cero o viene de la opción Siguiente + if item.page != "": + data = scrapertools.downloadpage(item.page) + else: + titulo = urllib.quote(titulo) + titulo = titulo.replace("%20", "+") + data = scrapertools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo) + + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<span class="yt-thumb-simple">.*?(?:src="https://i.ytimg.com/|data-thumb="https://i.ytimg.com/)([^"]+)"' \ + '.*?<h3 class="yt-lockup-title ">.*?<a href="([^"]+)".*?title="([^"]+)".*?' 
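# tmdb_trailers() renders every TMDB video entry as
# "name [SIZEp] (LANG) [tmdb/youtube]", mapping ISO language codes to short
# Spanish labels. The formatting step in isolation, with a made-up entry:
def format_trailer(trailer):
    lang = trailer['language'].replace("en", "ING").replace("es", "ESP")
    return "%s [%sp] (%s) [tmdb/youtube]" % (trailer['name'], trailer['size'], lang)

print(format_trailer({'name': 'Teaser oficial', 'size': '1080', 'language': 'es'}))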
\ + '</a><span class="accessible-description".*?>.*?(\d+:\d+)' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matches: + scrapedthumbnail = urlparse.urljoin("https://i.ytimg.com/", scrapedthumbnail) + scrapedtitle = scrapedtitle.decode("utf-8") + scrapedtitle = scrapedtitle + " (" + scrapedduration + ")" + if item.contextual: + scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle + url = urlparse.urljoin('https://www.youtube.com/', scrapedurl) + itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url, + thumbnail=scrapedthumbnail, text_color="white")) + + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">' + 'Siguiente') + if next_page != "": + next_page = urlparse.urljoin("https://www.youtube.com", next_page) + itemlist.append(item.clone(title=">> Siguiente", action="youtube_search", extra="youtube", page=next_page, + thumbnail="", text_color="")) + + if not itemlist: + itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % titulo, + action="", thumbnail="", text_color="")) + + if keyboard: + if item.contextual: + title = "[COLOR green]%s[/COLOR]" + else: + title = "%s" + itemlist.append(item.clone(title=title % "Búsqueda Manual en Youtube", action="manual_search", + text_color="green", thumbnail="", extra="youtube")) + + return itemlist + + +def abandomoviez_search(item): + logger.info() + + # Comprueba si es una búsqueda de cero o viene de la opción Siguiente + if item.page != "": + data = scrapertools.downloadpage(item.page) + else: + titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1') + post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1', + 'anioin': item.year, 'anioout': item.year, 'orderby': '1'}) + url = "http://www.abandomoviez.net/db/busca_titulo_advance.php" + item.prefix = "db/" + data = scrapertools.downloadpage(url, post=post) + if "No hemos encontrado ninguna" in data: + url = "http://www.abandomoviez.net/indie/busca_titulo_advance.php" + item.prefix = "indie/" + data = scrapertools.downloadpage(url, post=post).decode("iso-8859-1").encode('utf-8') + + itemlist = [] + patron = '(?:<td width="85"|<div class="col-md-2 col-sm-2 col-xs-3">).*?<img src="([^"]+)"' \ + '.*?href="([^"]+)">(.*?)(?:<\/td>|<\/small>)' + matches = scrapertools.find_multiple_matches(data, patron) + # Si solo hay un resultado busca directamente los trailers, sino lista todos los resultados + if len(matches) == 1: + item.url = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, matches[0][1]) + item.thumbnail = matches[0][0] + itemlist = search_links_abando(item) + elif len(matches) > 1: + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl) + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + itemlist.append(item.clone(title=scrapedtitle, action="search_links_abando", + url=scrapedurl, thumbnail=scrapedthumbnail, text_color="white")) + + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente') + if next_page != "": + next_page = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, next_page) + itemlist.append(item.clone(title=">> Siguiente", action="abandomoviez_search", page=next_page, thumbnail="", + text_color="")) + + if not itemlist: + itemlist.append(item.clone(title="La búsqueda no ha dado resultados", 
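# youtube_search() pages by scraping the "Siguiente" link and re-queueing
# itself with page=<that URL>. The extraction step alone, run against a
# canned snippet (the channel's real pattern also matches the button markup):
import re

def find_next_page(html):
    m = re.search(r'<a href="([^"]+)"[^>]*>Siguiente', html)
    if m:
        return m.group(1)
    return ""

print(find_next_page('<a href="/results?p=2" class="next">Siguiente</a>'))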
action="", thumbnail="", + text_color="")) + + if keyboard: + if item.contextual: + title = "[COLOR green]%s[/COLOR]" + else: + title = "%s" + itemlist.append(item.clone(title=title % "Búsqueda Manual en Abandomoviez", + action="manual_search", thumbnail="", text_color="green", extra="abandomoviez")) + + return itemlist + + +def search_links_abando(item): + logger.info() + + data = scrapertools.downloadpage(item.url) + itemlist = [] + if "Lo sentimos, no tenemos trailer" in data: + itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color="")) + else: + if item.contextual: + progreso = platformtools.dialog_progress("Buscando en abandomoviez", "Cargando trailers...") + progreso.update(10) + i = 0 + message = "Cargando trailers..." + patron = '<div class="col-md-3 col-xs-6"><a href="([^"]+)".*?' \ + 'Images/(\d+).gif.*?</div><small>(.*?)</small>' + matches = scrapertools.find_multiple_matches(data, patron) + if len(matches) == 0: + trailer_url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"') + if trailer_url != "": + trailer_url = trailer_url.replace("embed/", "watch?v=") + code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)') + thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code + itemlist.append(item.clone(title="Trailer [youtube]", url=trailer_url, server="youtube", + thumbnail=thumbnail, action="play", text_color="white")) + else: + for scrapedurl, language, scrapedtitle in matches: + if language == "1": + idioma = " (ESP)" + else: + idioma = " (V.O)" + scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl) + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + idioma + " [youtube]" + if item.contextual: + i += 1 + message += ".." + progreso.update(10 + (90 * i / len(matches)), message) + scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle + + data_trailer = scrapertools.downloadpage(scrapedurl) + trailer_url = scrapertools.find_single_match(data_trailer, 'iframe.*?src="([^"]+)"') + trailer_url = trailer_url.replace("embed/", "watch?v=") + code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)') + thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code + itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server="youtube", action="play", + thumbnail=thumbnail, text_color="white")) + + if item.contextual: + progreso.close() + + if keyboard: + if item.contextual: + title = "[COLOR green]%s[/COLOR]" + else: + title = "%s" + itemlist.append(item.clone(title=title % "Búsqueda Manual en Abandomoviez", + action="manual_search", thumbnail="", text_color="green", extra="abandomoviez")) + return itemlist + + +def filmaffinity_search(item): + logger.info() + + if item.filmaffinity: + item.url = item.filmaffinity + return search_links_filmaff(item) + + # Comprueba si es una búsqueda de cero o viene de la opción Siguiente + if item.page != "": + data = scrapertools.downloadpage(item.page) + else: + params = urllib.urlencode([('stext', item.contentTitle), ('stype%5B%5D', 'title'), ('country', ''), + ('genre', ''), ('fromyear', item.year), ('toyear', item.year)]) + url = "http://www.filmaffinity.com/es/advsearch.php?%s" % params + data = scrapertools.downloadpage(url) + + itemlist = [] + patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?' 
\ + '<div class="mc-title"><a href="/es/film(\d+).html"[^>]+>(.*?)<img' + matches = scrapertools.find_multiple_matches(data, patron) + # Si solo hay un resultado, busca directamente los trailers, sino lista todos los resultados + if len(matches) == 1: + item.url = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % matches[0][1] + item.thumbnail = matches[0][0] + if not item.thumbnail.startswith("http"): + item.thumbnail = "http://www.filmaffinity.com" + item.thumbnail + itemlist = search_links_filmaff(item) + elif len(matches) > 1: + for scrapedthumbnail, id, scrapedtitle in matches: + if not scrapedthumbnail.startswith("http"): + scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail + scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id + scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore") + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, text_color="white", + action="search_links_filmaff", thumbnail=scrapedthumbnail)) + + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">>></a>') + if next_page != "": + next_page = urlparse.urljoin("http://www.filmaffinity.com/es/", next_page) + itemlist.append(item.clone(title=">> Siguiente", page=next_page, action="filmaffinity_search", thumbnail="", + text_color="")) + + if not itemlist: + itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % item.contentTitle, + action="", thumbnail="", text_color="")) + + if keyboard: + if item.contextual: + title = "[COLOR green]%s[/COLOR]" + else: + title = "%s" + itemlist.append(item.clone(title=title % "Búsqueda Manual en Filmaffinity", + action="manual_search", text_color="green", thumbnail="", extra="filmaffinity")) + + return itemlist + + +def search_links_filmaff(item): + logger.info() + + itemlist = [] + data = scrapertools.downloadpage(item.url) + if not '<a class="lnkvvid"' in data: + itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color="")) + else: + patron = '<a class="lnkvvid".*?<b>(.*?)</b>.*?iframe.*?src="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedtitle, scrapedurl in matches: + if not scrapedurl.startswith("http:"): + scrapedurl = urlparse.urljoin("http:", scrapedurl) + trailer_url = scrapedurl.replace("-nocookie.com/embed/", ".com/watch?v=") + if "youtube" in trailer_url: + server = "youtube" + code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)') + thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code + else: + server = "" + thumbnail = item.thumbnail + scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore") + scrapedtitle = scrapertools.htmlclean(scrapedtitle) + scrapedtitle += " [" + server + "]" + if item.contextual: + scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle + itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server=server, action="play", + thumbnail=thumbnail, text_color="white")) + + itemlist = servertools.get_servers_itemlist(itemlist) + if keyboard: + if item.contextual: + title = "[COLOR green]%s[/COLOR]" + else: + title = "%s" + itemlist.append(item.clone(title=title % "Búsqueda Manual en Filmaffinity", + action="manual_search", thumbnail="", text_color="green", extra="filmaffinity")) + + return itemlist + + +def jayhap_search(item): + logger.info() + itemlist = [] + + if item.extra != "jayhap": + item.contentTitle += " trailer" + texto = item.contentTitle + post = 
urllib.urlencode({'q': texto, 'yt': 'true', 'vm': 'true', 'dm': 'true', + 'v': 'all', 'l': 'all', 'd': 'all'}) + + # Comprueba si es una búsqueda de cero o viene de la opción Siguiente + if item.page != "": + post += urllib.urlencode(item.page) + data = scrapertools.downloadpage("https://www.jayhap.com/load_more.php", post=post) + else: + data = scrapertools.downloadpage("https://www.jayhap.com/get_results.php", post=post) + data = jsontools.load(data) + for video in data['videos']: + url = video['url'] + server = video['source'].lower() + duration = " (" + video['duration'] + ")" + title = video['title'].decode("utf-8") + duration + " [" + server.capitalize() + "]" + thumbnail = video['thumbnail'] + if item.contextual: + title = "[COLOR white]%s[/COLOR]" % title + itemlist.append(item.clone(action="play", server=server, title=title, url=url, thumbnail=thumbnail, + text_color="white")) + + if not itemlist: + itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % item.contentTitle, + action="", thumbnail="", text_color="")) + else: + tokens = data['tokens'] + tokens['yt_token'] = tokens.pop('youtube') + tokens['vm_token'] = tokens.pop('vimeo') + tokens['dm_token'] = tokens.pop('dailymotion') + itemlist.append(item.clone(title=">> Siguiente", page=tokens, action="jayhap_search", extra="jayhap", + thumbnail="", text_color="")) + + if keyboard: + if item.contextual: + title = "[COLOR green]%s[/COLOR]" + else: + title = "%s" + itemlist.append(item.clone(title=title % "Búsqueda Manual en Jayhap", action="manual_search", + text_color="green", thumbnail="", extra="jayhap")) + + return itemlist + + +try: + import xbmcgui + import xbmc + + + class Select(xbmcgui.WindowXMLDialog): + def __init__(self, *args, **kwargs): + self.item = kwargs.get('item') + self.itemlist = kwargs.get('itemlist') + self.caption = kwargs.get('caption') + self.result = None + + def onInit(self): + try: + self.control_list = self.getControl(6) + self.getControl(5).setNavigation(self.control_list, self.control_list, self.control_list, + self.control_list) + self.getControl(3).setEnabled(0) + self.getControl(3).setVisible(0) + except: + pass + + try: + self.getControl(99).setVisible(False) + except: + pass + self.getControl(1).setLabel("[COLOR orange]" + self.caption + "[/COLOR]") + self.getControl(5).setLabel("[COLOR tomato][B]Cerrar[/B][/COLOR]") + self.items = [] + for item in self.itemlist: + item_l = xbmcgui.ListItem(item.title) + item_l.setArt({'thumb': item.thumbnail}) + item_l.setProperty('item_copy', item.tourl()) + self.items.append(item_l) + self.control_list.reset() + self.control_list.addItems(self.items) + self.setFocus(self.control_list) + + def onClick(self, id): + # Boton Cancelar y [X] + if id == 5: + global window_select, result + self.result = "_no_video" + result = "no_video" + self.close() + window_select.pop() + if not window_select: + if not self.item.windowed: + del window_select + else: + window_select[-1].doModal() + + def onAction(self, action): + global window_select, result + if action == 92 or action == 110: + self.result = "no_video" + result = "no_video" + self.close() + window_select.pop() + if not window_select: + if not self.item.windowed: + del window_select + else: + window_select[-1].doModal() + + try: + if (action == 7 or action == 100) and self.getFocusId() == 6: + selectitem = self.control_list.getSelectedItem() + item = Item().fromurl(selectitem.getProperty("item_copy")) + if item.action == "play" and self.item.windowed: + video_urls, puede, motivo = 
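# jayhap_search() paginates by re-posting the session tokens returned in the
# JSON, renamed to the field names load_more.php expects. The renaming step
# on its own, with placeholder token values:
def next_page_tokens(tokens):
    tokens = dict(tokens)  # keep the original response untouched
    tokens['yt_token'] = tokens.pop('youtube')
    tokens['vm_token'] = tokens.pop('vimeo')
    tokens['dm_token'] = tokens.pop('dailymotion')
    return tokens

print(next_page_tokens({'youtube': 'tk1', 'vimeo': 'tk2', 'dailymotion': 'tk3'}))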
servertools.resolve_video_urls_for_playing(item.server, item.url) + self.close() + xbmc.sleep(200) + if puede: + result = video_urls[-1][1] + self.result = video_urls[-1][1] + else: + result = None + self.result = None + + elif item.action == "play" and not self.item.windowed: + for window in window_select: + window.close() + retorna = platformtools.play_video(item) + if not retorna: + while True: + xbmc.sleep(1000) + if not xbmc.Player().isPlaying(): + break + window_select[-1].doModal() + else: + self.close() + buscartrailer(item) + except: + import traceback + logger.error(traceback.format_exc()) +except: + pass diff --git a/plugin.video.alfa/channels/tubehentai.json b/plugin.video.alfa/channels/tubehentai.json new file mode 100755 index 00000000..7a724688 --- /dev/null +++ b/plugin.video.alfa/channels/tubehentai.json @@ -0,0 +1,33 @@ +{ + "id": "tubehentai", + "name": "tubehentai", + "active": true, + "adult": true, + "language": "es", + "banner": "tubehentai.png", + "thumbnail": "tubehentai.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "05/08/2016", + "description": "Eliminado de sección películas." + } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/tubehentai.py b/plugin.video.alfa/channels/tubehentai.py new file mode 100755 index 00000000..cb3665f3 --- /dev/null +++ b/plugin.video.alfa/channels/tubehentai.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Novedades", action="novedades", url="http://tubehentai.com/")) + itemlist.append( + Item(channel=item.channel, title="Buscar", action="search", url="http://tubehentai.com/search/%s/page1.html")) + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "%20") + + item.url = item.url % texto + try: + return novedades(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def novedades(item): + logger.info() + + # Descarga la página + data = scrapertools.cachePage(item.url) + # <a href="http://tubehentai.com/videos/slave_market_¨c_ep1-595.html"><img class="img" width="145" src="http://tubehentai.com/media/thumbs/5/9/5/./f/595/595.flv-3.jpg" alt="Slave_Market_¨C_Ep1" id="4f4fbf26f36 + patron = '<a href="(http://tubehentai.com/videos/[^"]+)"><img.*?src="(http://tubehentai.com/media/thumbs/[^"]+)" alt="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + itemlist = [] + for match in matches: + # Titulo + scrapedtitle = match[2] + scrapedurl = match[0] + scrapedthumbnail = match[1].replace(" ", "%20") + scrapedplot = scrapertools.htmlclean(match[2].strip()) + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + # Añade al listado de XBMC + itemlist.append( + Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, + plot=scrapedplot, folder=False)) + + # ------------------------------------------------------ + # Extrae 
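# The Select dialog above stows each Item inside its ListItem as a string
# property (item.tourl()) and rebuilds it on click with Item().fromurl().
# The real serializer lives in core.item; this json + base64 stand-in only
# illustrates the same round-trip idea:
import base64
import json

def tourl(obj):
    return base64.b64encode(json.dumps(obj).encode("utf-8"))

def fromurl(blob):
    return json.loads(base64.b64decode(blob))

prop = tourl({'action': 'play', 'url': 'http://example.com/v.mp4'})
print(fromurl(prop)['url'])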
el paginador + # ------------------------------------------------------ + # <a href="page2.html" class="next">Next »</a> + patronvideos = '<a href=\'(page[^\.]+\.html)\'[^>]*?>Next[^<]*?<\/a>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + if len(matches) > 0: + scrapedurl = urlparse.urljoin(item.url, "/" + matches[0]) + logger.info(scrapedurl) + itemlist.append(Item(channel=item.channel, action="novedades", title=">> Página siguiente", url=scrapedurl)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + # s1.addParam("flashvars","overlay=http://tubehentai.com/media/thumbs/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4 + # http://tubehentai.com/media/thumbs/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4 + # http://tubehentai.com/media/videos/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4?start=0 + data = scrapertools.cachePage(item.url) + url = scrapertools.get_match(data, 's1.addParam\("flashvars","bufferlength=1&autostart=true&overlay=(.*?\.mp4)') + url = url.replace("/thumbs", "/videos") + # url = url+"?start=0" + logger.info("url=" + url) + server = "Directo" + itemlist.append(Item(channel=item.channel, title="", url=url, server=server, folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/tupornotv.json b/plugin.video.alfa/channels/tupornotv.json new file mode 100755 index 00000000..ba5e7769 --- /dev/null +++ b/plugin.video.alfa/channels/tupornotv.json @@ -0,0 +1,33 @@ +{ + "id": "tupornotv", + "name": "tuporno.tv", + "active": true, + "adult": true, + "language": "es", + "banner": "tupornotv.png", + "thumbnail": "tupornotv.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "05/08/2016", + "description": "Eliminado de sección películas." 
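# tubehentai's play() recovers the stream by reading the player's flashvars
# "overlay" thumbnail URL and swapping the /thumbs path for /videos. The
# transform alone, over a trimmed copy of the markup quoted in that function:
import re

def video_from_flashvars(data):
    url = re.search(r'overlay=(.*?\.mp4)', data).group(1)
    return url.replace("/thumbs", "/videos")

sample = 'flashvars","bufferlength=1&autostart=true&overlay=http://host/media/thumbs/a/b/clip.mp4'
print(video_from_flashvars(sample))  # -> http://host/media/videos/a/b/clip.mp4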
+ } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/tupornotv.py b/plugin.video.alfa/channels/tupornotv.py new file mode 100755 index 00000000..c549dd2e --- /dev/null +++ b/plugin.video.alfa/channels/tupornotv.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Pendientes de Votación", action="novedades", + url="http://tuporno.tv/pendientes")) + itemlist.append( + Item(channel=item.channel, title="Populares", action="masVistos", url="http://tuporno.tv/", folder=True)) + itemlist.append( + Item(channel=item.channel, title="Categorias", action="categorias", url="http://tuporno.tv/categorias/", + folder=True)) + itemlist.append(Item(channel=item.channel, title="Videos Recientes", action="novedades", + url="http://tuporno.tv/videosRecientes/", folder=True)) + itemlist.append(Item(channel=item.channel, title="Top Videos (mas votados)", action="masVotados", + url="http://tuporno.tv/topVideos/", folder=True)) + itemlist.append(Item(channel=item.channel, title="Nube de Tags", action="categorias", url="http://tuporno.tv/tags/", + folder=True)) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) + + return itemlist + + +def novedades(item): + logger.info() + url = item.url + # ------------------------------------------------------ + # Descarga la página + # ------------------------------------------------------ + data = scrapertools.cachePage(url) + # logger.info(data) + + # ------------------------------------------------------ + # Extrae las entradas + # ------------------------------------------------------ + # seccion novedades + ''' + <table border="0" cellpadding="0" cellspacing="0" ><tr><td align="center" width="100%" valign="top" height="160px"> + <a href="/videos/cogiendo-en-el-bosque"><img src="imagenes/videos//c/o/cogiendo-en-el-bosque_imagen2.jpg" alt="Cogiendo en el bosque" border="0" align="top" /></a> + <h2><a href="/videos/cogiendo-en-el-bosque">Cogiendo en el bosque</a></h2> + ''' + patronvideos = '<div class="relative">(.*?)</div><div class="video' + + matches = re.compile(patronvideos, re.DOTALL).findall(data) + # if DEBUG: scrapertools.printMatches(matches) + + itemlist = [] + for match in matches: + # Titulo + try: + scrapedtitle = re.compile('title="(.+?)"').findall(match)[0] + + except: + scrapedtitle = '' + try: + scrapedurl = re.compile('href="(.+?)"').findall(match)[0] + scrapedurl = urlparse.urljoin(url, scrapedurl) + except: + continue + try: + scrapedthumbnail = re.compile('src="(.+?)"').findall(match)[0] + scrapedthumbnail = urlparse.urljoin(url, scrapedthumbnail) + except: + scrapedthumbnail = '' + scrapedplot = "" + try: + duracion = re.compile('<div class="duracion">(.+?)<').findall(match)[0] + except: + try: + duracion = re.compile('\((.+?)\)<br').findall(match[3])[0] + except: + duracion = "" + + # logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"], duracion=["+duracion+"]") + # Añade al listado de XBMC + # trozos = scrapedurl.split("/") + # id = trozos[len(trozos)-1] + # videos = 
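# tupornotv's novedades() wraps each field lookup in its own try/except so a
# missing attribute blanks that field (or skips the entry) instead of losing
# the whole listing. The same tolerant-extraction idea, reduced to a sketch
# over made-up markup:
import re

def extract_video(block):
    out = {}
    for name, patron in [('title', 'title="(.+?)"'), ('url', 'href="(.+?)"'),
                         ('thumb', 'src="(.+?)"')]:
        try:
            out[name] = re.findall(patron, block)[0]
        except IndexError:
            out[name] = ""
    return out

print(extract_video('<a href="/videos/demo" title="Demo"><img src="/t.jpg"/></a>'))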
"http://149.12.64.129/videoscodiH264/"+id[0:1]+"/"+id[1:2]+"/"+id+".flv" + itemlist.append( + Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False)) + + # ------------------------------------------------------ + # Extrae el paginador + # ------------------------------------------------------ + # <a href="/topVideos/todas/mes/2/" class="enlace_si">Siguiente </a> + patronsiguiente = '<a href="(.+?)" class="enlace_si">Siguiente </a>' + siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data) + if len(siguiente) > 0: + scrapedurl = urlparse.urljoin(url, siguiente[0]) + itemlist.append(Item(channel=item.channel, action="novedades", title="!Next page", url=scrapedurl, folder=True)) + + return itemlist + + +def masVistos(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/hoy", folder=True)) + itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", url="http://tuporno.tv/recientes", + folder=True)) + itemlist.append( + Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/semana", folder=True)) + itemlist.append( + Item(channel=item.channel, title="Mes", action="novedades", url="http://tuporno.tv/mes", folder=True)) + itemlist.append( + Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/ano", folder=True)) + return itemlist + + +def categorias(item): + logger.info() + + url = item.url + # ------------------------------------------------------ + # Descarga la página + # ------------------------------------------------------ + data = scrapertools.cachePage(url) + # logger.info(data) + # ------------------------------------------------------ + # Extrae las entradas + # ------------------------------------------------------ + # seccion categorias + # Patron de las entradas + if url == "http://tuporno.tv/categorias/": + patronvideos = '<li><a href="([^"]+)"' # URL + patronvideos += '>([^<]+)</a></li>' # TITULO + else: + patronvideos = '<a href="(.tags[^"]+)"' # URL + patronvideos += ' class="[^"]+">([^<]+)</a>' # TITULO + + matches = re.compile(patronvideos, re.DOTALL).findall(data) + # if DEBUG: scrapertools.printMatches(matches) + + itemlist = [] + for match in matches: + if match[1] in ["SexShop", "Videochat", "Videoclub"]: + continue + # Titulo + scrapedtitle = match[1] + scrapedurl = urlparse.urljoin(url, match[0]) + scrapedthumbnail = "" + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + + # Añade al listado de XBMC + itemlist.append(Item(channel=item.channel, action="novedades", title=scrapedtitle.capitalize(), url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + return itemlist + + +def masVotados(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, title="Hoy", action="novedades", url="http://tuporno.tv/topVideos/todas/hoy", + folder=True)) + itemlist.append(Item(channel=item.channel, title="Recientes", action="novedades", + url="http://tuporno.tv/topVideos/todas/recientes", folder=True)) + itemlist.append( + Item(channel=item.channel, title="Semana", action="novedades", url="http://tuporno.tv/topVideos/todas/semana", + folder=True)) + itemlist.append( + Item(channel=item.channel, title="Mes", action="novedades", 
url="http://tuporno.tv/topVideos/todas/mes", + folder=True)) + itemlist.append( + Item(channel=item.channel, title="Año", action="novedades", url="http://tuporno.tv/topVideos/todas/ano", + folder=True)) + return itemlist + + +def search(item, texto): + logger.info() + if texto != "": + texto = texto.replace(" ", "+") + else: + texto = item.extra.replace(" ", "+") + item.url = "http://tuporno.tv/buscador/?str=" + texto + try: + return getsearch(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def getsearch(item): + logger.info() + data = scrapertools.cachePage(item.url) + data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data) + patronvideos = '<div class="relative"><a href="(.videos[^"]+)"[^>]+><img.+?src="([^"]+)" alt="(.+?)" .*?<div class="duracion">(.+?)</div></div></div>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + + if len(matches) > 0: + itemlist = [] + for match in matches: + # Titulo + scrapedtitle = match[2].replace("<b>", "") + scrapedtitle = scrapedtitle.replace("</b>", "") + scrapedurl = urlparse.urljoin("http://tuporno.tv/", match[0]) + scrapedthumbnail = urlparse.urljoin("http://tuporno.tv/", match[1]) + scrapedplot = "" + duracion = match[3] + + itemlist.append( + Item(channel=item.channel, action="play", title=scrapedtitle + " [" + duracion + "]", url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False)) + + '''<a href="/buscador/?str=busqueda&desde=HV_PAGINA_SIGUIENTE" class="enlace_si">Siguiente </a>''' + patronsiguiente = '<a href="([^"]+)" class="enlace_si">Siguiente </a>' + siguiente = re.compile(patronsiguiente, re.DOTALL).findall(data) + if len(siguiente) > 0: + patronultima = '<!--HV_SIGUIENTE_ENLACE' + ultpagina = re.compile(patronultima, re.DOTALL).findall(data) + scrapertools.printMatches(siguiente) + + if len(ultpagina) == 0: + scrapedurl = urlparse.urljoin(item.url, siguiente[0]) + itemlist.append( + Item(channel=item.channel, action="getsearch", title="!Next page", url=scrapedurl, folder=True)) + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + # Lee la pagina del video + data = scrapertools.cachePage(item.url) + codVideo = scrapertools.get_match(data, 'body id="([^"]+)"') + logger.info("codVideo=" + codVideo) + + # Lee la pagina con el codigo + # http://tuporno.tv/flvurl.php?codVideo=188098&v=MAC%2011,5,502,146 + url = "http://tuporno.tv/flvurl.php?codVideo=" + codVideo + "&v=MAC%2011,5,502,146" + data = scrapertools.cachePage(url) + logger.info("data=" + data) + kpt = scrapertools.get_match(data, "kpt\=(.+?)\&") + logger.info("kpt=" + kpt) + + # Decodifica + import base64 + url = base64.decodestring(kpt) + logger.info("url=" + url) + + itemlist.append( + Item(channel=item.channel, action="play", title=item.title, url=url, thumbnail=item.thumbnail, plot=item.plot, + server="Directo", folder=False)) + + return itemlist diff --git a/plugin.video.alfa/channels/tvmoviedb.json b/plugin.video.alfa/channels/tvmoviedb.json new file mode 100755 index 00000000..cc476ca8 --- /dev/null +++ b/plugin.video.alfa/channels/tvmoviedb.json @@ -0,0 +1,193 @@ +{ + "id": "tvmoviedb", + "name": "TvMovieDB", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/HA7fvgD.png", + "version": 1, + "changes": [ + { + "date": "12/05/2017", + "description": "Corregida sección myanimelist y cambios menores" + }, + 
{ + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "04/01/2017", + "description": "Primera version." + } + ], + "categories": [ + "movie", + "tvshow", + "anime" + ], + "settings": [ + { + "id": "tmdb", + "type": "list", + "label": "Idioma de búsqueda en TMDB", + "default": 7, + "enabled": true, + "visible": true, + "lvalues": [ + "Alemán", + "Francés", + "Portugués", + "Italiano", + "Español Latino", + "Catalán", + "Inglés", + "Castellano" + ] + }, + { + "id": "tmdb_alternativo", + "type": "list", + "label": "Idioma alternativo para TMDB (No sinopsis idioma principal)", + "default": 6, + "enabled": true, + "visible": true, + "lvalues": [ + "Alemán", + "Francés", + "Portugués", + "Italiano", + "Español Latino", + "Catalán", + "Inglés", + "Castellano" + ] + }, + { + "id": "imdb", + "type": "list", + "label": "Idioma de los títulos en IMDB", + "color": "0xFFE0F04B", + "default": 7, + "enabled": true, + "visible": true, + "lvalues": [ + "Alemán", + "Francés", + "Portugués", + "Italiano", + "Español Latino", + "Catalán", + "Inglés", + "Castellano" + ] + }, + { + "id": "label1", + "type": "label", + "label": "", + "enabled": false, + "visible": true + }, + { + "id": "filmaff", + "type": "list", + "label": "Sitio Web Filmaffinity", + "color": "0xFF25AA48", + "default": 5, + "enabled": true, + "visible": true, + "lvalues": [ + "Colombia", + "Chile", + "Argentina", + "México", + "US/UK", + "España" + ] + }, + { + "id": "usuariofa", + "type": "text", + "label": "Usuario Filmaffinity (Opcional)", + "color": "0xFFd50b0b", + "default": "", + "enabled": true, + "visible": true + }, + { + "id": "passfa", + "type": "text", + "label": "Contraseña Filmaffinity", + "color": "0xFFd50b0b", + "default": "", + "enabled": "!eq(-1,'')", + "hidden": true, + "visible": true + }, + { + "id": "orderfa", + "type": "list", + "label": "Ordenar listas personales de Filmaffinity por:", + "color": "0xFF25AA48", + "default": 0, + "enabled": "!eq(-1,'')", + "visible": true, + "lvalues": [ + "Posición", + "Título", + "Año", + "Voto", + "Nota media" + ] + }, + { + "id": "label2", + "type": "label", + "label": "", + "enabled": false, + "visible": true + }, + { + "id": "usuariomal", + "type": "text", + "label": "Usuario MyAnimeList (Opcional)", + "color": "0xFF25AA48", + "default": "", + "enabled": true, + "visible": true + }, + { + "id": "passmal", + "type": "text", + "label": "Contraseña MyAnimeList", + "color": "0xFF25AA48", + "default": "", + "enabled": "!eq(-1,'')", + "hidden": true, + "visible": true + }, + { + "id": "adult_mal", + "type": "bool", + "label": "Mostrar Hentais en MyAnimeList", + "color": "0xFFd50b0b", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1", + "Ninguno" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/tvmoviedb.py b/plugin.video.alfa/channels/tvmoviedb.py new file mode 100755 index 00000000..fffc0085 --- /dev/null +++ b/plugin.video.alfa/channels/tvmoviedb.py @@ -0,0 +1,3420 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +from base64 import b64decode as bdec + +from core import config +from core import filetools +from core import httptools +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item +from core.tmdb import Tmdb +from platformcode import platformtools + +__perfil__ = 
config.get_setting('perfil', "tvmoviedb") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08', '0xFFFFD700']] +if __perfil__ < 3: + color1, color2, color3, color4, color5, color6 = perfil[__perfil__] +else: + color1 = color2 = color3 = color4 = color5 = color6 = "" + +langs = ['de', 'fr', 'pt', 'it', 'es-MX', 'ca', 'en', 'es'] +langt = langs[config.get_setting('tmdb', "tvmoviedb")] +langt_alt = langs[config.get_setting('tmdb_alternativo', "tvmoviedb")] +langs = ['co', 'cl', 'ar', 'mx', 'en', 'es'] +langf = langs[config.get_setting('filmaff', "tvmoviedb")] +langs = ['de-de', 'fr-fr', 'pt-pt', 'it-it', 'es-MX', 'ca-es', 'en', 'es'] +langi = langs[config.get_setting('imdb', "tvmoviedb")] +adult_mal = config.get_setting('adult_mal', "tvmoviedb") +mal_ck = "MzE1MDQ2cGQ5N2llYTY4Z2xwbGVzZjFzbTY=" +images_predef = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" +default_fan = filetools.join(config.get_runtime_path(), "fanart.jpg") + + +def mainlist(item): + logger.info() + item.text_color = color1 + itemlist = [] + + itemlist.append(item.clone(title="Búsqueda en TMDB", action="", text_color=color2)) + itemlist.append(item.clone(title=" - Películas", action="tmdb", extra="movie", + thumbnail="%s0/Movies.png" % images_predef)) + itemlist.append(item.clone(title=" - Series", action="tmdb", extra="tv", + thumbnail=images_predef + "0/TV%20Series.png")) + itemlist.append(item.clone(title="Búsqueda en Filmaffinity", action="", text_color=color2)) + itemlist.append(item.clone(title=" - Películas", action="filmaf", extra="movie", + thumbnail="%s0/Movies.png" % images_predef)) + itemlist.append(item.clone(title=" - Series", action="filmaf", extra="tv", + thumbnail=images_predef + "0/TV%20Series.png")) + itemlist.append(item.clone(title="Búsqueda en IMDB", action="", text_color=color2)) + itemlist.append(item.clone(title=" - Películas", action="imdb", extra="movie", + url='&title_type=feature,tv_movie', + thumbnail="%s0/Movies.png" % images_predef)) + itemlist.append(item.clone(title=" - Series", action="imdb", extra="tv", + url='&title_type=tv_series,tv_special,mini_series', + thumbnail=images_predef + "0/TV%20Series.png")) + itemlist.append( + item.clone(title="Trakt.tv", action="trakt", text_color=color2, thumbnail="http://i.imgur.com/5sQjjuk.png")) + itemlist.append( + item.clone(title="MyAnimeList", action="mal", text_color=color2, thumbnail="http://i.imgur.com/RhsYWmd.png")) + itemlist.append(item.clone(title="", action="")) + itemlist.append( + item.clone(title="Ajustes motores de búsqueda", action="configuracion", text_color=color6, folder=False)) + return itemlist + + +def configuracion(item): + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search_(item): + texto = platformtools.dialog_input(heading=item.title) + if texto: + if "imdb" in item.url: + item.url += texto.replace(" ", "+") + item.action = "listado_imdb" + return listado_imdb(item) + if "filmaffinity" in item.url: + item.url += texto.replace(" ", "+") + item.action = "listado_fa" + return listado_fa(item) + if "myanimelist" in item.url: + item.url += texto.replace(" ", "%20") + item.url += "&type=0&score=0&status=0&p=0&r=0&sm=0&sd=0&sy=0&em=0&ed=0&ey=0&c[0]=a" \ + "&c[1]=b&c[2]=c&c[3]=d&c[4]=f&gx=0" + item.action = 
"busqueda_mal" + return busqueda_mal(item) + + item.search['query'] = texto + item.action = "listado_tmdb" + return listado_tmdb(item) + + +def busqueda(item): + logger.info() + cat = [item.extra.replace("tv", "serie")] + new_item = Item() + new_item.extra = item.contentTitle.replace("+", " ") + new_item.category = item.extra + + from channels import search + return search.do_search(new_item, cat) + + +def tmdb(item): + item.contentType = item.extra.replace("tv", "tvshow") + + itemlist = [] + itemlist.append(item.clone(title="Más Populares", action="listado_tmdb", + search={'url': item.extra + "/popular", 'language': langt, 'page': 1})) + itemlist.append(item.clone(title="Más Valoradas", action="listado_tmdb", + search={'url': item.extra + "/top_rated", 'language': langt, 'page': 1})) + if item.extra == "movie": + itemlist.append(item.clone(title="En Cartelera", action="listado_tmdb", + search={'url': item.extra + "/now_playing", 'language': langt, 'page': 1})) + else: + itemlist.append(item.clone(title="En Emisión", action="listado_tmdb", + search={'url': item.extra + "/on_the_air", 'language': langt, 'page': 1})) + itemlist.append(item.clone(title="Géneros", action="indices_tmdb", + thumbnail="%s0/Genres.png" % images_predef)) + itemlist.append(item.clone(title="Año", action="indices_tmdb", thumbnail="%s0/Year.png" % images_predef)) + + if item.extra == "movie": + itemlist.append(item.clone(title="Actores/Actrices por popularidad", action="listado_tmdb", + search={'url': 'person/popular', 'language': langt, 'page': 1})) + itemlist.append(item.clone(title="Próximamente", action="listado_tmdb", + search={'url': item.extra + "/upcoming", 'language': langt, 'page': 1})) + + if config.get_platform() != "plex": + title = item.contentType.replace("movie", "película").replace("tvshow", "serie") + itemlist.append(item.clone(title="Buscar %s" % title, action="search_", + search={'url': 'search/%s' % item.extra, 'language': langt, 'page': 1})) + + itemlist.append(item.clone(title=" Buscar actor/actriz", action="search_", + search={'url': 'search/person', 'language': langt, 'page': 1})) + if item.extra == "movie": + itemlist.append(item.clone(title=" Buscar director, guionista...", action="search_", + search={'url': "search/person", 'language': langt, 'page': 1}, crew=True)) + + itemlist.append(item.clone(title="Filtro Personalizado", action="filtro", text_color=color4)) + itemlist.append(item.clone(title="Filtro por palabra clave", action="filtro", text_color=color4)) + + return itemlist + + +def imdb(item): + item.contentType = item.extra.replace("tv", "tvshow") + + itemlist = [] + itemlist.append(item.clone(title="Más Populares", action="listado_imdb")) + itemlist.append(item.clone(title="Más Valoradas", action="listado_imdb", + url=item.url + "&num_votes=25000,&sort=user_rating,desc")) + if item.extra == "movie": + itemlist.append(item.clone(title="En Cartelera", action="listado_imdb", + url="http://www.imdb.com/showtimes/location?ref_=inth_ov_sh_sm")) + itemlist.append(item.clone(title="Géneros", action="indices_imdb", + thumbnail="%s0/Genres.png" % images_predef)) + itemlist.append(item.clone(title="Año", action="indices_imdb", thumbnail="%s0/Year.png" % images_predef)) + + if item.extra == "movie": + itemlist.append(item.clone(title="Actores/Actrices por popularidad", action="listado_imdb", + url="http://www.imdb.com/search/name?gender=male,female&ref_=nv_cel_m_3")) + + itemlist.append(item.clone(title="Próximamente", action="listado_imdb", + 
url="http://www.imdb.com/movies-coming-soon/?ref_=shlc_cs")) + + if config.get_platform() != "plex": + title = item.contentType.replace("movie", "película").replace("tvshow", "serie") + itemlist.append(item.clone(title="Buscar %s" % title, action="search_", + url="http://www.imdb.com/search/title?title=" + item.url)) + + itemlist.append(item.clone(title=" Buscar actor/actriz", action="search_", + url="http://www.imdb.com/search/name?name=")) + + itemlist.append(item.clone(title="Filtro Personalizado", action="filtro_imdb", text_color=color4)) + + return itemlist + + +def filmaf(item): + item.contentType = item.extra.replace("tv", "tvshow") + login, message = login_fa() + + itemlist = [] + if item.extra == "movie": + itemlist.append(item.clone(title="Top Filmaffinity", action="listado_fa", extra="top", + url="http://m.filmaffinity.com/%s/topgen.php?genre=&country=&" + "fromyear=&toyear=¬vse=1&nodoc=1" % langf)) + itemlist.append(item.clone(title="En Cartelera", action="listado_fa", + url="http://m.filmaffinity.com/%s/rdcat.php?id=new_th_%s" % (langf, langf))) + itemlist.append(item.clone(title="Géneros", action="indices_fa", url="http://m.filmaffinity.com/%s/topgen.php" + % langf, + thumbnail="%s0/Genres.png" % images_predef)) + else: + itemlist.append(item.clone(title="Top Filmaffinity", action="listado_fa", extra="top", + url="http://m.filmaffinity.com/%s/topgen.php?genre=TV_SE&country=&" + "fromyear=&toyear=&nodoc" % langf)) + itemlist.append(item.clone(title="Series de actualidad", action="listado_fa", + url="http://m.filmaffinity.com/%s/category.php?id=current_tv" % langf)) + + itemlist.append(item.clone(title="Año", action="indices_fa", thumbnail="%s0/Year.png" % images_predef)) + if item.extra == "movie": + itemlist.append(item.clone(title="Próximos Estrenos", action="listado_fa", extra="estrenos", + url="http://m.filmaffinity.com/%s/rdcat.php?id=upc_th_%s" % (langf, langf))) + itemlist.append(item.clone(title="Sagas y Colecciones", action="indices_fa", extra="sagas", + url="http://www.filmaffinity.com/%s/movie-groups-all.php" % langf)) + itemlist.append(item.clone(title="Películas/Series/Documentales por Temas", action="indices_fa", + url='http://m.filmaffinity.com/%s/topics.php' % langf, text_color=color3)) + if config.get_platform() != "plex": + itemlist.append(item.clone(title="Buscar Películas/Series", action="search_", text_color=color4, + url="http://m.filmaffinity.com/%s/search.php?stype=title&stext=" % langf)) + + itemlist.append(item.clone(title=" Buscar por actor/actriz", action="search_", text_color=color4, + url="http://m.filmaffinity.com/%s/search.php?stype=cast&stext=" % langf)) + itemlist.append(item.clone(title=" Buscar por director", action="search_", text_color=color4, + url="http://m.filmaffinity.com/%s/search.php?stype=director&stext=" % langf)) + + itemlist.append(item.clone(title="Filtro Personalizado", action="filtro_fa", text_color=color4, extra="top")) + itemlist.append(item.clone(title="Mi cuenta", action="cuenta_fa", text_color=color3)) + + return itemlist + + +def trakt(item): + itemlist = [] + item.text_color = color1 + token_auth = config.get_setting("token_trakt", "tvmoviedb") + page = "?page=1&limit=20&extended=full" + if not item.extra: + item.extra = "movie" + itemlist.append(item.clone(title="Películas", action="", text_color=color2)) + itemlist.append(item.clone(title=" Más Populares", action="acciones_trakt", url="movies/popular%s" % page)) + itemlist.append( + item.clone(title=" Viéndose Ahora", action="acciones_trakt", url="movies/trending%s" % 
page)) + itemlist.append(item.clone(title=" Más Vistas", action="acciones_trakt", url="movies/watched/all%s" % page)) + itemlist.append( + item.clone(title=" Más Esperadas", action="acciones_trakt", url="movies/anticipated%s" % page)) + if token_auth: + itemlist.append(item.clone(title=" Recomendaciones personalizadas", action="acciones_trakt", + url="recommendations/movies?limit=100&extended=full", pagina=0)) + itemlist.append(item.clone(title="Series", action="", text_color=color2)) + item.extra = "show" + itemlist.append(item.clone(title=" Más Populares", action="acciones_trakt", url="shows/popular%s" % page)) + itemlist.append(item.clone(title=" Viéndose Ahora", action="acciones_trakt", url="shows/trending%s" % page)) + itemlist.append(item.clone(title=" Más Vistas", action="acciones_trakt", url="shows/watched/all%s" % page)) + itemlist.append( + item.clone(title=" Más Esperadas", action="acciones_trakt", url="shows/anticipated%s" % page)) + if token_auth: + itemlist.append(item.clone(title=" Recomendaciones personalizadas", action="acciones_trakt", + url="recommendations/shows?limit=100&extended=full", pagina=0)) + itemlist.append(item.clone(title=" Mi Cuenta", text_color=color2, extra="cuenta")) + else: + item.extra = "movie" + # Se comprueba si existe un token guardado y sino se ejecuta el proceso de autentificación + if not token_auth: + folder = (config.get_platform() == "plex") + itemlist.append(item.clone(title="Vincula tu cuenta trakt", action="auth_trakt", folder=folder)) + else: + itemlist.append(item.clone(title="Watchlists", action="", text_color=color2)) + itemlist.append( + item.clone(title=" Películas", action="acciones_trakt", url="users/me/watchlist/movies%s" % page, + order="added", how="desc")) + itemlist.append( + item.clone(title=" Series", action="acciones_trakt", url="users/me/watchlist/shows%s" % page, + extra="show", + order="added", how="desc")) + itemlist.append(item.clone(title="Vistas", action="", text_color=color2)) + itemlist.append( + item.clone(title=" Películas", action="acciones_trakt", url="users/me/watched/movies%s" % page, + order="added", how="desc")) + itemlist.append( + item.clone(title=" Series", action="acciones_trakt", url="users/me/watched/shows%s" % page, + extra="show", + order="added", how="desc")) + itemlist.append(item.clone(title="En mi Colección", action="", text_color=color2)) + itemlist.append( + item.clone(title=" Películas", action="acciones_trakt", url="users/me/collection/movies%s" % page, + order="added", how="desc")) + itemlist.append( + item.clone(title=" Series", action="acciones_trakt", url="users/me/collection/shows%s" % page, + extra="show", + order="added", how="desc")) + itemlist.append( + item.clone(title="Mis listas", action="acciones_trakt", url="users/me/lists", text_color=color2)) + + return itemlist + + +def mal(item): + itemlist = [] + item.text_color = color1 + login, message, user = login_mal() + if login: + item.login = True + + itemlist.append( + item.clone(title="Top Series", url="https://myanimelist.net/topanime.php?type=tv&limit=0", action="top_mal", + contentType="tvshow", extra="tv")) + itemlist.append(item.clone(title="Top Películas", url="https://myanimelist.net/topanime.php?type=movie&limit=0", + action="top_mal", + contentType="movie", extra="movie")) + itemlist.append( + item.clone(title="Top Ovas", url="https://myanimelist.net/topanime.php?type=ova&limit=0", action="top_mal", + contentType="tvshow", extra="tv", tipo="ova")) + itemlist.append( + item.clone(title="Más Populares", 
url="https://myanimelist.net/topanime.php?type=bypopularity&limit=0", + action="top_mal")) + itemlist.append(item.clone(title="Más Esperados", url="https://myanimelist.net/topanime.php?type=upcoming&limit=0", + action="top_mal")) + itemlist.append(item.clone(title="Anime por Temporadas", url="", action="indices_mal")) + itemlist.append(item.clone(title="Anime por Géneros", url="", action="indices_mal")) + if config.get_platform() != "plex": + itemlist.append(item.clone(title="Buscar Series/Películas/Ovas", url="https://myanimelist.net/anime.php?q=", + action="search_")) + itemlist.append(item.clone(title="Filtro Personalizado", action="filtro_mal", text_color=color4)) + + itemlist.append(item.clone(title="Mis listas", action="cuenta_mal", text_color=color3)) + + return itemlist + + +##-------------------- SECCION TMDB ------------------------## +def listado_tmdb(item): + # Listados principales de la categoría Tmdb (Más populares, más vistas, etc...) + itemlist = [] + item.text_color = color1 + item.fanart = default_fan + if not item.pagina: + item.pagina = 1 + + # Listado de actores + if 'nm' in item.infoLabels['imdb_id']: + try: + ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) + id_cast = ob_tmdb.result["person_results"][0]["id"] + if item.contentType == "movie": + item.search = {'url': 'discover/movie', 'with_cast': id_cast, 'page': item.pagina, + 'sort_by': 'primary_release_date.desc', 'language': langt} + else: + item.search = {'url': 'person/%s/tv_credits' % id_cast, 'language': langt} + ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) + except: + pass + else: + ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) + + # Sagas y colecciones + if "collection" in item.search["url"]: + try: + new_item = item.clone(action="", url='') + new_item.infoLabels["plot"] = ob_tmdb.result["overview"] + itemlist.append(new_item) + for parte in ob_tmdb.result["parts"]: + new_item = item.clone(action="detalles") + new_item.infoLabels = ob_tmdb.get_infoLabels(new_item.infoLabels, origen=parte) + if new_item.infoLabels['thumbnail']: + new_item.thumbnail = new_item.infoLabels['thumbnail'] + if new_item.infoLabels['fanart']: + new_item.fanart = new_item.infoLabels['fanart'] + + if new_item.infoLabels['year']: + new_item.title = "%s (%s) [COLOR %s]%s[/COLOR]" \ + % (new_item.contentTitle, new_item.infoLabels['year'], color6, + str(new_item.infoLabels['rating']).replace("0.0", "")) + else: + new_item.title = "%s [COLOR %s]%s[/COLOR]" \ + % (new_item.contentTitle, color6, new_item.infoLabels['rating'].replace("0.0", "")) + itemlist.append(new_item) + except: + pass + else: + try: + orden = False + # Si se hace una búsqueda por actores o directores, se extraen esos resultados + if "cast" in ob_tmdb.result and not item.crew: + ob_tmdb.results = ob_tmdb.result["cast"] + orden = True + elif "crew" in ob_tmdb.result and item.crew: + ob_tmdb.results = ob_tmdb.result["crew"] + orden = True + for i in range(0, len(ob_tmdb.results)): + new_item = item.clone(action="detalles", url='', infoLabels={'mediatype': item.contentType}) + new_item.infoLabels = ob_tmdb.get_infoLabels(new_item.infoLabels, origen=ob_tmdb.results[i]) + # Si no hay sinopsis en idioma elegido, buscar en el alternativo + if not new_item.infoLabels["plot"] and not 'person' in item.search["url"]: + ob_tmdb2 = Tmdb(id_Tmdb=new_item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=langt_alt) + new_item.infoLabels["plot"] = ob_tmdb2.get_sinopsis() + if 
new_item.infoLabels['thumbnail']: + new_item.thumbnail = new_item.infoLabels['thumbnail'] + elif new_item.infoLabels['profile_path']: + new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + new_item.infoLabels['profile_path'] + new_item.infoLabels['profile_path'] = '' + new_item.plot = new_item.infoLabels["biography"] + if not item.search.get('with_cast', '') and not item.search.get('with_crew', ''): + if item.contentType == "movie": + new_item.action = "listado_tmdb" + cast = 'with_cast' + if item.crew: + cast = 'with_crew' + new_item.search = {'url': 'discover/movie', cast: new_item.infoLabels['tmdb_id'], + 'sort_by': 'primary_release_date.desc', 'language': langt, + 'page': item.pagina} + else: + new_item.action = "listado_tmdb" + new_item.search = {'url': 'person/%s/tv_credits' % new_item.infoLabels['tmdb_id'], + 'language': langt} + + elif not new_item.infoLabels['thumbnail'] and not new_item.infoLabels['profile_path']: + new_item.thumbnail = '' + if new_item.infoLabels['fanart']: + new_item.fanart = new_item.infoLabels['fanart'] + + if not 'person' in item.search["url"] or 'tv_credits' in item.search["url"]: + if new_item.infoLabels['year']: + new_item.title = "%s (%s) [COLOR %s]%s[/COLOR]" \ + % (new_item.contentTitle, new_item.infoLabels['year'], color6, + str(new_item.infoLabels['rating']).replace("0.0", "")) + else: + new_item.title = "%s [COLOR %s]%s[/COLOR]" \ + % (new_item.contentTitle, color6, + new_item.infoLabels['rating'].replace("0.0", "")) + else: + # Si es una búsqueda de personas se incluye en el título y fanart una película por la que es conocido + known_for = ob_tmdb.results[i].get("known_for") + if known_for: + from random import randint + random = randint(0, len(known_for) - 1) + new_item.title = "%s [COLOR %s](%s)[/COLOR]" \ + % (new_item.contentTitle, color6, + known_for[random].get("title", known_for[random].get("name"))) + if known_for[random]["backdrop_path"]: + new_item.fanart = 'http://image.tmdb.org/t/p/original' + known_for[random]["backdrop_path"] + else: + new_item.title = new_item.contentTitle + itemlist.append(new_item) + except: + import traceback + logger.error(traceback.format_exc()) + + if orden: + itemlist.sort(key=lambda item: item.infoLabels["year"], reverse=True) + if "page" in item.search and ob_tmdb.total_pages > item.search["page"]: + item.search["page"] += 1 + itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página siguiente", + search=item.search, extra=item.extra, pagina=item.pagina + 1, + contentType=item.contentType)) + + return itemlist + + +def detalles(item): + itemlist = [] + images = {} + data = "" + # Si viene de seccion imdb + if not item.infoLabels["tmdb_id"]: + headers = [['Accept-Language', langi]] + data = httptools.downloadpage("http://www.imdb.com/title/" + item.infoLabels['imdb_id'], headers=headers, + replace_headers=True).data + pics = scrapertools.find_single_match(data, 'showAllVidsAndPics.*?href=".*?(tt\d+)') + # Imágenes imdb + if pics: + images["imdb"] = {'url': 'http://www.imdb.com/_json/title/%s/mediaviewer' % pics} + + ob_tmdb = Tmdb(external_id=item.infoLabels["imdb_id"], external_source="imdb_id", tipo=item.extra, + idioma_busqueda=langt) + item.infoLabels["tmdb_id"] = ob_tmdb.get_id() + + ob_tmdb = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda=langt) + + try: + item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) + # Si no hay sinopsis en idioma elegido, buscar en el alternativo + if not item.infoLabels["plot"]: + item.infoLabels["plot"] = 
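# listado_tmdb() pages by bumping search["page"] while the Tmdb object still
# reports more total_pages. The advance step on its own (the dict layout
# matches the channel's search dicts):
def next_page(search, total_pages):
    if "page" in search and total_pages > search["page"]:
        search = dict(search)
        search["page"] += 1
        return search
    return None

print(next_page({'url': 'movie/popular', 'language': 'es', 'page': 1}, 40))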
ob_tmdb.get_sinopsis(idioma_alternativo=langt_alt) + except: + pass + if not item.fanart and item.infoLabels['fanart']: + item.fanart = item.infoLabels['fanart'] + if item.infoLabels['thumbnail']: + item.thumbnail = item.infoLabels['thumbnail'] + + # Sinopsis, votos de imdb + if data: + plot = scrapertools.find_single_match(data, 'class="inline canwrap" itemprop="description">(.*?)</div>') + plot = scrapertools.htmlclean(plot) + plot = re.sub(r'(?i)<em[^>]+>|\n|\s{2}', ' ', plot).strip() + if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): + item.infoLabels['plot'] += " (TMDB)\n" + plot + " (IMDB)" + elif plot and not item.infoLabels['plot']: + item.infoLabels['plot'] = plot + rating = scrapertools.find_single_match(data, 'itemprop="ratingValue">([^<]+)<') + if rating: + item.infoLabels['rating'] = float(rating.replace(",", ".")) + votos = scrapertools.find_single_match(data, 'itemprop="ratingCount">([^<]+)<') + if votos: + item.infoLabels['votes'] = votos + + if item.infoLabels['tagline']: + itemlist.append(item.clone(title="--- %s ---" % item.infoLabels['tagline'], text_color="0xFFFF8C00", action="")) + + title = item.contentType.replace("movie", "película").replace("tvshow", "serie") + # Búsqueda por títulos idioma elegido y/o versión original y español + itemlist.append(item.clone(action="busqueda", title="Buscar %s en alfa: %s" % (title, item.contentTitle))) + if item.infoLabels['originaltitle'] and item.contentTitle != item.infoLabels['originaltitle']: + itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels['originaltitle'], + title=" Buscar por su nombre original: %s" % item.infoLabels['originaltitle'])) + + if langt != "es" and langt != "en" and item.infoLabels["tmdb_id"]: + tmdb_lang = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda='es') + if tmdb_lang.result.get("title") and tmdb_lang.result["title"] != item.contentTitle \ + and tmdb_lang.result["title"] != item.infoLabels['originaltitle']: + tmdb_lang = tmdb_lang.result["title"] + itemlist.append(item.clone(action="busqueda", title=" Buscar por su título en español: %s" % tmdb_lang, + contentTitle=tmdb_lang)) + + # En caso de serie, opción de info por temporadas + if item.contentType == "tvshow" and item.infoLabels['tmdb_id']: + itemlist.append(item.clone(action="info_seasons", text_color=color4, + title="Info de temporadas [%s]" % item.infoLabels["number_of_seasons"])) + # Opción de ver el reparto y navegar por sus películas/series + if item.infoLabels['tmdb_id']: + itemlist.append(item.clone(action="reparto", title="Ver Reparto", text_color=color4, + infoLabels={'tmdb_id': item.infoLabels['tmdb_id'], + 'mediatype': item.contentType})) + + if config.is_xbmc(): + item.contextual = True + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color=color5)) + + try: + images['tmdb'] = ob_tmdb.result["images"] + itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images, + extra="menu")) + except: + pass + + try: + if item.contentType == "movie" and item.infoLabels["year"] < 2014: + post_url = "https://theost.com/search/custom/?key=%s&year=%s&country=0&genre=0" % ( + item.infoLabels['originaltitle'].replace(" ", "+"), item.infoLabels["year"]) + url = "https://nl.hideproxy.me/includes/process.php?action=update" + post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote( + post_url) + while True: + 
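+                # Follow the proxy's redirects by hand: every response that still
+                # carries a "location" header becomes the next request URL (the POST
+                # body is only sent on the first hop); the first non-redirect
+                # response is the soundtrack search page served through the proxy.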
response = httptools.downloadpage(url, post, follow_redirects=False) + if response.headers.get("location"): + url = response.headers["location"] + post = "" + else: + data_music = response.data + break + + url_album = scrapertools.find_single_match(data_music, 'album(?:|s) on request.*?href="([^"]+)"') + if url_album: + url_album = "https://nl.hideproxy.me" + url_album + itemlist.append( + item.clone(action="musica_movie", title="Escuchar BSO - Lista de canciones", url=url_album, + text_color=color5)) + except: + pass + + token_auth = config.get_setting("token_trakt", "tvmoviedb") + if token_auth: + itemlist.append(item.clone(title="Gestionar con tu cuenta Trakt", action="menu_trakt")) + + itemlist.append(item.clone(title="", action="")) + # Es parte de una colección + try: + if ob_tmdb.result.get("belongs_to_collection"): + new_item = item.clone(search='', infoLabels={'mediatype': item.contentType}) + saga = ob_tmdb.result["belongs_to_collection"] + new_item.infoLabels["tmdb_id"] = saga["id"] + if saga["poster_path"]: + new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] + if saga["backdrop_path"]: + new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] + new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} + itemlist.append(new_item.clone(title="Es parte de: %s" % saga["name"], action="listado_tmdb", + text_color=color5)) + except: + pass + + # Películas/Series similares y recomendaciones + if item.infoLabels['tmdb_id']: + title = title.replace("película", "Películas").replace("serie", "Series") + itemlist.append(item.clone(title="%s similares" % title, action="listado_tmdb", + search={'url': '%s/%s/similar' % (item.extra, item.infoLabels['tmdb_id']), + 'language': langt, 'page': 1}, infoLabels={'mediatype': item.contentType}, + text_color=color2)) + itemlist.append( + item.clone(title="Recomendaciones", action="listado_tmdb", infoLabels={'mediatype': item.contentType}, + search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), + 'language': langt, 'page': 1}, text_color=color2)) + + return itemlist + + +def reparto(item): + # Actores y equipo de rodaje de una película/serie + itemlist = [] + item.text_color = color1 + item.search = {'url': '%s/%s/credits' % (item.extra, item.infoLabels['tmdb_id'])} + ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt) + + try: + cast = ob_tmdb.result["cast"] + if cast: + itemlist.append(item.clone(title="Actores/Actrices", action="", text_color=color2)) + for actor in cast: + new_item = item.clone(action="listado_tmdb", fanart=default_fan) + new_item.title = " " + actor["name"] + " as " + actor["character"] + if actor["profile_path"]: + new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + actor["profile_path"] + if item.contentType == "movie": + new_item.search = {'url': 'discover/movie', 'with_cast': actor['id'], + 'language': langt, 'page': 1, + 'sort_by': 'primary_release_date.desc'} + else: + new_item.search = {'url': 'person/%s/tv_credits' % actor['id'], 'language': langt} + itemlist.append(new_item) + except: + pass + + try: + crew = ob_tmdb.result["crew"] + if crew: + itemlist.append(item.clone(title="Equipo de rodaje", action="", text_color=color2)) + for c in crew: + new_item = item.clone(action="listado_tmdb", fanart=default_fan) + new_item.title = " " + c["job"] + ": " + c["name"] + if c["profile_path"]: + new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + c["profile_path"] + if item.contentType == 
"movie": + new_item.search = {'url': 'discover/movie', 'with_crew': c['id'], 'page': 1, + 'sort_by': 'primary_release_date.desc'} + else: + new_item.search = {'url': 'person/%s/tv_credits' % c['id'], 'language': langt} + new_item.crew = True + itemlist.append(new_item) + except: + pass + + return itemlist + + +def info_seasons(item): + # Info de temporadas y episodios + itemlist = [] + item.text_color = color4 + ob_tmdb = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo="tv", idioma_busqueda=langt) + + for temp in range(item.infoLabels["number_of_seasons"], 0, -1): + temporada = ob_tmdb.get_temporada(temp) + if temporada: + new_item = item.clone(action="", mediatype="season") + new_item.infoLabels['title'] = temporada['name'] + new_item.infoLabels['season'] = temp + if temporada['overview']: + new_item.infoLabels['plot'] = temporada['overview'] + if temporada['air_date']: + date = temporada['air_date'].split('-') + new_item.infoLabels['aired'] = date[2] + "/" + date[1] + "/" + date[0] + new_item.infoLabels['year'] = date[0] + if temporada['poster_path']: + new_item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path'] + new_item.thumbnail = new_item.infoLabels['poster_path'] + new_item.title = "Temporada %s" % temp + itemlist.append(new_item) + + for epi in range(1, len(temporada["episodes"])): + episodio = ob_tmdb.get_episodio(temp, epi) + if episodio: + new_item = item.clone(action="", text_color=color1, mediatype="episode") + new_item.infoLabels['season'] = temp + new_item.infoLabels['episode'] = epi + new_item.infoLabels['title'] = episodio['episodio_titulo'] + if episodio['episodio_sinopsis']: + new_item.infoLabels['plot'] = episodio['episodio_sinopsis'] + if episodio['episodio_imagen']: + new_item.infoLabels['poster_path'] = episodio['episodio_imagen'] + new_item.thumbnail = new_item.infoLabels['poster_path'] + if episodio['episodio_air_date']: + new_item.infoLabels['aired'] = episodio['episodio_air_date'] + new_item.infoLabels['year'] = episodio['episodio_air_date'].rsplit("/", 1)[1] + if episodio['episodio_vote_average']: + new_item.infoLabels['rating'] = episodio['episodio_vote_average'] + new_item.infoLabels['votes'] = episodio['episodio_vote_count'] + new_item.title = " %sx%s - %s" % (temp, epi, new_item.infoLabels['title']) + itemlist.append(new_item) + + return itemlist + + +def indices_tmdb(item): + # Indices por genero y año + itemlist = [] + from datetime import datetime + if "Géneros" in item.title: + thumbnail = {} + url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' + % (item.extra, langt)) + try: + lista = jsontools.load(httptools.downloadpage(url, cookies=False).data)["genres"] + lista_generos = {} + for l in lista: + lista_generos[str(l["id"])] = l["name"] + if "es" in langt: + thumbnail[str(l["id"])] = "%s1/%s.jpg" % (images_predef, l["name"].lower() \ + .replace("ó", "o").replace("í", "i") \ + .replace(" ", "%20").replace("Aventuras", "Aventura") + .replace("ú", "u")) + else: + thumbnail[str(l["id"])] = "%s2/%s.jpg" % (images_predef, l["name"]) + except: + pass + + fecha = datetime.now().strftime('%Y-%m-%d') + sort_by = 'release_date.desc' + param_year = 'release_date.lte' + if item.contentType == 'tvshow': + sort_by = 'first_air_date.desc' + param_year = 'air_date.lte' + for key, value in lista_generos.items(): + new_item = item.clone() + new_item.title = value + new_item.thumbnail = thumbnail[key] + new_item.search = {'url': 'discover/%s' % item.extra, 'with_genres': key, 
'sort_by': sort_by, + param_year: fecha, + 'language': langt, 'page': 1} + itemlist.append(new_item) + + itemlist.sort(key=lambda item: item.title) + else: + year = datetime.now().year + 3 + for i in range(year, 1899, -1): + if item.contentType == 'tvshow': + param_year = 'first_air_date_year' + else: + param_year = 'primary_release_year' + search = {'url': 'discover/%s' % item.extra, param_year: i, 'language': langt, 'page': 1} + itemlist.append(item.clone(title=str(i), action='listado_tmdb', search=search)) + + return itemlist + + +def filtro(item): + logger.info() + + from datetime import datetime + list_controls = [] + valores = {} + + dict_values = None + + list_controls.append({'id': 'years', 'label': 'Año', 'enabled': True, 'color': '0xFFCC2EFA', + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[0]['lvalues'] = [] + valores['years'] = [] + year = datetime.now().year + 1 + for i in range(1900, year + 1): + list_controls[0]['lvalues'].append(str(i)) + valores['years'].append(str(i)) + list_controls[0]['lvalues'].append('Cualquiera') + valores['years'].append('') + + if "Personalizado" in item.title: + # Se utilizan los valores por defecto/guardados + valores_guardados = config.get_setting("filtro_defecto_" + item.extra, item.channel) + if valores_guardados: + dict_values = valores_guardados + url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' + % (item.extra, langt)) + try: + lista = jsontools.load(httptools.downloadpage(url, cookies=False).data)["genres"] + if lista: + list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'default': None, + 'label': 'Selecciona uno, ninguno o más de un género', + 'visible': True, 'color': '0xFFC52020'}) + for l in lista: + list_controls.append({'id': 'genre' + str(l["id"]), 'label': l["name"], 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + except: + pass + + list_controls.append({'id': 'orden', 'label': 'Ordenar por', 'enabled': True, 'color': '0xFF25AA48', + 'type': 'list', 'default': -1, 'visible': True}) + orden = ['Popularidad Desc', 'Popularidad Asc', 'Año Desc', 'Año Asc', 'Valoración Desc', 'Valoración Asc'] + if item.extra == "movie": + orden.extend(['Título [A-Z]', 'Título [Z-A]']) + orden_tmdb = ['popularity.desc', 'popularity.asc', 'release_date.desc', 'release_date.asc', + 'vote_average.desc', 'vote_average.asc', 'original_title.asc', 'original_title.desc'] + valores['orden'] = [] + list_controls[-1]['lvalues'] = [] + for i, tipo_orden in enumerate(orden): + list_controls[-1]['lvalues'].insert(0, tipo_orden) + valores['orden'].insert(0, orden_tmdb[i]) + + list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, + 'type': 'label', 'default': None, 'visible': True}) + list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + else: + list_controls.append({'id': 'keyword', 'label': 'Palabra Clave', 'enabled': True, + 'type': 'text', 'default': '', 'visible': True}) + + item.valores = valores + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Filtra la búsqueda", item=item, callback='filtrado') + + +def filtrado(item, values): + values_copy = values.copy() + # Guarda el filtro para que sea el que se cargue por defecto + if "save" in values and values["save"]: + values_copy.pop("save") + config.set_setting("filtro_defecto_" + item.extra, values_copy, 
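+                           # the saved dict is what filtro() later reads back via
+                           # config.get_setting(...) and passes to the dialog as
+                           # dict_values, so the next run opens with these choices preset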
item.channel) + + year = item.valores["years"][values["years"]] + if "Personalizado" in item.title: + orden = item.valores["orden"][values["orden"]] + if item.extra == "tv": + orden = orden.replace('release_date', 'first_air_date') + + genero_ids = [] + for v in values: + if "genre" in v: + if values[v]: + genero_ids.append(v.replace('genre', '')) + genero_ids = ",".join(genero_ids) + + if "clave" in item.title: + item.search = {'url': 'search/%s' % item.extra, 'year': year, 'query': values["keyword"], + 'language': langt, 'page': 1} + elif item.extra == "movie": + item.search = {'url': 'discover/%s' % item.extra, 'sort_by': orden, 'primary_release_year': year, + 'with_genres': genero_ids, 'vote_count.gte': '10', 'language': langt, 'page': 1} + else: + item.search = {'url': 'discover/%s' % item.extra, 'sort_by': orden, 'first_air_date_year': year, + 'with_genres': genero_ids, 'vote_count.gte': '10', 'language': langt, 'page': 1} + + item.action = "listado_tmdb" + return listado_tmdb(item) + + +def musica_movie(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + patron = '<td class="left">([^<]+)<br><small>([^<]+)</small>.*?<td>(\d+:\d+).*?<p id="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for titulo, artist, duration, id_p in matches: + title = "%s (%s) [%s]" % (titulo, artist, duration) + url = scrapertools.find_single_match(data, "AudioPlayer.embed\('%s'.*?soundFile: '([^']+)'" % id_p) + itemlist.append(Item(channel=item.channel, action="play", server="directo", url=url, title=title, + thumbnail=item.thumbnail, fanart=item.fanart, text_color=color5)) + return itemlist + + +##-------------------- SECCION IMDB ------------------------## +def listado_imdb(item): + # Método principal para secciones de imdb + itemlist = [] + item.text_color = color1 + + headers = [['Accept-Language', langi]] + if "www.imdb.com" in item.url: + data = httptools.downloadpage(item.url, headers=headers, replace_headers=True).data + else: + url = 'http://www.imdb.com/search/title?' 
+ item.url + data = httptools.downloadpage(url, headers=headers, replace_headers=True).data + + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + # Listado de actores + if 'search/name' in item.url: + patron = '<td class="image">.*?src="([^"]+)".*?href="/name/(nm\d+).*?>([^<]+)<.*?href.*?>([^<]+)</a>' \ + '</span>(.*?)</td>' + matches = scrapertools.find_multiple_matches(data, patron) + for thumbnail, imdb_id, title, movie, datos in matches: + new_item = item.clone(action='listado_tmdb') + try: + if "@" in thumbnail: + thumbnail = thumbnail.rsplit('@', 1)[0] + thumbnail += "@._UX482.jpg" + elif "._V1_" in thumbnail: + thumbnail = thumbnail.rsplit('._V1_', 1)[0] + thumbnail += "._V1_UX482.jpg" + except: + pass + new_item.thumbnail = thumbnail + + datos = datos.strip() + if datos: + new_item.infoLabels['plot'] = scrapertools.htmlclean(datos) + new_item.title = title.strip() + ' [COLOR %s](%s)[/COLOR]' % (color6, movie.strip()) + new_item.infoLabels['imdb_id'] = imdb_id + new_item.search = {'url': 'find/%s' % imdb_id, 'external_source': 'imdb_id', 'language': langt} + itemlist.append(new_item) + else: + patron = '(?:<div class="image">|<div class="lister-item-image).*?(?:loadlate="([^"]+)"|src="([^"]+)")' \ + '.*?href=".*?/(tt\d+).*?>([^<]+)</a>(.*?)(?:<p class="(?:text-muted|)">([^<]+)|"description">([^<]+)<)' + matches = scrapertools.find_multiple_matches(data, patron) + for thumbnail, thumb2, imdb_id, title, datos, plot, plot2 in matches: + + new_item = item.clone(action='detalles') + new_item.title = title.strip() + if not thumbnail: + thumbnail = thumb2 + try: + if "@" in thumbnail: + thumbnail = thumbnail.rsplit('@', 1)[0] + thumbnail += "@._UX482.jpg" + elif "._V1_" in thumbnail: + thumbnail = thumbnail.rsplit('._V1_', 1)[0] + thumbnail += "._V1_UX482.jpg" + except: + pass + new_item.thumbnail = thumbnail + + if not plot: + plot = plot2 + new_item.infoLabels['plot'] = scrapertools.htmlclean(plot.strip()) + + generos = scrapertools.find_multiple_matches(datos, 'genre">([^<]+)<') + if generos: + new_item.infoLabels["genre"] = ", ".join(generos) + duracion = scrapertools.find_single_match(datos, '(\d+) min') + if duracion: + new_item.infoLabels['duration'] = int(duracion) * 60 + + new_item.infoLabels['year'] = scrapertools.find_single_match(new_item.title, '\((\d{4})') + if not new_item.infoLabels['year']: + new_item.infoLabels['year'] = scrapertools.find_single_match(datos, 'year.*?\((\d{4})') + if new_item.infoLabels['year']: + new_item.title += ' (%s)' % new_item.infoLabels['year'] + + rating = scrapertools.find_single_match(datos, '(?:rating|Metascore).*?<strong>([^<]*)</strong>') + rating = rating.replace(",", ".") + if rating: + if not "." 
in rating: + try: + rating = float(rating) / 10 + except: + rating = None + if rating: + new_item.title += " [COLOR %s]%s[/COLOR]" % (color6, str(rating)) + new_item.infoLabels['rating'] = float(rating) + new_item.infoLabels['imdb_id'] = imdb_id + itemlist.append(new_item) + + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]*>Next') + if next_page: + if not "title_type" in item.url: + next_page = 'http://www.imdb.com' + next_page + else: + next_page = 'http://www.imdb.com/search/title' + next_page + itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color='')) + + return itemlist + + +def filtro_imdb(item): + logger.info() + + from datetime import datetime + list_controls = [] + valores = {} + + dict_values = None + # Se utilizan los valores por defecto/guardados + valores_guardados = config.get_setting("filtro_defecto_imdb_" + item.extra, item.channel) + if valores_guardados: + dict_values = valores_guardados + + list_controls.append({'id': 'title', 'label': 'Título', 'enabled': True, + 'type': 'text', 'default': '', 'visible': True}) + + list_controls.append({'id': 'yearsdesde', 'label': 'Año desde:', 'enabled': True, 'color': '0xFFCC2EFA', + 'type': 'list', 'default': -1, 'visible': True}) + list_controls.append({'id': 'yearshasta', 'label': 'Año hasta:', 'enabled': True, 'color': '0xFF2ECCFA', + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[1]['lvalues'] = [] + list_controls[2]['lvalues'] = [] + valores['years'] = [] + year = datetime.now().year + 1 + for i in range(1900, year + 1): + list_controls[1]['lvalues'].append(str(i)) + list_controls[2]['lvalues'].append(str(i)) + valores['years'].append(str(i)) + list_controls[1]['lvalues'].append('Cualquiera') + list_controls[2]['lvalues'].append('Cualquiera') + valores['years'].append('') + + try: + generos_spa = {'Action': 'Acción', 'Adventure': 'Aventura', 'Animation': 'Animación', 'Biography': 'Biografía', + 'Comedy': 'Comedia', 'Crime': 'Crimen', 'Documentary': 'Documental', 'Family': 'Familia', + 'Fantasy': 'Fantástico', 'Film-Noir': 'Cine Negro', 'Game-Show': 'Concursos', + 'History': 'Historia', 'Horror': 'Terror', 'Music': 'Música', 'Mistery': 'Intriga', + 'News': 'Noticias', 'Reality-TV': 'Reality', 'Sci-Fi': 'Ciencia Ficción', 'Sport': 'Deportes', + 'Talk-Show': 'Entrevistas', 'War': 'Cine Bélico'} + data = httptools.downloadpage("http://www.imdb.com/search/title", cookies=False).data + bloque = scrapertools.find_single_match(data, '<h3>Genres</h3>(.*?)</table>') + matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>\s*<label.*?>([^<]+)<') + if matches: + list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'visible': True, + 'label': 'Selecciona uno, ninguno o más de un género', 'color': '0xFFC52020'}) + lista = [] + for valor, titulo in matches: + titulo = generos_spa.get(titulo, titulo) + lista.append([valor, titulo]) + lista.sort(key=lambda lista: lista[1]) + for valor, titulo in lista: + list_controls.append({'id': 'genre' + valor, 'label': titulo, 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + except: + pass + + list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, + 'type': 'label', 'default': None, 'visible': True}) + try: + bloque = scrapertools.find_single_match(data, '<h3>Countries</h3>(.*?)Less-Common') + matches = scrapertools.find_multiple_matches(bloque, ' value="([^"]+)"\s*>([^<]+)<') + if matches: + list_controls.append({'id': 'pais', 'label': 'País', 'enabled': 
True, 'color': '0xFFFF8000', + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[-1]['lvalues'] = [] + list_controls[-1]['lvalues'].append('Cualquiera') + valores['pais'] = [] + valores['pais'].append('') + for valor, titulo in matches: + list_controls[-1]['lvalues'].insert(0, titulo) + valores['pais'].insert(0, valor) + + except: + pass + + list_controls.append({'id': 'votos', 'label': 'Número mínimo de votos', 'enabled': True, + 'type': 'text', 'default': '10000', 'visible': True, 'color': '0xFFF4FA58'}) + + list_controls.append({'id': 'orden', 'label': 'Ordenar por', 'enabled': True, 'color': '0xFF25AA48', + 'type': 'list', 'default': -1, 'visible': True}) + orden = ['Popularidad Desc', 'Popularidad Asc', 'Año Desc', 'Año Asc', 'Valoración Desc', 'Valoración Asc', + 'Título [A-Z]', 'Título [Z-A]'] + + orden_imdb = ['moviemeter,asc', 'moviemeter,desc', 'year,desc', 'year,asc', + 'user_rating,desc', 'user_rating,asc', 'alpha,asc', 'alpha,desc'] + valores['orden'] = [] + list_controls[-1]['lvalues'] = [] + for i, tipo_orden in enumerate(orden): + list_controls[-1]['lvalues'].insert(0, tipo_orden) + valores['orden'].insert(0, orden_imdb[i]) + + list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + + item.valores = valores + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Filtra la búsqueda", item=item, callback='filtrado_imdb') + + +def filtrado_imdb(item, values): + values_copy = values.copy() + # Guarda el filtro para que sea el que se cargue por defecto + if "save" in values and values["save"]: + values_copy.pop("save") + config.set_setting("filtro_defecto_imdb_" + item.extra, values_copy, item.channel) + + yeard = item.valores["years"][values["yearsdesde"]] + yearh = item.valores["years"][values["yearshasta"]] + orden = item.valores["orden"][values["orden"]] + pais = item.valores["pais"][values["pais"]] + + genero_ids = [] + for v in values: + if "genre" in v: + if values[v]: + genero_ids.append(v.replace('genre', '')) + genero_ids = ",".join(genero_ids) + try: + votos = int(values["votos"]) + except: + votos = "" + + item.url = 'http://www.imdb.com/search/title?countries=%s&num_votes=%s,&genres=%s&release_date=%s,%s&sort=%s&' \ + 'title=%s&title_type=' % (pais, str(votos), genero_ids, yeard, yearh, orden, values["title"]) + if item.contentType == "movie": + item.url += "feature,tv_movie" + else: + item.url += "tv_series,tv_special,mini_series" + + item.action = "listado_imdb" + return listado_imdb(item) + + +def indices_imdb(item): + # Índices imdb por año y genero + itemlist = [] + from datetime import datetime + if "Géneros" in item.title: + generos_spa = {'Action': 'Accion', 'Adventure': 'Aventura', 'Animation': 'Animacion', 'Biography': 'Biografía', + 'Comedy': 'Comedia', 'Crime': 'Crimen', 'Documentary': 'Documental', 'Family': 'Familia', + 'Fantasy': 'Fantasia', 'Film-Noir': 'Cine Negro', 'Game-Show': 'Concursos', + 'History': 'Historia', 'Horror': 'Terror', 'Music': 'Música', 'Mistery': 'Intriga', + 'News': 'Noticias', 'Reality-TV': 'Reality', 'Sci-Fi': 'Ciencia Ficcion', 'Sport': 'Deportes', + 'Talk-Show': 'Entrevistas', 'War': 'Cine Bélico'} + data = httptools.downloadpage("http://www.imdb.com/search/title", cookies=False).data + bloque = scrapertools.find_single_match(data, '<h3>Genres</h3>(.*?)</table>') + matches = scrapertools.find_multiple_matches(bloque, ' 
value="([^"]+)"\s*>\s*<label.*?>([^<]+)<') + if matches: + for valor, titulo in matches: + title = generos_spa.get(titulo, titulo) + thumbnail = "%s2/%s.jpg" % (images_predef, titulo) + itemlist.append(item.clone(title=title, action='listado_imdb', thumbnail=thumbnail, + url='http://www.imdb.com/search/title?genres=%s%s' % (valor, item.url))) + itemlist.sort(key=lambda item: item.title) + else: + year = datetime.now().year + 3 + for i in range(year, 1899, -1): + itemlist.append(item.clone(title=str(i), action='listado_imdb', + url='http://www.imdb.com/search/title?release_date=%s,%s%s' % (i, i, item.url))) + + return itemlist + + +##-------------------- SECCION FILMAFFINITY ------------------------## +def listado_fa(item): + # Método para listados principales de filmaffinity + itemlist = [] + item.text_color = color1 + + # Listados con paginación por post + if item.extra == "top": + if item.page_fa: + post = "from=%s" % item.page_fa + data = httptools.downloadpage(item.url, post).data + if item.total > item.page_fa: + item.page_fa += 30 + else: + item.page_fa = "" + else: + item.page_fa = 30 + data = httptools.downloadpage(item.url).data + item.total = int(scrapertools.find_single_match(data, 'tmResCount\s*=\s*(\d+)')) + if item.total <= item.page_fa: + item.page_fa = "" + + else: + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + votaciones = [] + # Si es la sección de estrenos cambia la estructura del scraper + if item.extra == "estrenos": + patron = '<i class="fa fa-calendar"></i>\s*(\d+[^<]+)<(.*?)(?:<div class="panel panel-default">|' \ + '<div class="text-center")' + bloques = scrapertools.find_multiple_matches(data, patron) + for fecha, bloque in bloques: + itemlist.append(item.clone(title=fecha, action='', text_color="0xFFFF8C00")) + patron = '<a class="list-group-item" href="([^"]+)">.*?(?:data-src="([^"]+)"|' \ + 'src="((?!/images/empty.gif)[^"]+)").*?<div class="mc-title.*?>([^<]+)<small>\((\d+)\)</small>' \ + '.*?(?:<div class="avgrat-box in-block ">([^<]+)</div>' \ + '\s*<small class="ratcount-box">(.*?)\s*<|</li>)' + matches = scrapertools.find_multiple_matches(bloque, patron) + for url, thumb, thumb2, title, year, rating, votos in matches: + title = title.strip() + new_item = item.clone(action="detalles_fa", contentType="movie", extra="movie", contentTitle=title) + if not url.startswith("http://m.filmaffinity"): + new_item.url = "http://m.filmaffinity.com" + url + else: + new_item.url = url + + if not thumb: + thumb = thumb2 + new_item.thumbnail = thumb.replace("msmall", "large") + if not new_item.thumbnail.startswith("http"): + new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail + + new_item.title = " " + title + " (%s) [COLOR %s]%s[/COLOR]" % (year, color6, rating) + new_item.infoLabels['year'] = year + votaciones.append([rating, votos]) + if rating: + new_item.infoLabels['rating'] = float(rating.replace(",", ".")) + new_item.infoLabels['votes'] = votos + itemlist.append(new_item) + else: + patron = '(?:<a class="list-group-item[^"]*" href="([^"]+)">|<a href="([^"]+)" class="list-group-item[^"]*">)' \ + '.*?(?:data-src="([^"]+)"|src="((?!/images/empty.gif)[^"]+)").*?' 
\ + '<div class="mc-title.*?>([^<]+)<small>\((\d+)\)</small>.*?(?:<div class="avgrat-box in-block ">' \ + '([^<]+)</div>\s*<small class="ratcount-box">(.*?)\s*<|</li>)' + matches = scrapertools.find_multiple_matches(data, patron) + for url, url2, thumb, thumb2, title, year, rating, votos in matches: + title = title.strip() + new_item = item.clone(action="detalles_fa", extra="movie") + if not url: + url = url2 + if not url.startswith("http://m.filmaffinity"): + new_item.url = "http://m.filmaffinity.com" + url + else: + new_item.url = url + + if not thumb: + thumb = thumb2 + new_item.thumbnail = thumb.replace("msmall", "large") + if not new_item.thumbnail.startswith("http"): + new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail + + new_item.title = title.replace("(Serie de TV)", "").replace("(TV)", "") + " (%s) [COLOR %s]%s[/COLOR]" \ + % (year, color6, rating) + new_item.contentTitle = re.sub(r'(?i)\(serie de tv\)|\(tv\)|\(c\)', '', title) + if re.search(r'(?i)serie de tv|\(tv\)', title): + new_item.contentType = "tvshow" + new_item.extra = "tv" + new_item.infoLabels["tvshowtitle"] = new_item.contentTitle + + new_item.infoLabels['year'] = year + votaciones.append([rating, votos]) + if rating: + new_item.infoLabels['rating'] = float(rating.replace(",", ".")) + new_item.infoLabels['votes'] = votos + itemlist.append(new_item) + + if len(itemlist) < 31: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, True) + for i, it in enumerate(itemlist): + try: + it.infoLabels['votes'] = votaciones[i][1] + it.infoLabels['rating'] = float(votaciones[i][0].replace(",", ".")) + except: + pass + + next_page = scrapertools.find_single_match(data, 'aria-label="Next" href="([^"]+)"') + if next_page: + if not next_page.startswith("http://m.filmaffinity"): + next_page = "http://m.filmaffinity.com" + next_page + + itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=next_page, + extra=item.extra)) + elif item.page_fa: + itemlist.append(item.clone(title=">> Página Siguiente", text_color="")) + return itemlist + + +def indices_fa(item): + # Índices por genero, año, temas y sagas/colecciones + itemlist = [] + item.text_color = color1 + if item.url: + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + if "sagas" in item.extra: + patron = '<li class="fa-shadow">.*?href="([^"]+)".*?group-name">([^<]+)<.*?src="([^"]+)".*?' 
\ + '"count-movies">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for url, title, thumbnail, info in matches: + new_item = item.clone(action="listado_fa") + if not url.startswith("http://www.filmaffinity"): + new_item.url = "http://m.filmaffinity.com" + url + else: + new_item.url = url.replace("www.filmaffinity.com", "m.filmaffinity.com") + + new_item.thumbnail = thumbnail.replace("mmed", "large") + new_item.title = title.strip() + " [COLOR %s](%s)[/COLOR]" % (color6, info) + itemlist.append(new_item) + + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">>>') + if next_page: + if not next_page.startswith("http://www.filmaffinity.com"): + next_page = "http://www.filmaffinity.com" + next_page + itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=next_page, + extra=item.extra)) + elif "Géneros" in item.title: + bloque = scrapertools.find_single_match(data, 'name="genre">.*?</option>(.*?)</select>') + matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)">([^<]+)</option>') + for valor, titulo in matches: + if valor == "TV_SE": + continue + new_item = item.clone(title=titulo, action="listado_fa", extra="top") + new_item.url = "http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=&fromyear=&toyear=&nodoc=1" \ + % (langf, valor) + if item.contentType == "movie": + new_item.url += "¬vse=1" + generos = ['1/accion.jpg', '1/animacion.jpg', '1/aventura.jpg', '1/guerra.jpg', '1/ciencia%20ficcion.jpg', + '2/Film-Noir.jpg', '1/comedia.jpg', '0/Unknown.png', '1/documental.jpg', '1/drama.jpg', + '1/fantasia.jpg', '2/Kids.jpg', '2/Suspense.jpg', '1/musical.jpg', '1/romance.jpg', + '1/terror.jpg', '1/thriler.jpg', '1/western.jpg'] + if langf != "en": + try: + new_item.thumbnail = "%s/%s" % (images_predef, generos[len(itemlist)]) + except: + new_item.thumbnail = "%s1/%s.jpg" % (images_predef, titulo.lower()) + else: + new_item.thumbnail = "%s2/%s.jpg" % (images_predef, titulo) + itemlist.append(new_item) + elif "Temas" in item.title: + bloques = scrapertools.find_multiple_matches(data, '<div class="panel-heading" id="topic_([^"]+)".*?' 
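+                                                    # group 1 captures the topic letter from the
+                                                    # panel heading; group 2, on the next line,
+                                                    # captures that letter's list-group block of
+                                                    # topic links (title plus badge count)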
+ '<div class="list-group">(.*?)</div>') + for letra, bloque in bloques: + patron = 'href="([^"]+)">([^<]+)<.*?"badge">(\d+)</span>' + matches = scrapertools.find_multiple_matches(bloque, patron) + extra = len(matches) + 1 + action = "" + folder = True + if config.is_xbmc(): + action = "move" + folder = False + itemlist.append(item.clone(title=letra, text_color=color2, action=action, extra=extra, folder=folder)) + for url, titulo, numero in matches: + new_item = item.clone(action="temas_fa") + topic_id = scrapertools.find_single_match(url, "topic=(\d+)") + new_item.url = "http://www.filmaffinity.com/%s/%s&attr=all" % ( + langf, url.replace("&nodoc", "").replace("¬vse", "")) + new_item.title = titulo + " (%s)" % numero + itemlist.append(new_item) + else: + from datetime import datetime + year = datetime.now().year + for i in range(year, 1899, -1): + new_item = item.clone(title=str(i), action="listado_fa", extra="top") + genre = '' + if item.contentType == "tvshow": + genre = 'TV_SE' + new_item.url = "http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=&fromyear=%s&toyear=%s&nodoc=1" \ + % (langf, genre, i, i) + if item.contentType == "movie": + new_item.url += "¬vse=1" + itemlist.append(new_item) + + return itemlist + + +def temas_fa(item): + # Películas y series por temas + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + item.infoLabels['plot'] = scrapertools.find_single_match(data, '<p class="description">([^<]+)</p>') + + patron = '<div class="mc-poster">\s*<a href=".*?(\d+)\.html".*?src="([^"]+)".*?' \ + 'href.*?>([^<]+)</a>\s*\((\d+)\)' + matches = scrapertools.find_multiple_matches(data, patron) + for url, thumb, title, year in matches: + title = title.strip() + new_item = item.clone(action="detalles_fa", contentType="movie", extra="movie", text_color=color2) + new_item.url = "http://m.filmaffinity.com/%s/movie.php?id=%s" % (langf, url) + new_item.thumbnail = thumb.replace("msmall", "large") + if not new_item.thumbnail.startswith("http"): + new_item.thumbnail = "http://www.filmaffinity.com" + new_item.thumbnail + new_item.infoLabels["year"] = year + new_item.title = title + " (%s)" % year + if re.search(r'(?i)serie de tv|\(tv\)', title): + new_item.contentType = "tvshow" + new_item.extra = "tv" + new_item.contentTitle = re.sub(r'(?i)\(serie de tv\)|\(tv\)|\(c\)', '', title) + itemlist.append(new_item) + + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">>>') + if next_page: + if not next_page.startswith("http://www.filmaffinity.com"): + next_page = "http://www.filmaffinity.com/%s/%s" % (langf, next_page) + itemlist.append(Item(channel=item.channel, action=item.action, title=">> Página Siguiente", url=next_page)) + + return itemlist + + +def detalles_fa(item): + itemlist = [] + item.plot = "" + rating = item.infoLabels['rating'] + votos = item.infoLabels['votes'] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + # Se extrae el título original para posibles búsquedas en tmdb posteriores + orig_title = scrapertools.find_single_match(data, 'itemprop="datePublished">.*?<dd>([^<]+)</dd>').strip() + if item.contentType == "movie": + item.infoLabels['originaltitle'] = re.sub(r"(?i)\(TV Series\)|\(S\)|\(TV\)", "", orig_title) + else: + item.infoLabels['tvshowtitle'] = re.sub(r"(?i)\(TV Series\)|\(S\)|\(TV\)", "", orig_title) + item_tmdb = item.clone() + + if item.contentType == "movie": + ob_tmdb 
= Tmdb(texto_buscado=item_tmdb.contentTitle, year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, + idioma_busqueda=langt) + if not ob_tmdb.result: + ob_tmdb = Tmdb(texto_buscado=item_tmdb.infoLabels['originaltitle'], year=item_tmdb.infoLabels['year'], + tipo=item_tmdb.extra, idioma_busqueda=langt) + else: + ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, tipo=item_tmdb.extra, idioma_busqueda=langt) + if not ob_tmdb.result: + ob_tmdb = Tmdb(texto_buscado=item_tmdb.infoLabels['tvshowtitle'], tipo=item_tmdb.extra, + idioma_busqueda=langt) + + if ob_tmdb.result: + ob_tmdb = Tmdb(id_Tmdb=ob_tmdb.get_id(), tipo=item_tmdb.extra, idioma_busqueda=langt) + item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) + + # Si no hay sinopsis en idioma elegido, buscar en el alternativo + if not item.infoLabels["plot"]: + item.infoLabels["plot"] = ob_tmdb.get_sinopsis(idioma_alternativo=langt_alt) + + # Se concatena el plot de filmaffinity al de tmdb si lo hay + plot = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + plot = plot.replace("<br><br />", "\n") + plot = scrapertools.decodeHtmlentities(plot).replace(" (FILMAFFINITY)", "") + if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): + item.infoLabels['plot'] += " (TMDB)\n" + plot + " (FILMAFFINITY)" + elif plot and not item.infoLabels['plot']: + item.infoLabels['plot'] = plot + + # Se busca y rellena con la info de filmaffinity para diferenciarla de tmdb + if not item.infoLabels['duration']: + duration = scrapertools.find_single_match(data, '<dd itemprop="duration">(\d+)') + if duration: + item.infoLabels['duration'] = int(duration) * 60 + + if not item.infoLabels['genre']: + generos = scrapertools.find_multiple_matches(data, 'class="g-t-item">(.*?)</a>') + genres = [] + for g in generos: + genres.append(scrapertools.htmlclean(g.strip())) + item.infoLabels['genre'] = ", ".join(genres) + + if not rating: + rating = scrapertools.find_single_match(data, 'itemprop="ratingValue".*?>([^<]+)<') + if rating: + rating = float(rating.replace(",", ".")) + elif ob_tmdb.result: + rating = float(ob_tmdb.result.get('vote_average', 0)) + item.infoLabels['rating'] = rating + + if not votos: + votos = scrapertools.find_single_match(data, 'itemprop="ratingCount".*?>([^<]+)<') + if votos == "0" and ob_tmdb.result: + votos = ob_tmdb.result.get('vote_count', '') + item.infoLabels['votes'] = votos + + if item.infoLabels['fanart']: + item.fanart = item.infoLabels['fanart'] + else: + item.fanart = scrapertools.find_single_match(data, 'Imagen Principal.*?src: "([^"]+)"') + if item.infoLabels['thumbnail']: + item.thumbnail = item.infoLabels['thumbnail'] + + if item.infoLabels['tagline']: + itemlist.append(item.clone(title="--- %s ---" % item.infoLabels['tagline'], text_color="0xFFFF8C00", action="")) + + title = item.contentType.replace("movie", "película").replace("tvshow", "serie") + itemlist.append(item.clone(action="busqueda", title="Buscar %s en alfa: %s" % (title, item.contentTitle))) + if item.infoLabels['originaltitle'] and item.contentTitle != item.infoLabels['originaltitle']: + itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels['originaltitle'], + title=" Buscar por su nombre original: %s" % item.infoLabels['originaltitle'])) + + if langt != "es" and langt != "en" and item.infoLabels["tmdb_id"]: + tmdb_lang = Tmdb(id_Tmdb=item.infoLabels["tmdb_id"], tipo=item.extra, idioma_busqueda='es') + if tmdb_lang.result.get("title") and tmdb_lang.result["title"] != item.contentTitle: + 
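+            # extra lookup in 'es': when the UI language is neither Spanish nor
+            # English, this offers a search by the Spanish title as well, as long
+            # as it differs from the title already shown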
tmdb_lang = tmdb_lang.result["title"] + itemlist.append(item.clone(action="busqueda", title=" Buscar por su título en español: %s" % tmdb_lang, + contentTitle=tmdb_lang)) + + if item.contentType == "tvshow" and ob_tmdb.result: + itemlist.append(item.clone(action="info_seasons", text_color=color4, + title="Info de temporadas [%s]" % item.infoLabels["number_of_seasons"])) + if ob_tmdb.result: + itemlist.append(item.clone(action="reparto", title="Ver Reparto", text_color=color4, + infoLabels={'tmdb_id': item.infoLabels['tmdb_id'], + 'mediatype': item.contentType})) + + if config.is_xbmc(): + item.contextual = True + trailer_url = scrapertools.find_single_match(data, + '<a href="(?:http://m.filmaffinity.com|)/%s/movieTrailer\.php\?id=(\d+)"' % langf) + if trailer_url: + trailer_url = "http://www.filmaffinity.com/%s/evideos.php?movie_id=%s" % (langf, trailer_url) + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color=color5, filmaffinity=trailer_url)) + + url_img = scrapertools.find_single_match(data, + 'href="(?:http://m.filmaffinity.com|)(/%s/movieposters[^"]+)">' % langf) + images = {} + if ob_tmdb.result and ob_tmdb.result.get("images"): + images['tmdb'] = ob_tmdb.result["images"] + if url_img: + images['filmaffinity'] = {} + if images: + itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images, + url=url_img, extra="menu")) + try: + if item.contentType == "movie" and item.infoLabels["year"] < 2014: + post_url = "https://theost.com/search/custom/?key=%s&year=%s&country=0&genre=0" % ( + item.infoLabels['originaltitle'].replace(" ", "+"), item.infoLabels["year"]) + url = "https://nl.hideproxy.me/includes/process.php?action=update" + post = "u=%s&proxy_formdata_server=nl&allowCookies=1&encodeURL=1&encodePage=0&stripObjects=0&stripJS=0&go=" % urllib.quote( + post_url) + while True: + response = httptools.downloadpage(url, post, follow_redirects=False) + if response.headers.get("location"): + url = response.headers["location"] + post = "" + else: + data_music = response.data + break + + url_album = scrapertools.find_single_match(data_music, 'album(?:s|) on request.*?href="([^"]+)"') + if url_album: + url_album = "https://nl.hideproxy.me" + url_album + itemlist.append( + item.clone(action="musica_movie", title="Escuchar BSO - Lista de canciones", url=url_album, + text_color=color5)) + except: + pass + + token_auth = config.get_setting("token_trakt", "tvmoviedb") + if token_auth and ob_tmdb.result: + itemlist.append(item.clone(title="[Trakt] Gestionar con tu cuenta", action="menu_trakt")) + # Acciones si se configura cuenta en FA (Votar y añadir/quitar en listas) + mivoto = scrapertools.find_single_match(data, 'bg-my-rating.*?>\s*(\d+)') + itk = scrapertools.find_single_match(data, 'data-itk="([^"]+)"') + folder = not config.is_xbmc() + if mivoto: + item.infoLabels["userrating"] = int(mivoto) + new_item = item.clone(action="votar_fa", title="[FA] Mi voto: %s ---> ¿Cambiar?" 
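+                              # 'itk' is the per-session token FilmAffinity embeds in the
+                              # page (data-itk="..."); votar_fa() opens the rating dialog
+                              # and callback_voto() posts the vote to ratingajax.php
+                              # together with this token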
% mivoto, + itk=itk, voto=int(mivoto), folder=folder) + new_item.infoLabels["duration"] = "" + itemlist.append(new_item) + else: + if itk: + new_item = item.clone(action="votar_fa", title="[FA] Votar %s" % title, itk=itk, accion="votar", + folder=folder) + new_item.infoLabels["duration"] = "" + itemlist.append(new_item) + + if itk: + itk = scrapertools.find_single_match(data, 'var itk="([^"]+)"') + new_item = item.clone(action="acciones_fa", accion="lista_movie", itk=itk, + title="[FA] Añadir o quitar de una lista de usuario") + new_item.infoLabels["duration"] = "" + itemlist.append(new_item) + + # Si pertenece a una saga/colección + if ob_tmdb.result: + itemlist.append(item.clone(title="", action="", infoLabels={})) + if ob_tmdb.result.get("belongs_to_collection"): + new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="listado_tmdb", text_color=color5) + saga = ob_tmdb.result["belongs_to_collection"] + new_item.infoLabels["tmdb_id"] = saga["id"] + if saga["poster_path"]: + new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] + if saga["backdrop_path"]: + new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] + new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} + new_item.title = "Es parte de: %s" % saga["name"] + itemlist.append(new_item) + + itemlist.append(item.clone(title="%ss similares" % title.capitalize(), action="listado_tmdb", + search={'url': '%s/%s/similar' % (item.extra, item.infoLabels['tmdb_id']), + 'language': langt, 'page': 1}, infoLabels={'mediatype': item.contentType}, + text_color=color2)) + itemlist.append( + item.clone(title="Recomendaciones", action="listado_tmdb", infoLabels={'mediatype': item.contentType}, + search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), + 'language': langt, 'page': 1}, text_color=color2)) + + return itemlist + + +def filtro_fa(item): + logger.info() + + from datetime import datetime + list_controls = [] + valores = {} + + dict_values = None + # Se utilizan los valores por defecto/guardados + valores_guardados = config.get_setting("filtro_defecto_filmaf_" + item.extra, item.channel) + if valores_guardados: + dict_values = valores_guardados + + list_controls.append({'id': 'yearsdesde', 'label': 'Año desde:', 'enabled': True, + 'type': 'list', 'default': -1, 'visible': True}) + list_controls.append({'id': 'yearshasta', 'label': 'Año hasta:', 'enabled': True, + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[0]['lvalues'] = [] + list_controls[1]['lvalues'] = [] + valores['years'] = [] + year = datetime.now().year + for i in range(1900, year + 1): + list_controls[0]['lvalues'].append(str(i)) + list_controls[1]['lvalues'].append(str(i)) + valores['years'].append(str(i)) + list_controls[0]['lvalues'].append('Cualquiera') + list_controls[1]['lvalues'].append('Cualquiera') + valores['years'].append('') + + data = httptools.downloadpage("http://m.filmaffinity.com/%s/topgen.php" % langf).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + if item.contentType == "movie": + try: + bloque = scrapertools.find_single_match(data, 'name="genre">.*?</option>(.*?)</select>') + matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)">([^<]+)</option>') + if matches: + list_controls.append({'id': 'genero', 'label': 'Selecciona un género', 'enabled': True, + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[2]['lvalues'] = [] + 
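+            # list_controls carries the labels the dialog shows, item.valores the
+            # matching form values at the same index; e.g. if lvalues ends up as
+            # ['Western', ..., 'Todos'], valores['genero'] holds the matching
+            # FilmAffinity codes ending in '' (any), and filtrado_fa() maps the
+            # selected index from one list to the other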
list_controls[2]['lvalues'].append("Todos") + valores['genero'] = [] + valores['genero'].append('') + for valor, titulo in matches: + if valor == "TV_SE": + continue + list_controls[2]['lvalues'].insert(0, titulo) + valores['genero'].insert(0, valor) + + except: + pass + + try: + bloque = scrapertools.find_single_match(data, 'name="country">.*?</option>(.*?)</select>') + matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)"\s*>([^<]+)</option>') + if matches: + list_controls.append({'id': 'pais', 'label': 'País', 'enabled': True, + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[-1]['lvalues'] = [] + list_controls[-1]['lvalues'].append('Todos') + valores['pais'] = [] + valores['pais'].append('') + for valor, titulo in matches: + list_controls[-1]['lvalues'].insert(0, titulo) + valores['pais'].insert(0, valor) + except: + pass + + list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, + 'type': 'label', 'default': None, 'visible': True}) + list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + + item.valores = valores + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Filtra la búsqueda", item=item, callback='filtrado_fa') + + +def filtrado_fa(item, values): + values_copy = values.copy() + # Guarda el filtro para que sea el que se cargue por defecto + if "save" in values and values["save"]: + values_copy.pop("save") + config.set_setting("filtro_defecto_filmaf_" + item.extra, values_copy, item.channel) + + yeard = item.valores["years"][values["yearsdesde"]] + yearh = item.valores["years"][values["yearshasta"]] + pais = item.valores["pais"][values["pais"]] + if item.contentType == "movie": + genero = item.valores["genero"][values["genero"]] + else: + genero = "TV_SE" + + item.url = 'http://m.filmaffinity.com/%s/topgen.php?genre=%s&country=%s&fromyear=%s&toyear=%s&nodoc=1' \ + % (langf, genero, pais, yeard, yearh) + if item.contentType == "movie": + item.url += "¬vse=1" + item.action = "listado_fa" + + return listado_fa(item) + + +def login_fa(): + logger.info() + + try: + user = config.get_setting("usuariofa", "tvmoviedb") + password = config.get_setting("passfa", "tvmoviedb") + userid = config.get_setting("userid", "tvmoviedb") + if user == "" or password == "": + return False, "Usuario y/o contraseñas no configurados" + data = httptools.downloadpage("http://m.filmaffinity.com/%s" % langf).data + if "modal-menu-user" in data and userid: + return True, "" + + post = "postback=1&rp=&username=%s&password=%s&rememberme=on" % (user, password) + data = httptools.downloadpage("https://m.filmaffinity.com/%s/account.ajax.php?action=login" % langf, post).data + + if "Invalid username" in data: + logger.error("Error en el login") + return False, "Error en el usuario y/o contraseña. Comprueba tus credenciales" + else: + post = "name=user-menu&url=http://m.filmaffinity.com/%s/main.php" % langf + data = httptools.downloadpage("http://m.filmaffinity.com/%s/tpl.ajax.php?action=getTemplate" % langf, + post).data + userid = scrapertools.find_single_match(data, 'id-user=(\d+)') + if userid: + config.set_setting("userid", userid, "tvmoviedb") + logger.info("Login correcto") + return True, "" + except: + import traceback + logger.error(traceback.format_exc()) + return False, "Error durante el login. 
Comprueba tus credenciales" + + +def cuenta_fa(item): + # Menú de cuenta filmaffinity + itemlist = [] + login, message = login_fa() + if not login: + itemlist.append(item.clone(action="", title=message, text_color=color4)) + else: + userid = config.get_setting("userid", "tvmoviedb") + itemlist.append(item.clone(action="acciones_fa", title="Mis votaciones", text_color=color5, accion="votos", + url="http://m.filmaffinity.com/%s/user_ratings.php?id-user=%s" % (langf, userid))) + itemlist.append(item.clone(action="acciones_fa", title="Mis listas", text_color=color5, accion="listas", + url="http://m.filmaffinity.com/%s/mylists.php" % langf)) + + return itemlist + + +def acciones_fa(item): + # Acciones cuenta filmaffinity, votar, ver listas o añadir/quitar de lista + itemlist = [] + + if item.accion == "votos" or item.accion == "lista": + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + bloques = scrapertools.find_multiple_matches(data, + 'list-group-item(?:[^>]+>\s*<a| rip)(.*?</div>)\s*</div>\s*<div') + for bloque in bloques: + patron = 'href="([^"]+)".*?data-src="([^"]+)".*?mc-title.*?>([^<]+)' \ + '<small>\((\d+)\)</small>.*?(?:<div class="avgrat-box in-block ">' \ + '([^<]+)</div>\s*<small class="ratcount-box">(.*?)\s*<|</li>).*?' + matches = scrapertools.find_multiple_matches(bloque, patron) + mivoto = scrapertools.find_single_match(bloque, 'bg-my-rating[^>]+>(?:\s*<strong>|)([^<]+)<') + for url, thumb, title, year, rating, votos in matches: + new_item = item.clone(action="detalles_fa", text_color=color1) + if not url.startswith("http://m.filmaffinity"): + new_item.url = "http://m.filmaffinity.com" + url + else: + new_item.url = url + + new_item.infoLabels["year"] = year + rating = rating.replace(",", ".") + new_item.infoLabels["rating"] = float(rating) + new_item.infoLabels["votes"] = votos.replace(".", "") + if mivoto.isdigit(): + new_item.infoLabels["userrating"] = int(mivoto) + new_item.thumbnail = thumb.replace("msmall", "large") + if not new_item.thumbnail.startswith("http"): + new_item.thumbnail = "http://m.filmaffinity.com" + new_item.thumbnail + + if re.search(r'(?i)serie de tv|\(tv\)', title): + new_item.contentType = "tvshow" + new_item.extra = "tv" + new_item.title = title.strip() + " (%s) [COLOR %s]%s[/COLOR]/[COLOR %s]%s[/COLOR]" % ( + year, color6, rating, color4, mivoto) + new_item.contentTitle = title.strip() + itemlist.append(new_item) + elif item.accion == "listas": + orderby = config.get_setting("orderfa", "tvmoviedb") + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + patron = 'list-group-item rip.*?href="([^"]+)".*?<strong>([^<]+)</strong>.*?<em>([^<]+)</em>' \ + '.*?(?:<div class="ls-imgs">(.*?)</a>|</a>)' + matches = scrapertools.find_multiple_matches(data, patron) + for url, title, content, imgs in matches: + new_item = item.clone(accion="lista", text_color=color1) + if not url.startswith("http://m.filmaffinity.com"): + new_item.url = "http://m.filmaffinity.com%s&orderby=%s" % (url, orderby) + else: + new_item.url = "%s&orderby=%s" % (url, orderby) + new_item.title = title + " [COLOR %s](%s)[/COLOR]" % (color6, content) + if imgs: + imagenes = scrapertools.find_multiple_matches(imgs, 'data-src="([^"]+)"') + from random import randint + random = randint(0, len(imagenes) - 1) + new_item.thumbnail = imagenes[random].replace("msmall", "large") + itemlist.append(new_item) + elif item.accion == "lista_movie": + movieid = 
item.url.rsplit("=", 1)[1] + url = "http://m.filmaffinity.com/%s/edtmovielists.php?movie_id=%s" % (langf, movieid) + data = httptools.downloadpage(url).data + patron = 'data-list-id="([^"]+)"(.*?)<div class="in-block list-name"><strong>([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + for listid, chequeo, title in matches: + new_item = item.clone(folder=not config.is_xbmc()) + new_item.infoLabels["duration"] = "" + new_item.listid = listid + if "checked" in chequeo: + new_item.title = "[COLOR %s]%s[/COLOR] %s" % ("green", u"\u0474".encode('utf-8'), title) + new_item.accion = "removeMovieFromList" + else: + new_item.title = "[COLOR %s]%s[/COLOR] %s" % (color4, u"\u04FE".encode('utf-8'), title) + new_item.accion = "addMovieToList" + itemlist.append(new_item) + new_item = item.clone(action="newlist", title="Añadir una nueva lista", text_color=color6) + new_item.infoLabels["duration"] = "" + itemlist.append(new_item) + else: + url = "http://filmaffinity.com/%s/movieslist.ajax.php" % langf + movieid = item.url.rsplit("=", 1)[1] + post = "action=%s&listId=%s&movieId=%s&itk=%s" % (item.accion, item.listid, movieid, item.itk) + data = jsontools.load(httptools.downloadpage(url, post).data) + if not item.folder: + import xbmc + return xbmc.executebuiltin("Container.Refresh") + else: + if data["result"] == 0: + title = "Acción completada con éxito" + else: + title = "Error, algo ha fallado durante el proceso" + itemlist.append(item.clone(action="", title=title)) + + return itemlist + + +def votar_fa(item): + # Ventana para seleccionar el voto + logger.info() + + list_controls = [] + valores = {} + dict_values = None + if item.voto: + dict_values = {'voto': item.voto} + list_controls.append({'id': 'voto', 'label': 'Indica tu voto:', 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True}) + list_controls[0]['lvalues'] = ['No vista'] + valores['voto'] = ["-1"] + for i in range(1, 11): + list_controls[0]['lvalues'].append(str(i)) + valores['voto'].append(i) + + item.valores = valores + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Votar %s" % item.contentTitle, item=item, + callback='callback_voto') + + +def callback_voto(item, values): + item.voto = item.valores["voto"][values["voto"]] + item.action = "acciones_fa" + movieid = item.url.rsplit("=", 1)[1] + post = "id=%s&rating=%s&itk=%s&action=rate" % (movieid, item.voto, item.itk) + data = jsontools.load(httptools.downloadpage("http://filmaffinity.com/%s/ratingajax.php" % langf, post).data) + + if not item.folder: + import xbmc + return xbmc.executebuiltin("Container.Refresh") + else: + if data["result"] == 0: + title = "Voto contabilizado con éxito" + else: + title = "Error, algo ha fallado durante el proceso" + itemlist.append(item.clone(action="", title=title)) + return itemlist + + +def newlist(item): + # Creación de nueva lista en filmaffinity + itemlist = [] + if item.accion == "lista": + location = httptools.downloadpage(item.url).headers["location"] + data = httptools.downloadpage("http://m.filmaffinity.com" + location).data + itemlist.append(item.clone(action="", title="Lista creada correctamente")) + else: + url = "http://m.filmaffinity.com/%s/addlist.php?rp=%s" % (langf, item.url) + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t| |\s{2}", "", data) + + patron = 'data-list-id="[^"]+" href="([^"]+)"><[^>]+><div class="col-xs-10">' \ + '([^<]+)</div><div[^>]+><div type="button" class="btn btn-primary">' + matches = 
scrapertools.find_multiple_matches(data, patron) + for scrapedurl, title in matches: + scrapedurl = "http://m.filmaffinity.com" + scrapedurl + itemlist.append(item.clone(title=title, url=scrapedurl, accion="lista")) + + return itemlist + + +##-------------------- LISTADOS DE IMAGENES ------------------------## +def imagenes(item): + itemlist = [] + if item.extra == "menu": + item.folder = not config.is_xbmc() + if "tmdb" in item.images: + itemlist.append(item.clone(title="Tmdb", text_color=color2, extra="")) + itemlist.append(item.clone(title="Fanart.Tv", text_color=color2, extra="")) + if "imdb" in item.images: + itemlist.append(item.clone(title="Imdb", text_color=color2, extra="")) + if "filmaffinity" in item.images: + itemlist.append(item.clone(title="Filmaffinity", text_color=color2, extra="")) + if "myanimelist" in item.images: + data = httptools.downloadpage(item.url + "/pics", cookies=False).data + images = scrapertools.find_multiple_matches(data, + '<div class="picSurround"><a href="([^"]+)" title="([^"]+)"') + if images: + for thumb, title in images: + item.images["myanimelist"].append([thumb, title]) + itemlist.append(item.clone(title="MyAnimeList", text_color=color2, extra="")) + + return itemlist + + if "Fanart" in item.title: + try: + item, resultado = fanartv(item) + except: + resultado = None + + if not resultado: + itemlist.append(item.clone(title="Web de Fanart.tv no disponible. Vuelve a intentarlo", action="")) + return itemlist + elif "Filmaffinity" in item.title: + try: + url = "http://m.filmaffinity.com" + item.url + data = httptools.downloadpage(url).data + matches = scrapertools.find_multiple_matches(data, 'data-src="([^"]+)" alt="[^-]+\s*([^"]+)"') + if matches: + item.images["filmaffinity"] = matches + else: + item.images.pop("filmaffinity", None) + except: + itemlist.append(item.clone(title="No hay imágenes disponibles", action="")) + return itemlist + elif "Imdb" in item.title: + try: + data = jsontools.load(httptools.downloadpage(item.images["imdb"]["url"], cookies=False).data) + item.images["imdb"].pop("url") + if data.get("allImages"): + item.images["imdb"] = data["allImages"] + else: + item.images.pop("imdb", None) + except: + itemlist.append(item.clone(title="No hay imágenes disponibles", action="")) + return itemlist + + if item.images: + from channels import infoplus + for key, value in item.images.iteritems(): + if key == "tmdb" and "Tmdb" in item.title: + if item.folder: + for tipo, child in value.iteritems(): + for i, imagen in enumerate(child): + thumb = 'http://image.tmdb.org/t/p/w500' + imagen["file_path"] + fanart = 'http://image.tmdb.org/t/p/original' + imagen["file_path"] + title = " %s %s [%sx%s]" % (tipo.capitalize(), i + 1, imagen["width"], imagen["height"]) + itemlist.append(Item(channel=item.channel, action="", thumbnail=thumb, fanart=fanart, + title=title, text_color=color1, infoLabels=item.infoLabels)) + else: + imagesWindow = infoplus.images(tmdb=value).doModal() + + elif key == "fanart.tv": + if item.folder: + for tipo, child in value.iteritems(): + for i, imagen in enumerate(child): + thumb = imagen["url"].replace("/fanart/", "/preview/") + fanart = imagen["url"] + title = " %s %s [%s]" % (tipo.capitalize(), i + 1, imagen["lang"]) + itemlist.append(Item(channel=item.channel, action="", thumbnail=thumb, fanart=fanart, + title=title, text_color=color1, infoLabels=item.infoLabels)) + else: + imagesWindow = infoplus.images(fanartv=value).doModal() + + elif key == "filmaffinity" and "Filmaffinity" in item.title: + if item.folder: + for thumb, 
title in value: + thumb = thumb.replace("-s200", "-large") + itemlist.append(Item(channel=item.channel, action="", thumbnail=thumb, fanart=thumb, + title=title, text_color=color1, infoLabels=item.infoLabels)) + else: + imagesWindow = infoplus.images(fa=value).doModal() + + elif key == "imdb" and "Imdb" in item.title: + if item.folder: + for imagen in value: + thumb = imagen["msrc"] + fanart = imagen["src"] + title = imagen["altText"] + itemlist.append( + Item(channel=item.channel, action="", thumbnail=thumb, fanart=fanart, title=title, + text_color=color1, infoLabels=item.infoLabels)) + else: + imagesWindow = infoplus.images(imdb=value).doModal() + + elif key == "myanimelist" and "MyAnimeList" in item.title: + if item.folder: + for imagen, title in value: + itemlist.append( + Item(channel=item.channel, action="", thumbnail=imagen, fanart=imagen, title=title, + text_color=color1, infoLabels=item.infoLabels)) + else: + imagesWindow = infoplus.images(mal=value).doModal() + + return itemlist + + +def fanartv(item): + headers = [['Content-Type', 'application/json']] + id_search = item.infoLabels['tmdb_id'] + if item.contentType == "tvshow" and id_search: + search = {'url': 'tv/%s/external_ids' % item.infoLabels['tmdb_id'], 'language': langt} + ob_tmdb = Tmdb(discover=search, idioma_busqueda=langt) + id_search = ob_tmdb.result.get("tvdb_id") + + resultado = False + if id_search: + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % item.infoLabels['tmdb_id'] + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_search + data = jsontools.load(httptools.downloadpage(url, headers=headers, replace_headers=True).data) + if data and not "error message" in data: + item.images['fanart.tv'] = {} + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + item.images['fanart.tv'][key] = value + resultado = True + + return item, resultado + + +##-------------------- SECCION TRAKT.TV ------------------------## +def auth_trakt(item): + # Autentificación de cuenta Trakt, proceso parecido a real-debrid + client_id = "a83c1a92d1313bd7ac7baa37a3fc83add26833d4b006f9f9562cae213a761260" + headers = {'Content-Type': 'application/json', 'trakt-api-key': client_id, 'trakt-api-version': '2'} + try: + post = {'client_id': client_id} + post = jsontools.dump(post) + # Se solicita url y código de verificación para conceder permiso a la app + url = "http://api-v2launch.trakt.tv/oauth/device/code" + data = httptools.downloadpage(url, post=post, headers=headers, replace_headers=True).data + data = jsontools.load(data) + item.verify_url = data["verification_url"] + item.user_code = data["user_code"] + item.device_code = data["device_code"] + item.intervalo = data["interval"] + if not item.folder: + token_trakt(item) + else: + itemlist = [] + title = "Accede a esta página: %s" % item.verify_url + itemlist.append(item.clone(title=title, action="")) + title = "Ingresa este código y acepta: %s" % item.user_code + itemlist.append(item.clone(title=title, action="")) + title = "Una vez hecho, pulsa aquí!" 
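+            # OAuth device-code flow: the add-on shows the verification_url and
+            # user_code it just received; once the user approves on the Trakt
+            # website, token_trakt() polls oauth/device/token every 'interval'
+            # seconds and stores the resulting access/refresh token pair.
+            # Illustrative (not verbatim) oauth/device/code response:
+            #   {"device_code": "...", "user_code": "1A2B3C4D",
+            #    "verification_url": "...", "interval": 5}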
+ itemlist.append(item.clone(title=title, action="token_trakt")) + return itemlist + except: + import traceback + logger.error(traceback.format_exc()) + + +def token_trakt(item): + client_id = "a83c1a92d1313bd7ac7baa37a3fc83add26833d4b006f9f9562cae213a761260" + client_secret = "cb22e3c36547ba375e5de077fa4aa497daf486e29b92a5b9c25bb17ac39b98bf" + headers = {'Content-Type': 'application/json', 'trakt-api-key': client_id, 'trakt-api-version': '2'} + try: + if item.extra == "renew": + refresh = config.get_setting("refresh_token_trakt", "tvmoviedb") + url = "http://api-v2launch.trakt.tv/oauth/device/token" + post = {'refresh_token': refresh, 'client_id': client_id, 'client_secret': client_secret, + 'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob', 'grant_type': 'refresh_token'} + post = jsontools.dump(post) + data = httptools.downloadpage(url, post, headers, replace_headers=True).data + data = jsontools.load(data) + elif item.action == "token_trakt": + url = "http://api-v2launch.trakt.tv/oauth/device/token" + post = {'code': item.device_code, 'client_id': client_id, 'client_secret': client_secret} + post = jsontools.dump(post) + post = "code=%s&client_id=%s&client_secret=%s" % (item.device_code, client_id, client_secret) + data = httptools.downloadpage(url, post, headers, replace_headers=True).data + data = jsontools.load(data) + else: + import time + dialog_auth = platformtools.dialog_progress("Autentificación. No cierres esta ventana!!", + "1. Entra en la siguiente url: %s" % item.verify_url, + "2. Ingresa este código en la página y acepta: %s" % item.user_code, + "3. Espera a que se cierre esta ventana") + + # Generalmente cada 5 segundos se intenta comprobar si el usuario ha introducido el código + while True: + time.sleep(item.intervalo) + try: + if dialog_auth.iscanceled(): + return + + url = "http://api-v2launch.trakt.tv/oauth/device/token" + post = {'code': item.device_code, 'client_id': client_id, 'client_secret': client_secret} + post = jsontools.dump(post) + data = httptools.downloadpage(url, post, headers, replace_headers=True).data + data = jsontools.load(data) + if "access_token" in data: + # Código introducido, salimos del bucle + break + except: + pass + + try: + dialog_auth.close() + except: + pass + + token = data["access_token"] + refresh = data["refresh_token"] + + config.set_setting("token_trakt", token, "tvmoviedb") + config.set_setting("refresh_token_trakt", refresh, "tvmoviedb") + if not item.folder: + platformtools.dialog_notification("Éxito", "Cuenta vinculada correctamente") + if config.is_xbmc(): + import xbmc + xbmc.executebuiltin("Container.Refresh") + return + + except: + import traceback + logger.error(traceback.format_exc()) + if not item.folder: + return platformtools.dialog_notification("Error", "Fallo en el proceso de vinculación") + token = "" + + itemlist = [] + if token: + itemlist.append(item.clone("Cuenta vinculada con éxito", action="")) + else: + itemlist.append(item.clone("Fallo en el proceso de vinculación", action="")) + + return itemlist + + +def menu_trakt(item): + # Menú con acciones de cuenta trakt (vistas, watchlist, coleccion) + itemlist = [] + token_auth = config.get_setting("token_trakt", "tvmoviedb") + tipo = item.extra.replace("tv", "show") + "s" + title = item.contentType.replace("movie", "película").replace("tvshow", "serie") + try: + result = acciones_trakt(item.clone(url="sync/watched/%s" % tipo)) + post = {tipo: [{"ids": {"tmdb": item.infoLabels["tmdb_id"]}}]} + if '"tmdb":%s' % item.infoLabels["tmdb_id"] in result: + 
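+        # acciones_trakt() returns the raw JSON text of the watched list here,
+        # so a plain substring check on "tmdb":<id> is enough to know whether
+        # the title is already marked as watched.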
itemlist.append(item.clone(title="[Trakt] Marcar %s como no vista" % title, action="acciones_trakt", + url="sync/history/remove", post=post)) + else: + itemlist.append(item.clone(title="[Trakt] Marcar %s como vista" % title, action="acciones_trakt", + url="sync/history", post=post)) + except: + pass + + try: + result = acciones_trakt(item.clone(url="sync/watchlist/%s" % tipo)) + post = {tipo: [{"ids": {"tmdb": item.infoLabels["tmdb_id"]}}]} + if '"tmdb":%s' % item.infoLabels["tmdb_id"] in result: + itemlist.append(item.clone(title="[Trakt] Eliminar %s de tu watchlist" % title, action="acciones_trakt", + url="sync/watchlist/remove", post=post)) + else: + itemlist.append(item.clone(title="[Trakt] Añadir %s a tu watchlist" % title, action="acciones_trakt", + url="sync/watchlist", post=post)) + except: + pass + + try: + result = acciones_trakt(item.clone(url="sync/collection/%s" % tipo)) + post = {tipo: [{"ids": {"tmdb": item.infoLabels["tmdb_id"]}}]} + if '"tmdb":%s' % item.infoLabels["tmdb_id"] in result: + itemlist.append(item.clone(title="[Trakt] Eliminar %s de tu colección" % title, action="acciones_trakt", + url="sync/collection/remove", post=post)) + else: + itemlist.append(item.clone(title="[Trakt] Añadir %s a tu colección" % title, action="acciones_trakt", + url="sync/collection", post=post)) + except: + pass + + return itemlist + + +def acciones_trakt(item): + token_auth = config.get_setting("token_trakt", "tvmoviedb") + itemlist = [] + item.text_color = color1 + + item.contentType = item.extra.replace("show", "tvshow") + + client_id = "a83c1a92d1313bd7ac7baa37a3fc83add26833d4b006f9f9562cae213a761260" + headers = [['Content-Type', 'application/json'], ['trakt-api-key', client_id], + ['trakt-api-version', '2']] + if token_auth: + headers.append(['Authorization', "Bearer %s" % token_auth]) + + post = None + if item.post: + post = jsontools.dump(item.post) + + url = "http://api-v2launch.trakt.tv/%s" % item.url + data = httptools.downloadpage(url, post, headers=headers, replace_headers=True) + if data.code == "401": + token_trakt(item.clone(extra="renew")) + token_auth = config.get_setting("token_trakt", "tvmoviedb") + headers[3][1] = "Bearer %s" % token_auth + data = httptools.downloadpage(url, post, headers=headers, replace_headers=True) + + data = data.data + if data and "sync" in item.url: + if not item.post: + return data + else: + data = jsontools.load(data) + if "not_found" in data: + return platformtools.dialog_notification("Trakt", "Acción realizada correctamente") + else: + return platformtools.dialog_notification("Trakt", "Hubo error en el proceso") + elif data and "recommendations" in item.url: + data = jsontools.load(data) + ratings = [] + try: + for i, entry in enumerate(data): + if i <= item.pagina: + continue + try: + entry = entry[item.extra] + except: + pass + new_item = item.clone(action="detalles") + new_item.title = entry["title"] + " (%s)" % entry["year"] + new_item.infoLabels["tmdb_id"] = entry["ids"]["tmdb"] + try: + ratings.append(entry["rating"]) + except: + ratings.append(0.0) + itemlist.append(new_item) + if i == item.pagina + 20: + itemlist.append(item.clone(title=">> Página Siguiente", text_color="", pagina=item.pagina + 20)) + break + + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist[:-1], True) + for i, new_item in enumerate(itemlist[:-1]): + if new_item.infoLabels["title"]: + new_item.title = new_item.infoLabels["title"] + " (%s)" % new_item.infoLabels["year"] + if ratings[i]: + new_item.title += " [COLOR %s]Trakt:%.2f[/COLOR]/[COLOR 
%s]Tmdb:%.2f[/COLOR]" \ + % (color6, ratings[i], color4, new_item.infoLabels["rating"]) + except: + pass + + elif data and not item.url.endswith("lists"): + data = jsontools.load(data) + if data and "page=1" in item.url and item.order: + valores = {'rank': 'Por defecto', 'added': 'Añadido', 'title': 'Título', 'released': 'Estreno', + 'runtime': 'Duración', 'popularity': 'Popularidad', 'percentage': 'Valoración', + 'votes': 'Votos', 'asc': 'ascendente', 'desc': 'descendente'} + orden = valores[item.order] + " " + valores[item.how] + itemlist.append(item.clone(title="Lista ordenada por %s. ¿Cambiar orden?" % orden, action="order_list", + text_color=color4)) + ratings = [] + try: + if item.order: + if item.how == "asc": + reverse = False + else: + reverse = True + + if item.order == "rank" or item.order == "added": + data = sorted(data, key=lambda x: x[item.order.replace("added", "listed_at")], reverse=reverse) + else: + order = item.order.replace("popularity", "votes").replace("percentage", "rating") + data = sorted(data, key=lambda x: x[x['type']].get(order, 0), reverse=reverse) + + for entry in data: + try: + entry = entry[item.extra] + except: + pass + new_item = item.clone(action="detalles") + new_item.title = entry["title"] + " (%s)" % entry["year"] + new_item.infoLabels["tmdb_id"] = entry["ids"]["tmdb"] + try: + ratings.append(entry["rating"]) + except: + ratings.append("") + itemlist.append(new_item) + + from core import tmdb + if "page=1" in item.url and item.order: + tmdb.set_infoLabels_itemlist(itemlist[1:], True) + for i, new_item in enumerate(itemlist[1:]): + if new_item.infoLabels["title"]: + new_item.title = new_item.infoLabels["title"] + " (%s)" % new_item.infoLabels["year"] + if ratings[i]: + new_item.title += " [COLOR %s]Trakt:%.2f[/COLOR]/[COLOR %s]Tmdb:%.2f[/COLOR]" \ + % (color6, ratings[i], color4, new_item.infoLabels["rating"]) + else: + tmdb.set_infoLabels_itemlist(itemlist, True) + for i, new_item in enumerate(itemlist): + if new_item.infoLabels["title"]: + new_item.title = new_item.infoLabels["title"] + " (%s)" % new_item.infoLabels["year"] + if ratings[i]: + new_item.title += " [COLOR %s]Trakt:%.2f[/COLOR]/[COLOR %s]Tmdb:%.2f[/COLOR]" \ + % (color6, ratings[i], color4, new_item.infoLabels["rating"]) + except: + import traceback + logger.error(traceback.format_exc()) + + if "page" in item.url and len(itemlist) == 20: + page = scrapertools.find_single_match(item.url, 'page=(\d+)') + page_new = int(page) + 1 + url = item.url.replace("page=" + page, "page=" + str(page_new)) + itemlist.append(item.clone(title=">> Página Siguiente", text_color="", url=url)) + else: + data = jsontools.load(data) + for entry in data: + new_item = item.clone() + new_item.title = entry["name"] + " [COLOR %s](%s)[/COLOR]" % (color6, entry["item_count"]) + new_item.infoLabels["plot"] = entry.get("description") + new_item.url = "users/me/lists/%s/items/?page=1&limit=20&extended=full" % entry["ids"]["trakt"] + new_item.order = entry.get("sort_by") + new_item.how = entry.get("sort_how") + itemlist.append(new_item) + + return itemlist + + +def order_list(item): + logger.info() + + list_controls = [] + valores1 = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes'] + valores2 = ['asc', 'desc'] + + dict_values = {'orderby': valores1.index(item.order), 'orderhow': valores2.index(item.how)} + + list_controls.append({'id': 'orderby', 'label': 'Ordenar por:', 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True}) + list_controls.append({'id': 'orderhow', 
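+                          # every control dict follows the schema used by
+                          # platformtools.show_channel_settings across this
+                          # channel: id, label, enabled, type, default, visible
+                          # (plus an 'lvalues' list for 'list'-type controls)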
'label': 'De forma:', 'enabled': True, + 'type': 'list', 'default': 0, 'visible': True}) + list_controls[0]['lvalues'] = ['Por defecto', 'Añadido', 'Título', 'Estreno', 'Duración', 'Popularidad', + 'Valoración', 'Votos'] + list_controls[1]['lvalues'] = ['Ascendente', 'Descendente'] + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Filtra la búsqueda", item=item, callback='order_trakt') + + +def order_trakt(item, values): + valores1 = ['rank', 'added', 'title', 'released', 'runtime', 'popularity', 'percentage', 'votes'] + valores2 = ['asc', 'desc'] + orderby = valores1[values["orderby"]] + item.order = orderby + orderhow = valores2[values["orderhow"]] + item.how = orderhow + + item.action = "acciones_trakt" + + return acciones_trakt(item) + + +##-------------------- SECCION MYANIMELIST ------------------------## +def top_mal(item): + # Para los menús principales de tops pelícuas/series/ovas + itemlist = [] + item.text_color = color1 + data = httptools.downloadpage(item.url, cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + patron = '<td class="title al va-t word-break">.*?href="([^"]+)".*?src="(.*?).jpg.*?' \ + '<div class="di-ib clearfix">.*?href.*?>([^<]+)<.*?<div class="information di-ib mt4">' \ + '(.*?)<br>.*?(\d{4}|-).*?<span class="text.*?>(.*?)</span>' + matches = scrapertools.find_multiple_matches(data, patron) + for url, thumb, title, info, year, rating in matches: + new_item = item.clone(action="detalles_mal", url=url) + info = info.strip() + new_item.thumbnail = thumb.replace("r/50x70/", "") + "l.jpg" + year = year.replace("-", "") + if year: + new_item.infoLabels["year"] = year + new_item.title = title.strip() + if not item.extra: + new_item.title += " %s" % info.replace("Movie (1 eps)", "Movie").replace("TV ", "") + if "Movie (" in info or "Special (" in info: + new_item.contentType = "movie" + new_item.extra = "movie" + else: + new_item.contentType = "tvshow" + new_item.extra = "tv" + new_item.show = title.strip() + else: + new_item.title += " (%s" % info.split("(", 1)[1] + if not year in title and year: + new_item.title += " [%s]" % year + if rating != "N/A": + new_item.infoLabels["rating"] = float(rating) + new_item.title += " [COLOR %s]%s[/COLOR]" % (color6, rating) + new_item.contentTitle = title.strip() + itemlist.append(new_item) + + next_page = scrapertools.find_single_match(data, 'limit=(\d+)" class="link-blue-box next">') + if next_page: + next_page = item.url.rsplit("=", 1)[0] + "=%s" % next_page + itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color="")) + + return itemlist + + +def detalles_mal(item): + itemlist = [] + + cookie_session = get_cookie_value() + header_mal = {'Cookie': '%s search_sort_anime=score; search_view=tile; is_logged_in=1' % cookie_session} + data = httptools.downloadpage(item.url, headers=header_mal, cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + item.contentTitle = item.contentTitle.replace("(TV)", "").replace("(Movie)", "") + item.fanart = default_fan + item.infoLabels["plot"] = "" + + title_mal = item.contentTitle + if not item.extra: + extra = scrapertools.find_single_match(data, 'Type:</span>.*?>([^<]+)</a>').lower() + item.tipo = extra + if extra == "movie" or extra == "special": + item.extra = "movie" + item.contentType = "movie" + else: + item.extra = "tv" + item.contentType = "tvshow" + + if item.infoLabels['rating'] != "0.0": + rating = 
item.infoLabels['rating'] + else: + rating = scrapertools.find_single_match(data, '<span itemprop="ratingValue">(\d.\d+)</span>') + + if not item.infoLabels["year"]: + item.infoLabels["year"] = scrapertools.find_single_match(data, '>Aired:</span>.*?(\d{4})') + + eng_title = scrapertools.find_single_match(data, 'English:</span> ([^<]+)</div>').strip() + item_tmdb = item.clone() + + if item.contentType == "movie": + ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, year=item_tmdb.infoLabels['year'], tipo=item_tmdb.extra, + idioma_busqueda=langt) + if not ob_tmdb.result and eng_title: + ob_tmdb = Tmdb(texto_buscado=eng_title, year=item_tmdb.infoLabels['year'], + tipo=item_tmdb.extra, idioma_busqueda=langt) + if not ob_tmdb.result and ("Special (" in item.title or item.tipo == "special"): + item_tmdb.extra = "tv" + search = {'url': 'search/tv', 'language': langt, 'query': item_tmdb.contentTitle, + 'first_air_date': item_tmdb.infoLabels["year"]} + ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) + else: + search = {'url': 'search/tv', 'language': langt, 'query': eng_title, + 'first_air_date': item_tmdb.infoLabels["year"]} + ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) + if not ob_tmdb.result and eng_title: + search['query'] = eng_title + ob_tmdb = Tmdb(discover=search, tipo=item_tmdb.extra, idioma_busqueda=langt) + if not ob_tmdb.result and ("OVA (" in item.title or item.tipo == "ova"): + item_tmdb.extra = "movie" + ob_tmdb = Tmdb(texto_buscado=item_tmdb.contentTitle, tipo=item_tmdb.extra, idioma_busqueda=langt, + year=item_tmdb.infoLabels['year']) + + if ob_tmdb.result: + ob_tmdb = Tmdb(id_Tmdb=ob_tmdb.get_id(), tipo=item_tmdb.extra, idioma_busqueda=langt) + item.infoLabels = ob_tmdb.get_infoLabels(item.infoLabels) + + # Se concatena sinopsis myanimelist con la de tmdb si la hubiese + plot = scrapertools.find_single_match(data, '<span itemprop="description">(.*?)</span>') + plot = plot.replace("<br />", "\n").replace("<i>", "[I]").replace("</i>", "[/I]") + plot = scrapertools.decodeHtmlentities(plot) + if plot and (item.infoLabels['plot'] and item.infoLabels['plot'] != plot): + item.infoLabels['plot'] += " (TMDB)\n\n" + plot + " (MYANIMELIST)" + elif plot and not item.infoLabels['plot']: + item.infoLabels['plot'] = plot + + if not item.infoLabels['duration']: + try: + horas, min1, min2 = scrapertools.find_single_match(data, + 'Duration:</span>\s*(?:(\d+) hr\. 
(\d+) min|(\d+) min)')
+            if horas:
+                # duration is stored in seconds: hours*3600 + minutes*60
+                horas = int(horas) * 3600
+            else:
+                horas = 0
+            if not min1:
+                min1 = min2
+            item.infoLabels['duration'] = horas + (int(min1) * 60)
+        except:
+            pass
+
+    # The myanimelist data overrides the values that came from tmdb
+    generos = scrapertools.find_single_match(data, 'Genres:</span>(.*?)</div>')
+    if generos:
+        item.infoLabels['genre'] = scrapertools.htmlclean(generos)
+
+    item.infoLabels['rating'] = float(rating)
+    votos = scrapertools.find_single_match(data, '<span itemprop="ratingCount">([^<]+)<')
+    item.infoLabels['votes'] = votos.replace(",", "")
+
+    if item.infoLabels['fanart']:
+        item.fanart = item.infoLabels['fanart']
+    if item.infoLabels['thumbnail']:
+        item.thumbnail = item.infoLabels['thumbnail']
+    if not item.thumbnail:
+        item.thumbnail = scrapertools.find_single_match(data, '/pics">.*?<img src="([^"]+)"').replace(".jpg", "l.jpg")
+
+    itemlist.append(
+        item.clone(action="busqueda", title="Buscar en alfa: %s" % title_mal, contentTitle=title_mal,
+                   extra=item.extra.replace("tv", "anime")))
+    if item.infoLabels["title"] and title_mal != item.infoLabels["title"]:
+        itemlist.append(item.clone(action="busqueda", contentTitle=item.infoLabels["title"],
+                                   title=" Buscar por título alternativo: %s" % item.infoLabels["title"]))
+
+    if eng_title and item.contentTitle != eng_title and title_mal != eng_title:
+        itemlist.append(item.clone(action="busqueda", contentTitle=eng_title,
+                                   title=" Buscar por su título en inglés: %s" % eng_title))
+
+    if item_tmdb.extra == "tv" and ob_tmdb.result:
+        itemlist.append(item.clone(action="info_seasons", text_color=color4,
+                                   title="Info de temporadas [%s]" % item.infoLabels["number_of_seasons"]))
+
+    itemlist.append(item.clone(action="videos_mal", title="Vídeos (Episodios, Trailers...)", text_color=color5,
+                               url=item.url + "/video"))
+
+    # Option to browse the characters and the voice actors / production staff
+    if not "No characters or voice actors" in data and not "No staff for this anime" in data:
+        itemlist.append(item.clone(action="staff_mal", title="Personajes/Staff", text_color=color2,
+                                   url=item.url + "/characters"))
+    if config.is_xbmc():
+        item.contextual = True
+        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
+                                   text_color=color5))
+
+    images = {}
+    if ob_tmdb.result and ob_tmdb.result.get("images"):
+        images['tmdb'] = ob_tmdb.result["images"]
+    images['myanimelist'] = []
+    itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images,
+                               extra="menu"))
+
+    try:
+        title_search = re.sub(r'[^0-9A-z]+', ' ', title_mal)
+        post = "busqueda=%s&button=Search" % urllib.quote(title_search)
+        data_music = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php", post).data
+        if not "NO MATCHES IN YOUR SEARCH" in data_music:
+            itemlist.append(
+                item.clone(action="musica_anime", title="Escuchar BSO - Lista de canciones", text_color=color5,
+                           post=post))
+    except:
+        pass
+
+    score = scrapertools.find_single_match(data, 'id="myinfo_score".*?selected" value="(\d+)"')
+    if score != "0":
+        score = "[COLOR %s]Puntuado:%s[/COLOR]" % (color4, score)
+    else:
+        score = "Votar"
+    if item.login and "Add to My List</span>" in data and config.is_xbmc():
+        itemlist.append(
+            item.clone(title="[MAL] Añadir a tus listas/%s" % score, action="menu_mal", contentTitle=title_mal))
+    elif item.login and config.is_xbmc():
+        status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa', '4': 'Descartados',
+                  '6': 'Previstos para ver'}
+        estado = 
scrapertools.find_single_match(data, 'myinfo_updateInfo".*?option selected="selected" value="(\d+)"') + try: + estado = status[estado] + itemlist.append( + item.clone(title="[MAL] En tu lista de [COLOR %s]%s[/COLOR]. ¿Cambiar?/%s" % (color6, estado, score), + action="menu_mal", + contentTitle=title_mal)) + except: + pass + + token_auth = config.get_setting("token_trakt", "tvmoviedb") + if token_auth and ob_tmdb.result: + itemlist.append(item.clone(title="[Trakt] Gestionar con tu cuenta", action="menu_trakt")) + + # Se listan precuelas, secuelas y series alternativas + prequel = scrapertools.find_single_match(data, 'Prequel:</td>(.*?)</td>') + if prequel: + matches = scrapertools.find_multiple_matches(prequel, 'href="([^"]+)">(.*?)</a>') + for url, title in matches: + new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan, + thumbnail="") + new_item.title = "Precuela: %s" % title + new_item.contentTitle = title + new_item.url = "https://myanimelist.net%s" % url + itemlist.append(new_item) + + sequel = scrapertools.find_single_match(data, 'Sequel:</td>(.*?)</td>') + if sequel: + matches = scrapertools.find_multiple_matches(sequel, 'href="([^"]+)">(.*?)</a>') + for url, title in matches: + new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan, + thumbnail="") + new_item.title = "Secuela: %s" % title + new_item.contentTitle = title + new_item.url = "https://myanimelist.net%s" % url + itemlist.append(new_item) + + alt_version = scrapertools.find_single_match(data, 'Alternative version:</td>(.*?)</td>') + if alt_version: + matches = scrapertools.find_multiple_matches(alt_version, 'href="([^"]+)">(.*?)</a>') + for url, title in matches: + new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan, + thumbnail="") + new_item.title = "Versión alternativa: %s" % title + new_item.contentTitle = title + new_item.url = "https://myanimelist.net%s" % url + itemlist.append(new_item) + + if ob_tmdb.result: + itemlist.append(item.clone(title="", action="", infoLabels={})) + if ob_tmdb.result.get("belongs_to_collection"): + new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="listado_tmdb", text_color=color5) + saga = ob_tmdb.result["belongs_to_collection"] + new_item.infoLabels["tmdb_id"] = saga["id"] + if saga["poster_path"]: + new_item.thumbnail = 'http://image.tmdb.org/t/p/original' + saga["poster_path"] + if saga["backdrop_path"]: + new_item.fanart = 'http://image.tmdb.org/t/p/original' + saga["backdrop_path"] + new_item.search = {'url': 'collection/%s' % saga['id'], 'language': langt} + new_item.title = "Es parte de: %s" % saga["name"] + itemlist.append(new_item) + + itemlist.append( + item.clone(title="Recomendaciones TMDB", action="listado_tmdb", infoLabels={'mediatype': item.contentType}, + search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']), + 'language': langt, 'page': 1}, text_color=color2)) + + # Recomendaciones myanimelist y búsqueda de info en anidb (fansubs en español) + itemlist.append(item.clone(title="Recomendaciones MyAnimeList", action="reco_mal")) + anidb_link = scrapertools.find_single_match(data, + '<a href="(http://anidb.info/perl-bin/animedb.pl\?show=anime&aid=\d+)') + if anidb_link: + anidb_link = anidb_link.replace("&", "&") + "&showallag=1#grouplist" + info_anidb(item, itemlist, anidb_link) + + return itemlist + + +def videos_mal(item): + # Método para episodios en crunchyroll y trailer/promocionales + itemlist = 
[] + + data = httptools.downloadpage(item.url, cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + if not "No episode video" in data and not "http://www.hulu.com/" in data: + patron = '<a class="video-list di-ib po-r" href="([^"]+)".*?data-src="([^"]+)".*?' \ + '<span class="title">([^<]+)<(.*?)<span class="episode-title" title="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for url, thumb, epi, info, title in matches: + if "icon-pay" in info and "icon-banned-youtube" in thumb: + continue + url = "https://myanimelist.net%s" % url + new_item = item.clone(url=url, thumbnail=thumb, action="play", text_color=color1) + new_item.title = epi + " - " + title.strip() + if "icon-pay" in info: + new_item.title += " [COLOR %s](Crunchyroll Premium)[/COLOR]" % color6 + if "icon-banned-youtube" in thumb: + new_item.title += " [COLOR %s][Sin subs en castellano][/COLOR]" % color4 + itemlist.append(new_item) + + next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="link-blue-box">More') + if next_page: + itemlist.append(item.clone(title=">> Más Episodios", url=next_page, text_color="")) + if itemlist: + itemlist.insert(0, item.clone(title="Episodios", action="", text_color=color3)) + + patron = '<a class="iframe.*?href="(https://www.youtube.*?)\?.*?data-src="([^"]+)".*?<span class="title">([^<]+)<' + matches = scrapertools.find_multiple_matches(data, patron) + if matches: + itemlist.append(item.clone(title="Tráilers/Promocionales", action="", text_color=color3)) + for url, thumb, title in matches: + url = url.replace("embed/", "watch?v=") + itemlist.append( + item.clone(title=title, url=url, server="youtube", action="play", thumbnail=thumb, text_color=color1)) + + return itemlist + + +def reco_mal(item): + # Recomendaciones de myanimelist + itemlist = [] + + data = httptools.downloadpage(item.url + "/userrecs", cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + bloque = scrapertools.find_single_match(data, + '<div class="anime-slide-block" id="anime_recommendation"(.*?)</ul></div>') + patron = '<div class="picSurround"><a href="([^"]+)".*?data-src="([^"]+)".*?<strong>(.*?)</strong>.*?' 
\ + '<div class="spaceit_pad">(.*?)</div>' + matches = scrapertools.find_multiple_matches(data, patron) + for url, thumb, title, plot in matches: + new_item = item.clone(infoLabels={'mediatype': item.contentType}, action="detalles_mal", fanart=default_fan, + title=title, contentType="", extra="", + contentTitle=title) + new_item.infoLabels["plot"] = scrapertools.htmlclean(plot) + new_item.url = "https://myanimelist.net%s" % url + new_item.thumbnail = thumb.replace("r/50x70/", "").replace(".jpg", "l.jpg") + itemlist.append(new_item) + + return itemlist + + +def indices_mal(item): + # Índices por temporadas y generos + itemlist = [] + url_base = "" + if "Temporadas" in item.title: + data = httptools.downloadpage("https://myanimelist.net/anime/season/archive", cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + matches = scrapertools.find_multiple_matches(data, '<td>\s*<a href="([^"]+)">\s*(.*?)\s*</a>') + for url, title in matches: + year = title.rsplit(" ", 1)[1] + thumbnail = item.thumbnail + if int(year) >= 1968: + thumbnail = url_base % year + title = title.replace("Winter", "Invierno").replace("Spring", "Primavera") \ + .replace("Summer", "Verano").replace("Fall", "Otoño") + itemlist.append(Item(channel=item.channel, action="season_mal", title=title, url=url, + thumbnail=thumbnail, text_color=color1, info=True, fanart=thumbnail)) + else: + data = httptools.downloadpage("https://myanimelist.net/anime.php", cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + bloque = scrapertools.find_single_match(data, 'Genres</div>(.*?)View More</a>') + matches = scrapertools.find_multiple_matches(bloque, '<a href="([^"]+)" class="genre-name-link">(.*?)</a>') + for url, title in matches: + genero = title.split(" (", 1)[0] + thumbnail = url_base % genero.lower().replace(" ", "%20") + if genero in ["Hentai", "Yaoi", "Yuri"] and not adult_mal: + continue + url = "https://myanimelist.net%s" % url + itemlist.append(Item(channel=item.channel, action="season_mal", title=title, url=url, + thumbnail=thumbnail, fanart=thumbnail, text_color=color1)) + + return itemlist + + +def season_mal(item): + # Scraper para temporadas de anime + itemlist = [] + + cookie_session = get_cookie_value() + header_mal = {'Cookie': '%s search_sort_anime=score; search_view=tile; is_logged_in=1' % cookie_session} + data = httptools.downloadpage(item.url, headers=header_mal, cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + if item.info: + patron = '<div class="anime-header">([^<]+)</div>(.*?)</div>\s*</div></div></div>' + bloques = scrapertools.find_multiple_matches(data, patron) + for head_title, bloque in bloques: + head_title = head_title.replace("(New)", "(Nuevos)").replace("(Continuing)", "(Continuación)") + patron = '<a href="([^"]+)" class="link-title">(.*?)</a>.*?<span>(\? ep|\d+ ep).*?' \ + '<div class="genres-inner js-genre-inner">(.*?)</div>.*?<div class="image".*?src="(.*?).jpg' \ + '.*?<span class="preline">(.*?)</span>.*?<div class="info">\s*(.*?)\s*-.*?(\d{4}).*?' 
\ + 'title="Score">\s*(N/A|\d\.\d+)' + matches = scrapertools.find_multiple_matches(bloque, patron) + if matches: + itemlist.append(Item(channel=item.channel, action="", title=head_title, text_color=color3)) + for url, scrapedtitle, epis, generos, thumb, plot, tipo, year, score in matches: + if ("Hentai" in generos or "Yaoi" in generos or "Yuri" in generos) and adult_mal: + continue + scrapedtitle = scrapedtitle.replace("(TV)", "").replace("(Movie)", "") + if tipo == "Movie": + title = scrapedtitle + " (%s)" % year + else: + title = scrapedtitle + " %ss (%s)" % (epis, year) + infoLabels = {} + if score != "N/A": + title += " [COLOR %s]%s[COLOR]" % (color6, score) + infoLabels["rating"] = float(score) + infoLabels["plot"] = scrapertools.htmlclean(plot) + infoLabels["year"] = year + + genres = scrapertools.find_multiple_matches(generos, 'title="([^"]+)"') + infoLabels["genre"] = ", ".join(genres) + tipo = tipo.lower() + if tipo == "movie" or tipo == "special": + extra = "movie" + contentType = "movie" + else: + extra = "tv" + contentType = "tvshow" + thumb = thumb.replace("r/167x242/", "") + "l.jpg" + itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title, + thumbnail=thumb, infoLabels=infoLabels, extra=extra, tipo=tipo, + contentTitle=scrapedtitle, contentType=contentType, text_color=color1, + fanart=default_fan)) + else: + patron = '<a href="([^"]+)" class="link-title">(.*?)</a>.*?<span>(\? ep|\d+ ep).*?' \ + '<div class="genres-inner js-genre-inner">(.*?)</div>.*?<div class="image".*?src="(.*?).jpg.*?' \ + '<span class="preline">(.*?)</span>.*?<div class="info">\s*(.*?)\s*-.*?(\d{4}).*?' \ + 'title="Score">\s*(N/A|\d\.\d+)' + matches = scrapertools.find_multiple_matches(data, patron) + for url, scrapedtitle, epis, generos, thumb, plot, tipo, year, score in matches: + if ("Hentai" in generos or "Yaoi" in generos or "Yuri" in generos) and not adult_mal: + continue + scrapedtitle = scrapedtitle.replace("(TV)", "").replace("(Movie)", "") + if tipo == "Movie": + title = scrapedtitle + " (%s)" % year + else: + title = scrapedtitle + " %ss (%s)" % (epis, year) + infoLabels = {} + if score != "N/A": + title += " [COLOR %s]%s[COLOR]" % (color6, score) + infoLabels["rating"] = float(score) + infoLabels["plot"] = scrapertools.htmlclean(plot) + infoLabels["year"] = year + + genres = scrapertools.find_multiple_matches(generos, 'title="([^"]+)"') + infoLabels["genre"] = ", ".join(genres) + tipo = tipo.lower() + if tipo == "movie" or tipo == "special": + extra = "movie" + contentType = "movie" + else: + extra = "tv" + contentType = "tvshow" + thumb = thumb.replace("r/167x242/", "") + "l.jpg" + itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title, + thumbnail=thumb, infoLabels=infoLabels, extra=extra, tipo=tipo, + contentTitle=scrapedtitle, contentType=contentType, text_color=color1, + fanart=default_fan)) + next_page = scrapertools.find_single_match(data, '<a class="link current" href.*?href="([^"]+)"') + if next_page: + itemlist.append(Item(channel=item.channel, action="season_mal", url=next_page, text_color="", + title=">> Página Siguiente", thumbnail=item.thumbnail)) + + return itemlist + + +def staff_mal(item): + # Dobladores/Equipo de rodaje + itemlist = [] + data = httptools.downloadpage(item.url, cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + patron = '<a href="(/character[^"]+)".*?data-src="([^"]+)".*?href=.*?>([^<]+)<.*?<small>([^<]+)</small>' \ + '(.*?)</table>' + matches = 
scrapertools.find_multiple_matches(data, patron) + if matches: + itemlist.append(item.clone(title="Personajes/Dobladores", action="", text_color=color3)) + for url, thumb, nombre, rol, voces in matches: + url = "https://myanimelist.net%s" % url + rol = rol.replace("Main", "Principal").replace("Supporting", "Secundario") + nombre = " %s [%s]" % (nombre, rol) + thumb = thumb.replace("r/46x64/", "") + itemlist.append(Item(channel=item.channel, action="detail_staff", url=url, text_color=color2, + thumbnail=thumb, fanart=default_fan, title=nombre, extra="character")) + patron_voces = '<a href="(/people[^"]+)">([^<]+)<.*?<small>([^<]+)</small>.*?data-src="([^"]+)"' + voces_match = scrapertools.find_multiple_matches(voces, patron_voces) + for vurl, vnombre, vidioma, vthumb in voces_match: + vurl = "https://myanimelist.net%s" % vurl + vnombre = " %s [%s]" % (vnombre, vidioma) + vthumb = vthumb.replace("r/46x64/", "") + itemlist.append(Item(channel=item.channel, action="detail_staff", url=vurl, text_color=color1, + thumbnail=vthumb, fanart=default_fan, title=vnombre)) + bloque = scrapertools.find_single_match(data, '<a name="staff">(.*?)</table>') + patron = '<a href="(/people[^"]+)".*?data-src="([^"]+)".*?href=.*?>([^<]+)<.*?<small>([^<]+)</small>' + matches = scrapertools.find_multiple_matches(bloque, patron) + if matches: + itemlist.append(item.clone(title="Staff", action="", text_color=color3)) + for url, thumb, nombre, rol in matches: + url = "https://myanimelist.net%s" % url + nombre = " %s [%s]" % (nombre, rol) + thumb = thumb.replace("r/46x64/", "") + itemlist.append(Item(channel=item.channel, action="detail_staff", url=url, text_color=color1, + thumbnail=thumb, fanart=default_fan, title=nombre)) + + return itemlist + + +def detail_staff(item): + itemlist = [] + data = httptools.downloadpage(item.url, cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + if item.extra == "character" and not "No biography written" in data: + bio = scrapertools.find_single_match(data, + 'itemprop="name">.*?<div class="normal_header".*?</div>(.*?)<div class="normal_header"') + bio = bio.replace("<br />", "\n") + bio = scrapertools.htmlclean(bio) + if not "questionmark" in item.thumbnail: + data_img = httptools.downloadpage(item.url + "/pictures", cookies=False).data + matches = scrapertools.find_multiple_matches(data_img, 'rel="gallery-character"><img src="([^"]+)"') + for i, thumb in enumerate(matches): + title = "Imagen %s" % (i + 1) + infoLabels = {'plot': bio} + itemlist.append( + Item(channel=item.channel, action="", title=title, infoLabels=infoLabels, text_color=color1, + thumbnail=thumb)) + + matches = scrapertools.find_multiple_matches(data, + '<a href="(/anime[^"]+)"><img src="([^"]+)".*?href.*?>(.*?)</a>') + if matches: + itemlist.append(Item(channel=item.channel, title="Animes donde aparece:", action="", text_color=color3)) + for url, thumb, title in matches: + url = "https://myanimelist.net%s" % url + thumb = thumb.replace("r/23x32/", "") + itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, text_color=color1, + thumbnail=thumb, fanart=default_fan, title=title, contentTitle=title)) + else: + patron_bio = '<div class="js-sns-icon-container icon-block ">.*?<div class="spaceit_pad">(.*?)</td>' + bio = scrapertools.find_single_match(data, patron_bio) + bio = scrapertools.htmlclean(bio.replace("</div>", "\n")) + infoLabels = {'plot': bio} + if not "No voice acting roles" in data: + itemlist.append(Item(channel=item.channel, title="Da 
voz a/en:", action="", text_color=color3, + thumbnail=item.thumbnail, infoLabels=infoLabels)) + bloque = scrapertools.find_single_match(data, 'Voice Acting Roles</div>(.*?)</table>') + patron = '<a href="(/anime[^"]+)"><img data-src="([^"]+)".*?href.*?>(.*?)</a>.*?href="(/character[^"]+)".*?' \ + '>(.*?)</a>.*?data-src="([^"]+)"' + matches = scrapertools.find_multiple_matches(bloque, patron) + for url, thumb, title, url_p, personaje, thumb_p in matches: + url = "https://myanimelist.net%s" % url + url_p = "https://myanimelist.net%s" % url_p + thumb = thumb.replace("r/46x64/", "") + thumb_p = thumb_p.replace("r/46x64/", "") + itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, text_color=color2, + thumbnail=thumb, fanart=default_fan, title=title, contentTitle=title)) + itemlist.append(Item(channel=item.channel, action="detail_staff", url=url_p, text_color=color1, + thumbnail=thumb_p, fanart=default_fan, title=" %s" % personaje, + extra="character")) + + if not "No staff positions" in data: + itemlist.append(Item(channel=item.channel, title="Staff en animes:", action="", text_color=color3, + thumbnail=item.thumbnail, infoLabels=infoLabels)) + bloque = scrapertools.find_single_match(data, 'Anime Staff Positions</div>(.*?)</table>') + patron = '<a href="(/anime[^"]+)"><img data-src="([^"]+)".*?href.*?>(.*?)</a>.*?<small>(.*?)</div>' + matches = scrapertools.find_multiple_matches(bloque, patron) + for url, thumb, title, rol in matches: + url = "https://myanimelist.net%s" % url + thumb = thumb.replace("r/46x64/", "") + rol = scrapertools.htmlclean(rol) + titulo = "%s [COLOR %s][%s][/COLOR]" % (title, color6, rol) + itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, text_color=color2, + thumbnail=thumb, fanart=default_fan, title=titulo, contentTitle=title)) + + return itemlist + + +def busqueda_mal(item): + # Scraper para búsquedas en myanimelist + itemlist = [] + + cookie_session = get_cookie_value() + header_mal = {'Cookie': '%s search_sort_anime=score; search_view=tile; is_logged_in=1' % cookie_session} + data = httptools.downloadpage(item.url, headers=header_mal, cookies=False).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + patron = '<a class="hoverinfo_trigger" href="([^"]+)".*?(?:data-src|src)="([^"]+)".*?' 
\ + '<div class="hoverinfo".*?href.*?><strong>([^<]+)<.*?<div class="pt4">(.*?)<' \ + '.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>.*?<td.*?>(.*?)</td>' + matches = scrapertools.find_multiple_matches(data, patron) + for url, thumb, titulo, plot, tipo, epis, rating, date in matches: + infolabels = {"mediatype": "tvshow"} + contentType = "tvshow" + extra = "tv" + titulo = titulo.strip() + tipo = tipo.strip() + rating = rating.strip() + epis = epis.strip() + infolabels["plot"] = scrapertools.htmlclean(plot.strip()) + thumb = thumb.replace("r/50x70/", "").replace(".jpg", "l.jpg") + show = titulo + contentitle = titulo + title = titulo + try: + year = date.strip().rsplit("-", 1)[1] + if year.isdigit(): + if int(year) < 30: + year = "20%s" % year + else: + year = "19%s" % year + infolabels["year"] = year + if not year in title: + title += " (%s)" % year + except: + import traceback + logger.error(traceback.format_exc()) + + if tipo == "Movie" or tipo == "OVA": + infolabels["mediatype"] = "movie" + contentType = "movie" + extra = "movie" + show = "" + + if epis and tipo != "Movie": + title += " %s eps" % epis + if rating != "0.00" and rating != "N/A": + infolabels["rating"] = float(rating) + title += " [COLOR %s]%s[/COLOR]" % (color6, rating) + itemlist.append(Item(channel=item.channel, title=title, action="detalles_mal", url=url, show=show, + thumbnail=thumb, infoLabels=infolabels, contentTitle=contentitle, text_color=color1, + contentType=contentType, tipo=tipo.lower(), extra=extra)) + + if not "&show=" in item.url: + next_page = item.url + "&show=50" + else: + pagina = int(item.url.rsplit("=", 1)[1]) + next_page = item.url.replace("&show=%s" % str(pagina), "&show=%s" % str(pagina + 50)) + + check_page = next_page.replace("https://myanimelist.net/anime.php", "") + if check_page in data: + itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color="")) + else: + check_page = check_page.replace("[", "%5B").replace("]", "%5D") + if check_page in data: + itemlist.append(item.clone(title=">> Página Siguiente", url=next_page, text_color="")) + + return itemlist + + +def info_anidb(item, itemlist, url): + # Extrae info, puntuación y fansubs en anidb + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + infoLabels = {'mediatype': item.contentType} + plot = scrapertools.find_single_match(data, 'itemprop="description">(.*?)</div>') + infoLabels["plot"] = scrapertools.htmlclean(plot) + + generos = scrapertools.find_multiple_matches(data, '<div class="tag".*?<span class="tagname">(.*?)</span>') + for i, genero in enumerate(generos): + generos[i] = genero.capitalize() + infoLabels["genre"] = ", ".join(generos) + rating = scrapertools.find_single_match(data, 'itemprop="ratingValue">(.*?)</span>') + try: + infoLabels["rating"] = float(rating) + except: + pass + infoLabels["votes"] = scrapertools.find_single_match(data, 'itemprop="ratingCount">(.*?)</span>') + thumbnail = scrapertools.find_single_match(data, '<div class="image">.*?src="([^"]+)"') + if infoLabels: + title = "Info en AniDB [COLOR %s]%s[/COLOR]" % (color6, rating) + if re.search(r'(?:subtitle|audio) | language: spanish"', data): + title += " - [COLOR %s]Fansubs en español:[/COLOR]" % color3 + itemlist.append(Item(channel=item.channel, title=title, infoLabels=infoLabels, action="", + thumbnail=thumbnail, text_color=color4)) + + if re.search(r'(?:subtitle|audio) | language: spanish"', data): + epi_total = scrapertools.find_single_match(data, 
'itemprop="numberOfEpisodes">([^<]+)</span>') + patron = '<td class="name group">.*?title="([^"]+)">(.*?)</a>.*?>([^<]+)</a>.*?' \ + '<td class="epno lastep">([^<]+)</td>.*?title="audio(.*?)</td>.*?' \ + 'class="source" title="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) + for fansub, abrev, estado, epis, lang, source in matches: + if not "spanish" in lang: + continue + title = " " + fansub + if abrev != title: + title += " [%s]" % abrev + estado = estado.replace("complete", "Completa").replace("finished", "Terminada") \ + .replace("stalled", "Pausa").replace("dropped", "Abandonada") + title += " [COLOR %s](%s)[/COLOR] %s/%s [%s]" % (color6, estado, epis, epi_total, source) + itemlist.append(Item(channel=item.channel, title=title, infoLabels=infoLabels, action="", + thumbnail=thumbnail, text_color=color4)) + + +def filtro_mal(item): + logger.info() + + list_controls = [] + valores = {} + dict_values = None + # Se utilizan los valores por defecto/guardados + valores_guardados = config.get_setting("filtro_defecto_mal", item.channel) + if valores_guardados: + dict_values = valores_guardados + + list_controls.append({'id': 'keyword', 'label': 'Palabra Clave', 'enabled': True, + 'type': 'text', 'default': '', 'visible': True}) + list_controls.append({'id': 'tipo', 'label': 'Tipo', 'enabled': True, + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[1]['lvalues'] = ['Especial', 'OVA', 'Película', 'Serie', 'Cualquiera'] + valores["tipo"] = ['4', '2', '3', '1', '0'] + list_controls.append({'id': 'valoracion', 'label': 'Valoración', 'enabled': True, + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[2]['lvalues'] = ['(1) Grotesca', '(2) Horrible', '(3) Muy mala', '(4) Mala', + '(5) Regular', '(6) Pasable', '(7) Buena', '(8) Muy buena', + '(9) Genial', '(10) Obra maestra', 'Cualquiera'] + valores["valoracion"] = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '0'] + + list_controls.append({'id': 'estado', 'label': 'Estado', 'enabled': True, + 'type': 'list', 'default': -1, 'visible': True}) + list_controls[3]['lvalues'] = ['Por estrenar', 'En emisión', 'Terminada', 'Cualquiera'] + valores["estado"] = ['3', '1', '2', '0'] + + try: + data = httptools.downloadpage('https://myanimelist.net/anime.php', cookies=False).data + + patron = 'name="genre\[\]" type="checkbox" value="([^"]+)">.*?>([^<]+)<' + generos = scrapertools.find_multiple_matches(data, patron) + if generos: + list_controls.append({'id': 'labelgenre', 'enabled': True, 'type': 'label', 'default': None, + 'label': 'Selecciona uno, ninguno o más de un género', + 'visible': True, 'color': '0xFFC52020'}) + for value, genre in generos: + list_controls.append({'id': 'genre' + value, 'label': genre, 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + except: + pass + + list_controls.append({'id': 'espacio', 'label': '', 'enabled': False, + 'type': 'label', 'default': None, 'visible': True}) + list_controls.append({'id': 'save', 'label': 'Establecer como filtro por defecto', 'enabled': True, + 'type': 'bool', 'default': False, 'visible': True}) + + item.valores = valores + return platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, + caption="Filtra la búsqueda", item=item, callback='callback_mal') + + +def callback_mal(item, values): + values_copy = values.copy() + # Guarda el filtro para que sea el que se cargue por defecto + if "save" in values and values["save"]: + values_copy.pop("save") + 
config.set_setting("filtro_defecto_mal", values_copy, item.channel) + + genero_ids = [] + for v in values: + if "genre" in v: + if values[v]: + genero_ids.append("genre[%s]=%s" % (len(genero_ids), v.replace('genre', ''))) + + genero_ids = "&".join(genero_ids) + query = values["keyword"].replace(" ", "%20") + tipo = item.valores["tipo"][values["tipo"]] + valoracion = item.valores["valoracion"][values["valoracion"]] + estado = item.valores["estado"][values["estado"]] + + item.url = "https://myanimelist.net/anime.php?q=%s&type=%s&score=%s&status=%s" \ + "&p=0&r=0&sm=0&sd=0&sy=0&em=0&ed=0&ey=0&c[0]=a&c[1]=b&c[2]=c&c[3]=d&c[4]=f&gx=0" \ + % (query, tipo, valoracion, estado) + if genero_ids: + item.url += "&" + genero_ids + + item.action = "busqueda_mal" + return busqueda_mal(item) + + +def musica_anime(item): + # Lista los animes y canciones disponibles similares al título del anime + logger.info() + itemlist = [] + + data = httptools.downloadpage("http://www.freeanimemusic.org/song_search.php", post=item.post).data + patron = "<span class='Estilo6'>(\d+).*?<span class='Estilo22'>([^<]+)<.*?<span class='Estilo22'>([^<]+)<" \ + ".*?href='http://www.freeanimemusic.org/anime/([^/]+)/index.php\?var=(\d+)" + matches = scrapertools.find_multiple_matches(data, patron) + animes = {} + action = "" + if config.is_xbmc(): + action = "move" + for number, song, anime, id_anime, id_song in matches: + if not animes.get(anime): + animes[anime] = [] + animes[anime].append( + Item(channel=item.channel, action=action, title="[COLOR %s][%s][/COLOR]" % (color6, anime.capitalize()), + url="", + number="0", thumbnail=item.thumbnail, fanart=item.fanart)) + title = "%s - %s" % (number, song) + animes[anime].append( + Item(channel=item.channel, action="play", title=title, server="directo", url=id_anime, song=id_song, + number=number, + thumbnail=item.thumbnail, fanart=item.fanart, text_color=color5)) + + for k, v in sorted(animes.items()): + v.sort(key=lambda x: (x.url, int(x.number))) + for lt in v: + if lt.action == "move": + lt.extra = len(v) + lt.folder = False + itemlist.append(lt) + + return itemlist + + +def login_mal(from_list=False): + logger.info() + + try: + user = config.get_setting("usuariomal", "tvmoviedb") + password = config.get_setting("passmal", "tvmoviedb") + generic = False + if not user or not password: + if not from_list: + user = bdec("Y3VlbnRhdHZtb3ZpZWRi") + password = bdec("dFlTakE3ekYzbng1") + generic = True + else: + return False, "Usuario y/o contraseña de Myanimelist en blanco", user + data = httptools.downloadpage("https://myanimelist.net/login.php?from=%2F").data + if re.search(r'(?i)' + user, data) and not generic: + return True, "", user + token = scrapertools.find_single_match(data, "name='csrf_token' content='([^']+)'") + response = httptools.downloadpage("https://myanimelist.net/logout.php", post="csrf_token=%s" % token) + post = "user_name=%s&password=%s&cookie=1&sublogin=Login&submit=1&csrf_token=%s" % (user, password, token) + response = httptools.downloadpage("https://myanimelist.net/login.php?from=%2F", post=post) + + if not re.search(r'(?i)' + user, response.data): + logger.error("Error en el login") + return False, "Error en el usuario y/o contraseña. Comprueba tus credenciales", user + else: + if generic: + return False, "Usuario y/o contraseña de Myanimelist en blanco", user + logger.info("Login correcto") + return True, "", user + except: + import traceback + logger.error(traceback.format_exc()) + return False, "Error durante el login. 
Comprueba tus credenciales" + + +def cuenta_mal(item): + # Menú de cuenta myanimelist + itemlist = [] + login, message, user = login_mal(True) + if not login: + itemlist.append(item.clone(action="configuracion", title=message, text_color=color4)) + else: + itemlist.append( + item.clone(action="items_mal", title="Viendo actualmente", text_color=color5, accion="lista_mal", + url="https://myanimelist.net/animelist/%s?status=1" % user, login=True)) + itemlist.append(item.clone(action="items_mal", title="Completados", text_color=color5, accion="lista_mal", + url="https://myanimelist.net/animelist/%s?status=2" % user, login=True)) + itemlist.append(item.clone(action="items_mal", title="En pausa", text_color=color5, accion="lista_mal", + url="https://myanimelist.net/animelist/%s?status=3" % user, login=True)) + itemlist.append(item.clone(action="items_mal", title="Descartados", text_color=color5, accion="lista_mal", + url="https://myanimelist.net/animelist/%s?status=4" % user, login=True)) + itemlist.append(item.clone(action="items_mal", title="Ver más adelante", text_color=color5, accion="lista_mal", + url="https://myanimelist.net/animelist/%s?status=6" % user, login=True)) + + return itemlist + + +def items_mal(item): + # Scraper para las listas personales + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| ", "", data) + data = re.sub(r"\s{2}", " ", data) + + data_items = scrapertools.find_single_match(data, 'data-items="([^"]+)"') + data_items = data_items.replace(""", "'").replace("null", "None") \ + .replace("false", "False").replace("true", "True") + data_items = eval(data_items) + for d in data_items: + if d["anime_airing_status"] == 1: + title = "[E]" + if d["anime_airing_status"] == 2: + title = "[F]" + else: + title = "[P]" + title += " %s [COLOR %s][%s/%s][/COLOR] (%s)" % ( + d["anime_title"], color6, d["num_watched_episodes"], d["anime_num_episodes"], d["anime_media_type_string"]) + title = title.replace("\\", "") + contentTitle = d["anime_title"].replace("\\", "") + thumbnail = d["anime_image_path"].replace("\\", "").replace("r/96x136/", "").replace(".jpg", "l.jpg") + url = "https://myanimelist.net" + d["anime_url"].replace("\\", "") + if d["score"] != 0: + title += " [COLOR %s]Punt:%s[/COLOR]" % (color4, d["score"]) + if title.count("(TV)") == 2: + title = title.replace("] (TV)", "]") + elif title.count("(Movie)") == 2: + title = title.replace("] (Movie)", "]") + tipo = "tvshow" + extra = "tv" + if "Movie" in d["anime_media_type_string"]: + tipo = "movie" + extra = "movie" + itemlist.append(Item(channel=item.channel, action="detalles_mal", url=url, title=title, thumbnail=thumbnail, + text_color=color3, + contentTitle=contentTitle, contentType=tipo, extra=extra, login=True)) + + if itemlist: + itemlist.insert(0, Item(channel=item.channel, action="", title="E=En emisión | F=Finalizado | P=Próximamente")) + + return itemlist + + +def menu_mal(item): + # Opciones cuenta MAL, añadir a lista/votar + itemlist = [] + + data = httptools.downloadpage(item.url).data + try: + status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa', '4': 'Descartados', + '6': 'Previstos para ver'} + button, estado = scrapertools.find_single_match(data, + 'myinfo_updateInfo"(.*?)>.*?option selected="selected" value="(\d+)"') + if "disabled" in button: + title_estado = ". Acciones disponibles:" + estado = "1" + else: + title_estado = ". En tu lista [COLOR %s]%s[/COLOR]" % (color6, status[estado]) + except: + title_estado = ". 
def menu_mal(item): + # Opciones cuenta MAL, añadir a lista/votar + itemlist = [] + + data = httptools.downloadpage(item.url).data + try: + status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa', '4': 'Descartados', + '6': 'Previstos para ver'} + button, estado = scrapertools.find_single_match(data, + 'myinfo_updateInfo"(.*?)>.*?option selected="selected" value="(\d+)"') + if "disabled" in button: + title_estado = ". Acciones disponibles:" + estado = "1" + else: + title_estado = ". En tu lista [COLOR %s]%s[/COLOR]" % (color6, status[estado]) + except: + estado = "1" + title_estado = ". Acciones disponibles:" + + score = scrapertools.find_single_match(data, 'id="myinfo_score".*?selected" value="(\d+)"') + if score != "0": + title_estado += " (Punt:%s)" % score + if "lista" in title_estado: + item.lista = True + + itemlist.append(item.clone(title="Anime: %s%s" % (item.contentTitle, title_estado), action="")) + status = {'1': 'Viendo Actualmente', '2': 'Completados', '3': 'En pausa', '4': 'Descartados', + '6': 'Previstos para ver'} + for key, value in status.items(): + if value not in title_estado: + itemlist.append( + item.clone(title="Añadir a lista %s" % value, action="addlist_mal", text_color=color5, value=key, + estado=value)) + + for i in range(10, 0, -1): + if i != int(score): + itemlist.append(item.clone(title="Puntuar con un [COLOR %s]%s[/COLOR]" % (color6, i), action="addlist_mal", + value=estado, estado=status[estado], score=i)) + return itemlist + + + def addlist_mal(item): + data = httptools.downloadpage(item.url).data + + anime_id = scrapertools.find_single_match(data, 'id="myinfo_anime_id" value="([^"]+)"') + if item.value == "2": + vistos = scrapertools.find_single_match(data, 'id="myinfo_watchedeps".*?<span id="curEps">(\d+)') + else: + vistos = scrapertools.find_single_match(data, 'id="myinfo_watchedeps".*?value="(\d+)"') + if not item.score: + item.score = scrapertools.find_single_match(data, 'id="myinfo_score".*?selected" value="(\d+)"') + token = scrapertools.find_single_match(data, "name='csrf_token' content='([^']+)'") + + post = {'anime_id': int(anime_id), 'status': int(item.value), 'score': int(item.score), + 'num_watched_episodes': int(vistos), 'csrf_token': token} + headers_mal = {'User-Agent': 'Mozilla/5.0', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', + 'Referer': item.url, 'X-Requested-With': 'XMLHttpRequest'} + url = "https://myanimelist.net/ownlist/anime/add.json" + if item.lista: + url = "https://myanimelist.net/ownlist/anime/edit.json" + data = httptools.downloadpage(url, post=jsontools.dump(post), headers=headers_mal, replace_headers=True).data + item.title = "En tu lista" + if config.is_xbmc(): + import xbmc + xbmc.executebuiltin("Container.Refresh") + + + def move(item): + import xbmcgui, xbmc + item_focus = str(item.extra) + wnd = xbmcgui.Window(xbmcgui.getCurrentWindowId()) + id = wnd.getFocusId() + return xbmc.executebuiltin('Control.Move(' + str(id) + ',' + item_focus + ')') + + + def play(item): + itemlist = [] + if not item.server: + data = httptools.downloadpage(item.url).data + if "Sorry, this video is not available to be embedded" in data: + id_video = scrapertools.find_single_match(data, '<div class="video-embed.*?-(\d+)\&aff') + crunchy = "https://www.crunchyroll.com/affiliate_iframeplayer?aff=af-12299-plwa&media_id=%s&video_format=106&video_quality=60&auto_play=0" % id_video + else: + crunchy = scrapertools.find_single_match(data, '<iframe src="([^"]+)"') + itemlist.append(item.clone(server="crunchyroll", url=crunchy)) + else: + if item.server == "directo" and item.song: + url = "" + data_music = jsontools.load( + httptools.downloadpage("http://www.musicaanime.org/scripts/resources/artists1.php").data) + for child in data_music["data"]: + if child["title"] == item.url.upper(): + url = "http://www.musicaanime.org/aannmm11/%s/imagen%s.mp3" % (child["artist"], item.song.zfill(3)) + break + if url: + itemlist.append(item.clone(url=url)) + else: + itemlist.append(item) + + return itemlist + + + def get_cookie_value(): + cookies = filetools.join(config.get_data_path(), 'cookies.dat') + cookiedata = 
filetools.read(cookies) + malsess = scrapertools.find_single_match(cookiedata, "myanimelist.*?MALHLOGSESSID\s+([A-z0-9\+\=]+)") + cookievalue = "MALHLOGSESSID=" + malsess + mal_id = scrapertools.find_single_match(cookiedata, "myanimelist.*?MALSESSIONID\s+([A-z0-9\+\=\-]+)") + if mal_id: + cookievalue += "; MALSESSIONID=%s;" % mal_id + + return cookievalue diff --git a/plugin.video.alfa/channels/tvvip.json b/plugin.video.alfa/channels/tvvip.json new file mode 100755 index 00000000..cccf69ed --- /dev/null +++ b/plugin.video.alfa/channels/tvvip.json @@ -0,0 +1,35 @@ +{ + "id": "tvvip", + "name": "TV-VIP", + "active": false, + "adult": false, + "language": "es", + "banner": "http://i.imgur.com/wyRk5AG.png", + "thumbnail": "http://i.imgur.com/gNHVlI4.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "04/01/2017", + "description": "Se desactiva el canal hasta posible arreglo." + } + ], + "categories": [ + "movie", + "tvshow", + "documentary" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/tvvip.py b/plugin.video.alfa/channels/tvvip.py new file mode 100755 index 00000000..da2f26cf --- /dev/null +++ b/plugin.video.alfa/channels/tvvip.py @@ -0,0 +1,806 @@ +# -*- coding: utf-8 -*- + +import os +import re +import unicodedata +import urllib + +from core import config +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item + +host = "http://tv-vip.com" +headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'], + ['Accept', 'application/json, text/javascript, */*; q=0.01'], + ['Accept-Language', 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3'], + ['Accept-Encoding', 'gzip, deflate'], + ['Connection', 'keep-alive'], + ['DNT', '1'], + ['Referer', 'http://tv-vip.com']] + +header_string = "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0" \ + "&Referer=http://tv-vip.com&Cookie=" + + +def mainlist(item): + logger.info() + item.viewmode = "movie" + itemlist = [] + + data = scrapertools.anti_cloudflare("http://tv-vip.com/json/playlist/home/index.json", host=host, headers=headers) + + head = header_string + get_cookie_value() + itemlist.append(Item(channel=item.channel, title="Películas", action="submenu", + thumbnail="http://tv-vip.com/json/playlist/peliculas/thumbnail.jpg" + head, + fanart="http://tv-vip.com/json/playlist/peliculas/background.jpg" + head, viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Series", action="submenu", + thumbnail="http://tv-vip.com/json/playlist/series/poster.jpg" + head, + fanart="http://tv-vip.com/json/playlist/series/background.jpg" + head, viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Versión Original", action="entradasconlistas", + url="http://tv-vip.com/json/playlist/version-original/index.json", + thumbnail="http://tv-vip.com/json/playlist/version-original/thumbnail.jpg" + head, + fanart="http://tv-vip.com/json/playlist/version-original/background.jpg" + head, + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Documentales", action="entradasconlistas", + url="http://tv-vip.com/json/playlist/documentales/index.json", + thumbnail="http://tv-vip.com/json/playlist/documentales/thumbnail.jpg" + head, + 
fanart="http://tv-vip.com/json/playlist/documentales/background.jpg" + head, + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Películas Infantiles", action="entradasconlistas", + url="http://tv-vip.com/json/playlist/peliculas-infantiles/index.json", + thumbnail="http://tv-vip.com/json/playlist/peliculas-infantiles/thumbnail.jpg" + head, + fanart="http://tv-vip.com/json/playlist/peliculas-infantiles/background.jpg" + head, + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Series Infantiles", action="entradasconlistas", + url="http://tv-vip.com/json/playlist/series-infantiles/index.json", + thumbnail="http://tv-vip.com/json/playlist/series-infantiles/thumbnail.jpg" + head, + fanart="http://tv-vip.com/json/playlist/series-infantiles/background.jpg" + head, + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", + thumbnail="http://i.imgur.com/gNHVlI4.png", fanart="http://i.imgur.com/9loVksV.png")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "%20") + if item.title == "Buscar...": item.extra = "local" + item.url = "http://tv-vip.com/video-prod/s/search?q=%s&n=100" % texto + + try: + return busqueda(item, texto) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def busqueda(item, texto): + logger.info() + itemlist = [] + + data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) + data = jsontools.load(data) + head = header_string + get_cookie_value() + for child in data["objectList"]: + infolabels = {} + + infolabels['year'] = child['year'] + if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + + if 'playListChilds' not in child: + infolabels['plot'] = child['description'] + type = "repo" + fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')) \ + .encode('ASCII', 'ignore').decode("utf-8") + title = child['name'] + infolabels['duration'] = child['duration'] + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] >= 1080: + quality = "[B] [1080p][/B]" + if child['name'] == "": + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + else: + type = "playlist" + infolabels['plot'] = "Contiene:\n" + "\n".join(child['playListChilds']) + "\n".join(child['repoChilds']) + fulltitle = child['id'] + title = "[COLOR red][LISTA][/COLOR] " + child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + \ + str(child['number']) + "[/COLOR])" + + # En caso de búsqueda global se filtran los resultados + if item.extra != "local": + if "+" in texto: texto = "|".join(texto.split("+")) + if not re.search(r'(?i)' + texto, title, flags=re.DOTALL): continue + + url = "http://tv-vip.com/json/%s/%s/index.json" % (type, child["id"]) + # Fanart + if child['hashBackground']: + fanart = "http://tv-vip.com/json/%s/%s/background.jpg" % (type, child["id"]) + else: + fanart = "http://tv-vip.com/json/%s/%s/thumbnail.jpg" % (type, child["id"]) + # Thumbnail + if 
child['hasPoster']: + thumbnail = "http://tv-vip.com/json/%s/%s/poster.jpg" % (type, child["id"]) + else: + thumbnail = fanart + thumbnail += head + fanart += head + + if type == 'playlist': + itemlist.insert(0, Item(channel=item.channel, action="entradasconlistas", title=bbcode_kodi2html(title), + url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, + infoLabels=infolabels, viewmode="movie_with_plot", folder=True)) + else: + itemlist.append(Item(channel=item.channel, action="findvideos", title=bbcode_kodi2html(title), url=url, + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle, + context="05", infoLabels=infolabels, viewmode="movie_with_plot", folder=True)) + + return itemlist + + +def submenu(item): + logger.info() + itemlist = [] + data = scrapertools.anti_cloudflare("http://tv-vip.com/json/playlist/home/index.json", host=host, headers=headers) + head = header_string + get_cookie_value() + if item.title == "Series": + itemlist.append(Item(channel=item.channel, title="Nuevos Capítulos", action="episodios", + url="http://tv-vip.com/json/playlist/nuevos-capitulos/index.json", + thumbnail="http://tv-vip.com/json/playlist/nuevos-capitulos/background.jpg" + head, + fanart="http://tv-vip.com/json/playlist/nuevos-capitulos/background.jpg" + head, + viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Más Vistas", action="series", + url="http://tv-vip.com/json/playlist/top-series/index.json", + thumbnail="http://tv-vip.com/json/playlist/top-series/thumbnail.jpg" + head, + fanart="http://tv-vip.com/json/playlist/top-series/background.jpg" + head, + contentTitle="Series", viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Últimas Series", action="series", + url="http://tv-vip.com/json/playlist/series/index.json", + thumbnail=item.thumbnail, fanart=item.fanart, contentTitle="Series", + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Lista de Series A-Z", action="series", + url="http://tv-vip.com/json/playlist/series/index.json", thumbnail=item.thumbnail, + fanart=item.fanart, contentTitle="Series", viewmode="movie_with_plot")) + else: + itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas", + url="http://tv-vip.com/json/playlist/000-novedades/index.json", + thumbnail="http://tv-vip.com/json/playlist/ultimas-peliculas/thumbnail.jpg" + head, + fanart="http://tv-vip.com/json/playlist/ultimas-peliculas/background.jpg" + head, + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas", + url="http://tv-vip.com/json/playlist/peliculas-mas-vistas/index.json", + thumbnail="http://tv-vip.com/json/playlist/peliculas-mas-vistas/thumbnail.jpg" + head, + fanart="http://tv-vip.com/json/playlist/peliculas-mas-vistas/background.jpg" + head, + viewmode="movie_with_plot")) + itemlist.append(Item(channel=item.channel, title="Categorías", action="cat", + url="http://tv-vip.com/json/playlist/peliculas/index.json", + thumbnail=item.thumbnail, fanart=item.fanart)) + itemlist.append(Item(channel=item.channel, title="Películas 3D", action="entradasconlistas", + url="http://tv-vip.com/json/playlist/3D/index.json", + thumbnail="http://tv-vip.com/json/playlist/3D/thumbnail.jpg" + head, + fanart="http://tv-vip.com/json/playlist/3D/background.jpg" + head, + viewmode="movie_with_plot")) + return itemlist + + +def cat(item): + logger.info() + itemlist = [] + + data = scrapertools.anti_cloudflare(item.url, host=host, 
headers=headers) + data = jsontools.load(data) + head = header_string + get_cookie_value() + exception = ["peliculas-mas-vistas", "ultimas-peliculas"] + for child in data["sortedPlaylistChilds"]: + if child["id"] not in exception: + url = "http://tv-vip.com/json/playlist/%s/index.json" % child["id"] + # Fanart + if child['hashBackground']: + fanart = "http://tv-vip.com/json/playlist/%s/background.jpg" % child["id"] + else: + fanart = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child["id"] + # Thumbnail + thumbnail = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child["id"] + thumbnail += head + fanart += head + title = child['id'].replace('-', ' ').capitalize().replace("Manga", "Animación/Cine Oriental") + title += " ([COLOR gold]" + str(child['number']) + "[/COLOR])" + itemlist.append( + Item(channel=item.channel, action="entradasconlistas", title=bbcode_kodi2html(title), url=url, + thumbnail=thumbnail, fanart=fanart, folder=True)) + + return itemlist + + +def entradas(item): + logger.info() + itemlist = [] + if item.title == "Nuevos Capítulos": + context = "5" + else: + context = "05" + data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) + data = jsontools.load(data) + head = header_string + get_cookie_value() + for child in data["sortedRepoChilds"]: + infolabels = {} + + infolabels['plot'] = child['description'] + infolabels['year'] = child['year'] + if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + infolabels['duration'] = child['duration'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] + # Fanart + if child['hashBackground']: + fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child["id"] + else: + fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + thumbnail += head + fanart += head + + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] >= 1080: + quality = "[B] [1080p][/B]" + fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \ + .decode("utf-8") + if child['name'] == "": + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + + itemlist.append(Item(channel=item.channel, action="findvideos", server="", title=title, url=url, + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, context=context)) + + return itemlist + + +def entradasconlistas(item): + logger.info() + itemlist = [] + + data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) + data = jsontools.load(data) + head = header_string + get_cookie_value() + # Si hay alguna lista + contentSerie = False + contentList = False + if data['playListChilds']: + itemlist.append(Item(channel=item.channel, title="**LISTAS**", action="", text_color="red", text_bold=True, + folder=False)) + for child in data['sortedPlaylistChilds']: + infolabels = {} + + infolabels['plot'] = "Contiene:\n" + "\n".join(child['playListChilds']) + "\n".join(child['repoChilds']) + if 
child['seasonNumber'] and not contentList and re.search(r'(?i)temporada', child['id']): + infolabels['season'] = child['seasonNumber'] + contentSerie = True + else: + contentSerie = False + contentList = True + title = child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + str(child['number']) + "[/COLOR])" + url = "http://tv-vip.com/json/playlist/%s/index.json" % child["id"] + thumbnail = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child["id"] + if child['hashBackground']: + fanart = "http://tv-vip.com/json/playlist/%s/background.jpg" % child["id"] + else: + fanart = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child["id"] + + thumbnail += head + fanart += head + itemlist.append(Item(channel=item.channel, action="entradasconlistas", title=bbcode_kodi2html(title), + url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=child['id'], + infoLabels=infolabels, viewmode="movie_with_plot")) + else: + contentList = True + if data["sortedRepoChilds"] and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="**VÍDEOS**", action="", text_color="blue", text_bold=True, + folder=False)) + + for child in data["sortedRepoChilds"]: + infolabels = {} + + infolabels['plot'] = child['description'] + infolabels['year'] = data['year'] + if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + infolabels['duration'] = child['duration'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] + # Fanart + if child['hashBackground']: + fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child["id"] + else: + fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + thumbnail += head + fanart += head + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] >= 1080: + quality = "[B] [1080p][/B]" + fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \ + .decode("utf-8") + if child['name'] == "": + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + + itemlist.append(Item(channel=item.channel, action="findvideos", title=bbcode_kodi2html(title), url=url, + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, context="05", viewmode="movie_with_plot", folder=True)) + + # Se añade item para añadir la lista de vídeos a la videoteca + if data['sortedRepoChilds'] and len(itemlist) > 0 and contentList: + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, text_color="green", title="Añadir esta lista a la videoteca", + url=item.url, action="listas")) + elif contentSerie: + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + action="series_library", fulltitle=data['name'], show=data['name'], + text_color="green")) + + return itemlist + + +def series(item): + logger.info() + itemlist = [] + + data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) + data = 
jsontools.load(data) + head = header_string + get_cookie_value() + exception = ["top-series", "nuevos-capitulos"] + for child in data["sortedPlaylistChilds"]: + if child["id"] not in exception: + infolabels = {} + + infolabels['plot'] = child['description'] + infolabels['year'] = child['year'] + if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['votes'] = child['rateCount'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + infolabels['mediatype'] = "episode" + if child['seasonNumber']: infolabels['season'] = child['seasonNumber'] + url = "http://tv-vip.com/json/playlist/%s/index.json" % child["id"] + # Fanart + if child['hashBackground']: + fanart = "http://tv-vip.com/json/playlist/%s/background.jpg" % child["id"] + else: + fanart = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = "http://tv-vip.com/json/playlist/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + thumbnail += head + fanart += head + + if item.contentTitle == "Series": + if child['name'] != "": + fulltitle = unicodedata.normalize('NFD', unicode(child['name'].split(" Temporada")[0], 'utf-8')) \ + .encode('ASCII', 'ignore').decode("utf-8") + fulltitle = fulltitle.replace('-', '') + title = child['name'] + " (" + child['year'] + ")" + else: + title = fulltitle = child['id'].capitalize() + if "Temporada" not in title: + title += " [Temporadas: [COLOR gold]" + str(child['numberOfSeasons']) + "[/COLOR]]" + elif item.title == "Más Vistas": + title = title.replace("- Temporada", "--- Temporada") + else: + if data['name'] != "": + fulltitle = unicodedata.normalize('NFD', unicode(data['name'], 'utf-8')).encode('ASCII', 'ignore') \ + .decode("utf-8") + if child['seasonNumber']: + title = data['name'] + " --- Temporada " + child['seasonNumber'] + \ + " [COLOR gold](" + str(child['number']) + ")[/COLOR]" + else: + title = child['name'] + " [COLOR gold](" + str(child['number']) + ")[/COLOR]" + else: + fulltitle = unicodedata.normalize('NFD', unicode(data['id'], 'utf-8')).encode('ASCII', 'ignore') \ + .decode("utf-8") + if child['seasonNumber']: + title = data['id'].capitalize() + " --- Temporada " + child['seasonNumber'] + \ + " [COLOR gold](" + str(child['number']) + ")[/COLOR]" + else: + title = data['id'].capitalize() + " [COLOR gold](" + str(child['number']) + ")[/COLOR]" + if not child['playListChilds']: + action = "episodios" + else: + action = "series" + itemlist.append(Item(channel=item.channel, action=action, title=bbcode_kodi2html(title), url=url, server="", + thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, context="25", viewmode="movie_with_plot", folder=True)) + if len(itemlist) == len(data["sortedPlaylistChilds"]) and item.contentTitle != "Series": + + itemlist.sort(key=lambda item: item.title, reverse=True) + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", show=data['name'], + text_color="green", extra="series_library")) + + if item.title == "Últimas Series": return itemlist + if item.title == "Lista de Series A-Z": itemlist.sort(key=lambda item: item.fulltitle) + + if data["sortedRepoChilds"] and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, title="**VÍDEOS RELACIONADOS/MISMA 
TEMÁTICA**", text_color="blue", + text_bold=True, action="", folder=False)) + for child in data["sortedRepoChilds"]: + infolabels = {} + + if child['description']: + infolabels['plot'] = data['description'] + else: + infolabels['plot'] = child['description'] + infolabels['year'] = data['year'] + if not child['tags']: + infolabels['genre'] = ', '.join([x.strip() for x in data['tags']]) + else: + infolabels['genre'] = ', '.join([x.strip() for x in child['tags']]) + infolabels['rating'] = child['rate'].replace(',', '.') + infolabels['duration'] = child['duration'] + if child['cast']: infolabels['cast'] = child['cast'].split(",") + infolabels['director'] = child['director'] + + url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] + # Fanart + if child['hashBackground']: + fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child["id"] + else: + fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + thumbnail += head + fanart += head + + if child['height'] < 720: + quality = "[B] [SD][/B]" + elif child['height'] < 1080: + quality = "[B] [720p][/B]" + elif child['height'] >= 1080: + quality = "[B] [1080p][/B]" + fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \ + .decode("utf-8") + + if child['name'] == "": + title = child['id'].rsplit(".", 1)[0] + else: + title = child['name'] + if child['year']: + title += " (" + child['year'] + ")" + title += quality + + itemlist.append(Item(channel=item.channel, action="findvideos", title=bbcode_kodi2html(title), url=url, + server="", thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels, + contentTitle=fulltitle, context="25", viewmode="movie_with_plot", folder=True)) + if item.extra == "new": + itemlist.sort(key=lambda item: item.title, reverse=True) + + return itemlist + + +def episodios(item): + logger.info() + logger.info("categoriaaa es " + item.tostring()) + itemlist = [] + # Redirección para actualización de videoteca + if item.extra == "series_library": + itemlist = series_library(item) + return itemlist + + data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) + data = jsontools.load(data) + head = header_string + get_cookie_value() + # Se prueba un método u otro porque algunas series no están bien listadas + if data["sortedRepoChilds"]: + for child in data["sortedRepoChilds"]: + if item.infoLabels: + item.infoLabels['duration'] = str(child['duration']) + item.infoLabels['season'] = str(data['seasonNumber']) + item.infoLabels['episode'] = str(child['episode']) + item.infoLabels['mediatype'] = "episode" + contentTitle = item.fulltitle + "|" + str(data['seasonNumber']) + "|" + str(child['episode']) + # En caso de venir del apartado nuevos capítulos se redirige a la función series para mostrar los demás + if item.title == "Nuevos Capítulos": + url = "http://tv-vip.com/json/playlist/%s/index.json" % child["season"] + action = "series" + extra = "new" + else: + url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] + action = "findvideos" + extra = "" + if child['hasPoster']: + thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child["id"] + thumbnail += head + try: + title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] + except: + title = fulltitle 
= child['id'] + itemlist.append(item.clone(action=action, server="", title=title, url=url, thumbnail=thumbnail, + fanart=item.fanart, fulltitle=fulltitle, contentTitle=contentTitle, context="35", + viewmode="movie", extra=extra, show=item.fulltitle, folder=True)) + else: + for child in data["repoChilds"]: + url = "http://tv-vip.com/json/repo/%s/index.json" % child + if data['hasPoster']: + thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child + else: + thumbnail = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child + thumbnail += head + title = fulltitle = child.capitalize().replace('_', ' ') + itemlist.append(item.clone(action="findvideos", server="", title=title, url=url, thumbnail=thumbnail, + fanart=item.fanart, fulltitle=fulltitle, contentTitle=item.fulltitle, + context="25", show=item.fulltitle, folder=True)) + + # Opción de añadir a la videoteca en casos de series de una única temporada + if len(itemlist) > 0 and not "---" in item.title and item.title != "Nuevos Capítulos": + if config.get_videolibrary_support() and item.show == "": + if "-" in item.title: + show = item.title.split('-')[0] + else: + show = item.title.split('(')[0] + itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="green", + url=item.url, action="add_serie_to_library", show=show, extra="series_library")) + return itemlist + + +def series_library(item): + logger.info() + # Funcion unicamente para añadir/actualizar series a la libreria + lista_episodios = [] + show = item.show.strip() + + data_serie = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) + data_serie = jsontools.load(data_serie) + # Para series que en la web se listan divididas por temporadas + if data_serie["sortedPlaylistChilds"]: + for season_name in data_serie["sortedPlaylistChilds"]: + url_season = "http://tv-vip.com/json/playlist/%s/index.json" % season_name['id'] + data = scrapertools.anti_cloudflare(url_season, host=host, headers=headers) + data = jsontools.load(data) + + if data["sortedRepoChilds"]: + for child in data["sortedRepoChilds"]: + url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] + fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] + try: + check_filename = scrapertools.get_season_and_episode(fulltitle) + except: + fulltitle += " " + str(data['seasonNumber']) + "x00" + lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", + title=fulltitle, extra=url, url=item.url, fulltitle=fulltitle, + contentTitle=fulltitle, show=show)) + else: + for child in data["repoChilds"]: + url = "http://tv-vip.com/json/repo/%s/index.json" % child + fulltitle = child.capitalize().replace('_', ' ') + try: + check_filename = scrapertools.get_season_and_episode(fulltitle) + except: + fulltitle += " " + str(data['seasonNumber']) + "x00" + lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", + title=fulltitle, extra=url, url=item.url, contentTitle=fulltitle, + fulltitle=fulltitle, show=show)) + # Para series directas de una sola temporada + else: + data = data_serie + if data["sortedRepoChilds"]: + for child in data["sortedRepoChilds"]: + url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] + fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1] + try: + check_filename = scrapertools.get_season_and_episode(fulltitle) + except: + fulltitle += " 1x00" + lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", 
title=fulltitle, + contentTitle=fulltitle, url=item.url, extra=url, fulltitle=fulltitle, + show=show)) + else: + for child in data["repoChilds"]: + url = "http://tv-vip.com/json/repo/%s/index.json" % child + fulltitle = child.capitalize().replace('_', ' ') + try: + check_filename = scrapertools.get_season_and_episode(fulltitle) + except: + fulltitle += " 1x00" + lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", title=fulltitle, + contentTitle=fulltitle, url=item.url, extra=url, fulltitle=fulltitle, + show=show)) + + return lista_episodios + + +def findvideos(item): + logger.info() + itemlist = [] + + # En caso de llamarse a la función desde una serie de la videoteca + if item.extra.startswith("http"): item.url = item.extra + data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) + data = jsontools.load(data) + id = urllib.quote(data['id']) + for child in data["profiles"].keys(): + videopath = urllib.quote(data["profiles"][child]['videoUri']) + for i in range(0, len(data["profiles"][child]['servers'])): + url = data["profiles"][child]['servers'][i]['url'] + videopath + size = " " + data["profiles"][child]["sizeHuman"] + resolution = " [" + (data["profiles"][child]['videoResolution']) + "]" + title = "Ver vídeo en " + resolution.replace('1920x1080', 'HD-1080p') + if i == 0: + title += size + " [COLOR purple]Mirror " + str(i + 1) + "[/COLOR]" + else: + title += size + " [COLOR green]Mirror " + str(i + 1) + "[/COLOR]" + # Para poner enlaces de mayor calidad al comienzo de la lista + if data["profiles"][child]["profileId"] == "default": + itemlist.insert(i, item.clone(action="play", server="directo", title=bbcode_kodi2html(title), url=url, + contentTitle=item.fulltitle, viewmode="list", extra=id, folder=False)) + else: + itemlist.append(item.clone(action="play", server="directo", title=bbcode_kodi2html(title), url=url, + contentTitle=item.fulltitle, viewmode="list", extra=id, folder=False)) + + itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler", + text_color="magenta")) + if len(itemlist) > 0 and item.extra == "": + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color="green", + contentTitle=item.fulltitle, url=item.url, action="add_pelicula_to_library", + infoLabels={'title': item.fulltitle}, extra="findvideos", fulltitle=item.fulltitle)) + + return itemlist + + +def play(item): + import time + import requests + logger.info() + itemlist = [] + + cookie = get_cookie_value() + headers_play = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0', + 'Accept': 'application/json, text/javascript, */*; q=0.01', + 'Accept-Language': 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3', + 'Accept-Encoding': 'gzip, deflate', + 'Connection': 'keep-alive', + 'DNT': '1', + 'Referer': 'http://tv-vip.com/film/' + item.extra + '/', + 'Cookie': cookie} + + head = "|User-Agent=" + headers_play['User-Agent'] + "&Referer=" + headers_play['Referer'] + "&Cookie=" + \ + headers_play['Cookie'] + uri = scrapertools.find_single_match(item.url, '(/transcoder[\w\W]+)') + uri_request = "http://tv-vip.com/video-prod/s/uri?uri=%s&_=%s" % (uri, int(time.time())) + + data = requests.get(uri_request, headers=headers_play) + data = jsontools.load(data.text) + url = item.url.replace("/transcoder/", "/s/transcoder/") + "?tt=" + str(data['tt']) + \ + "&mm=" + data['mm'] + "&bb=" + data['bb'] + head + 
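# The /video-prod/s/uri request above returns the tt/mm/bb tokens that get appended to the + # stream URL; the trailing "|User-Agent=...&Referer=...&Cookie=..." block uses the Kodi + # pipe-header convention so the player sends the same headers as the scraping session. + 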
itemlist.append(item.clone(action="play", server="directo", url=url, folder=False)) + return itemlist + + +def listas(item): + logger.info() + # Para añadir listas a la videoteca en carpeta CINE + itemlist = [] + data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers) + data = jsontools.load(data) + head = header_string + get_cookie_value() + for child in data["sortedRepoChilds"]: + infolabels = {} + + # Fanart + if child['hashBackground']: + fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child["id"] + else: + fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child["id"] + # Thumbnail + if child['hasPoster']: + thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child["id"] + else: + thumbnail = fanart + thumbnail += head + fanart += head + + url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"] + if child['name'] == "": + title = scrapertools.slugify(child['id'].rsplit(".", 1)[0]) + else: + title = scrapertools.slugify(child['name']) + title = title.replace('-', ' ').replace('_', ' ').capitalize() + infolabels['title'] = title + try: + from core import videolibrarytools + new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos", + thumbnail=thumbnail, infoLabels=infolabels, category="Cine") + videolibrarytools.add_pelicula_to_library(new_item) + error = False + except: + error = True + import traceback + logger.error(traceback.format_exc()) + + if not error: + itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la videoteca', + action="", folder=False)) + else: + itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso', + action="", folder=False)) + + return itemlist + + +def get_cookie_value(): + cookies = os.path.join(config.get_data_path(), 'cookies', 'tv-vip.com.dat') + cookiedatafile = open(cookies, 'r') + cookiedata = cookiedatafile.read() + cookiedatafile.close() + cfduid = scrapertools.find_single_match(cookiedata, "tv-vip.*?__cfduid\s+([A-Za-z0-9\+\=]+)") + cfduid = "__cfduid=" + cfduid + return cfduid + + +def bbcode_kodi2html(text): + if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"): + import re + text = re.sub(r'\[COLOR\s([^\]]+)\]', + r'<span style="color: \1">', + text) + text = text.replace('[/COLOR]', '</span>') \ + .replace('[CR]', '<br>') \ + .replace('[B]', '<strong>') \ + .replace('[/B]', '</strong>') \ + .replace('"color: white"', '"color: auto"') + + return text diff --git a/plugin.video.alfa/channels/txibitsoft.json b/plugin.video.alfa/channels/txibitsoft.json new file mode 100755 index 00000000..0bd81b94 --- /dev/null +++ b/plugin.video.alfa/channels/txibitsoft.json @@ -0,0 +1,43 @@ +{ + "id": "txibitsoft", + "name": "Txibitsoft", + "active": true, + "adult": false, + "language": "es", + "banner": "txibitsoft.png", + "thumbnail": "http://s27.postimg.org/hx5ohryxf/tblogo.jpg", + "version": 1, + "changes": [ + { + "date": "07/12/2016", + "description": "Correciones código. Adaptación a Infoplus" + }, + { + "date": "04/04/2017", + "description": "Migración a Httptools.Algunas mejoras código" + }, + { + "date": "08/05/2017", + "description": "Workaround en error certificado ssl en algunos OS. 
Mejoras secciones de peliculas" + }, + { + "date": "28/06/2017", + "description": "Corrección código y algunas mejoras" + } + ], + "categories": [ + "torrent", + "tvshow", + "movie" + ], + "settings": [ + { + "label": "Incluir en busqueda global", + "type": "bool", + "id": "include_in_global_search", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/txibitsoft.py b/plugin.video.alfa/channels/txibitsoft.py new file mode 100755 index 00000000..1732546b --- /dev/null +++ b/plugin.video.alfa/channels/txibitsoft.py @@ -0,0 +1,1479 @@ +# -*- coding: utf-8 -*- + +import os +import re +import unicodedata +import urllib + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + +host = "http://www.txibitsoft.com/" + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="[COLOR white][B]Peliculas[/B][/COLOR]", action="peliculas", + url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Otras%20Peliculas%27&subcategoria=peliculas&pagina=1", + thumbnail="http://imgur.com/v6iC6Er.jpg", fanart="http://imgur.com/tJUbfeC.jpg")) + itemlist.append(Item(channel=item.channel, title="[COLOR orange][B]Alta Calidad[/B][/COLOR]", action="", url="", + thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg")) + + itemlist.append(Item(channel=item.channel, title=" [COLOR white][B]1080[/B][/COLOR]", action="peliculas", + url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias='Cine%20Alta%20Definicion%20HD'&subcategoria=1080p&pagina=1", + thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg")) + itemlist.append(Item(channel=item.channel, title=" [COLOR white][B]720[/B][/COLOR]", action="peliculas", + url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Peliculas%20x264%20MKV%27&pagina=1", + thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg")) + itemlist.append(Item(channel=item.channel, title=" [COLOR white][B]4k[/B][/COLOR]", action="peliculas", + url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Cine%20Alta%20Definicion%20HD%27&subcategoria=4KULTRAHD&pagina=1", + thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg")) + title = "[COLOR white][B]Series[/B][/COLOR]" + itemlist.append( + Item(channel=item.channel, title=" [COLOR white][B]BdRemux[/B][/COLOR]", action="peliculas", + url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Cine%20Alta%20Definicion%20HD%27&subcategoria=BdRemux%201080p&pagina=1", + thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg")) + + itemlist.append( + Item(channel=item.channel, title=" [COLOR white][B]FullBluRay[/B][/COLOR]", action="peliculas", + url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Cine%20Alta%20Definicion%20HD%27&subcategoria=FULLBluRay&pagina=1", + thumbnail="http://imgur.com/KXhvWIc.jpg", 
fanart="http://imgur.com/4kTqOKE.jpg")) + + itemlist.append(Item(channel=item.channel, title="[COLOR white][B]Series[/B][/COLOR]", action="peliculas", + url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias='Series'&pagina=1", + thumbnail="http://imgur.com/qTqX9nU.jpg", fanart="http://imgur.com/rwjtkYj.jpg")) + title = "[COLOR white][B]Buscar...[/B][/COLOR]" + itemlist.append( + Item(channel=item.channel, title=title, action="search", url="", fanart="http://imgur.com/wmkgcCC.jpg", + thumbnail="http://imgur.com/b9xCys8.png")) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + + item.url = "http://www.txibitsoft.com/torrents.php?procesar=1&texto=%s" % (texto) + item.extra = "1" + try: + return buscador(item) + # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscador(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |&", "", data) + item.url = re.sub(r"&", "", item.url) + # corrige la falta de imagen + data = re.sub(r'<img src="<!doctype html><html xmlns="', + '</div><img src="http://s30.postimg.org/8n4ej5j0x/noimage.jpg" texto ><p>', data) + + # <div class="torrent-container-2 clearfix"><img class="torrent-image" src="uploads/torrents/images/thumbnails2/4441_step--up--all--in----blurayrip.jpg" alt="Imagen de Presentación" /><div class="torrent-info"><h4><a href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Step Up All In MicroHD 1080p AC3 5.1-Castellano-AC3 5.1 Ingles Subs</a> </h4><p>19-12-2014</p><p>Subido por: <strong>TorrentEstrenos</strong> en <a href="/ver_torrents_41-id_en_peliculas_microhd.html" title="Peliculas MICROHD">Peliculas MICROHD</a><br />Descargas <strong><a href="#" style="cursor:default">46</a></strong></p><a class="btn-download" href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Descargar</a></div></div> + + patron = '<dl class=".*?dosColumnasDobles"><dt>' + patron += '<a href="([^"]+)" ' + patron += 'title.*?:([^<]+)".*?' + patron += '<img src="([^"]+)".*?' + patron += 'Idioma: <span class="categoria">([^<]+).*?' 
def peliculas(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|&amp;|</span>|</li>|<li>", "", data) + item.url = re.sub(r"&amp;", "", item.url) + # corrige la falta de imagen + data = re.sub(r'<img src="<!doctype html><html xmlns="', + '</div><img src="http://s30.postimg.org/8n4ej5j0x/noimage.jpg" texto ><p>', data) + + # <div class="torrent-container-2 clearfix"><img class="torrent-image" src="uploads/torrents/images/thumbnails2/4441_step--up--all--in----blurayrip.jpg" alt="Imagen de Presentación" /><div class="torrent-info"><h4><a href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Step Up All In MicroHD 1080p AC3 5.1-Castellano-AC3 5.1 Ingles Subs</a> </h4><p>19-12-2014</p><p>Subido por: <strong>TorrentEstrenos</strong> en <a href="/ver_torrents_41-id_en_peliculas_microhd.html" title="Peliculas MICROHD">Peliculas MICROHD</a><br />Descargas <strong><a href="#" style="cursor:default">46</a></strong></p><a class="btn-download" href ="/descargar_torrent_27233-id_step_up_all_in_microhd_1080p_ac3_5.1--castellano--ac3_5.1_ingles_subs.html">Descargar</a></div></div> + + patron = '<dl class=".*?dosColumnasDobles"><dt>' + patron += '<a href="([^"]+)" ' + patron += 'title.*?:([^<]+)".*?' + patron += '<img src="([^"]+)".*?' 
+ patron += 'Idioma: <span class="categoria">(.*?)' + patron += 'Tamaño: <span class="categoria">([^<]+)' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + if len(matches) == 0: + itemlist.append(Item(channel=item.channel, + title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]", + thumbnail="http://s6.postimg.org/vhczf38ep/oops.png", + fanart="http://s12.postimg.org/59o1c792l/oopstxibi.jpg", folder=False)) + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlenguage, scrapedsize in matches: + title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|\d\d\d\d|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", + scrapedtitle) + scrapedurl = "http://www.txibitsoft.com" + scrapedurl + scrapedlenguage = scrapedlenguage.replace(scrapedlenguage, "[COLOR blue]" + scrapedlenguage + "[/COLOR]") + scrapedsize = scrapedsize.replace(scrapedsize, "[COLOR gold]" + scrapedsize + "[/COLOR]") + scrapedtitle = scrapedtitle.replace(scrapedtitle, "[COLOR white]" + scrapedtitle + "[/COLOR]") + scrapedtitle = scrapedtitle + "-(Idioma:" + scrapedlenguage + ")" + "-(Tamaño: " + scrapedsize + ")" + + itemlist.append( + Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart", thumbnail=scrapedthumbnail, + extra=title_fan, fanart="http://s21.postimg.org/w0lgvyud3/tbfanartgeneral2.jpg", + fulltitle=scrapedtitle, folder=True)) + + # Extrae el paginador + ## Paginación + + if "pagina=" in item.url: + current_page_number = int(scrapertools.get_match(item.url, 'pagina=(\d+)')) + item.url = re.sub(r"pagina=\d+", "pagina={0}", item.url) + else: + current_page_number = 1 + + next_page_number = current_page_number + 1 + next_page = item.url.format(next_page_number) + + title = "siguiente>>" + title = title.replace(title, "[COLOR orange]" + title + "[/COLOR]") + itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=next_page, + thumbnail="http://s18.postimg.org/4l9172cqx/tbsiguiente.png", + fanart="http://s21.postimg.org/w0lgvyud3/tbfanartgeneral2.jpg", folder=True)) + + return itemlist + + +def fanart(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + title_fan = item.extra + title = re.sub( + r'Serie Completa|Temporada.*?\d+|Fin Temporada|3D|SBS|Montaje del Director|V.Extendida|Quadrilogia|E.Coleccionista', + '', title_fan) + title = title.replace(' ', '%20') + title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if + unicodedata.category(c) != 'Mn')).encode("ascii", "ignore") + + try: + sinopsis_area = scrapertools.get_match(data, '<textarea.*?">(.*?)</textarea></li>') + if "Sinopsis" in sinopsis_area: + sinopsis = scrapertools.get_match(data, '<textarea.*Sinopsis(.*?)</textarea></li>') + elif "SINOPSIS" in sinopsis_area: + sinopsis = scrapertools.get_match(data, '<textarea.*SINOPSIS(.*?)</textarea></li>') + elif "Sinópsis" in sinopsis_area: + sinopsis = scrapertools.get_match(data, '<textarea.*Sinópsis(.*?)</textarea></li>') + elif "REPARTO" in sinopsis_area: + sinopsis = scrapertools.get_match(data, '<textarea.*REPARTO(.*?)</textarea></li>') + else: + sinopsis = "" + + except: + + sinopsis = "" + + if not "series" in item.url: + + if "Estreno" in sinopsis_area: + year = scrapertools.find_single_match(data, 'Estreno.*?\d+\/\d+\/(\d+)') + else: + year = scrapertools.find_single_match(data, '<textarea.*?A.*?(\d+)') + # filmafinity + url = 
"http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url).data + + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf).data + else: + + try: + url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year) + data = browser(url_bing) + data = re.sub(r'\n|\r|\t|\s{2}| ', '', data) + + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + except: + pass + if sinopsis == "": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]" + print "ozuu" + print critica + + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = re.sub(r":.*|\(.*?\)", "", title) + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false" + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica + show = "http://imgur.com/21Oty9A.jpg" + "|" + "" + "|" + sinopsis + posterdb = item.thumbnail + fanart_info = "http://imgur.com/21Oty9A.jpg" + fanart_3 = "" + fanart_2 = "http://imgur.com/21Oty9A.jpg" + 
category = item.thumbnail + id_scraper = "" + + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart="http://imgur.com/21Oty9A.jpg", extra=extra, + show=show, category=category, folder=True)) + + for id, fan in matches: + + fan = re.sub(r'\\|"', '', fan) + + try: + rating = scrapertools.find_single_match(data, '"vote_average":(.*?),') + except: + rating = "Sin puntuación" + + id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica + try: + posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"') + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + except: + posterdb = item.thumbnail + + if "null" in fan: + fanart = "http://imgur.com/21Oty9A.jpg" + else: + fanart = "https://image.tmdb.org/t/p/original" + fan + item.extra = fanart + + url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=" + api_key + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + if fanart == "http://imgur.com/21Oty9A.jpg": + fanart = fanart_info + # clearart, fanart_2 y logo + url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"hdmovielogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if '"moviedisc"' in data: + disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"') + if '"movieposter"' in data: + poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"') + if '"moviethumb"' in data: + thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"') + if '"moviebanner"' in data: + banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"') + + if len(matches) == 0: + extra = posterdb + # "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png" + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + category = posterdb + + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", + thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category, folder=True)) + for logo in matches: + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + if '"moviebackground"' in data: + + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis 
+ if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if '"moviebackground"' in data: + + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + else: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = logo + + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not '"hdmovieclearart"' in data and not '"moviebackground"' in data: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = item.extra + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=item.extra, extra=extra, show=show, + category=category, folder=True)) + + + else: + # filmafinity + + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % (title.replace(' ', '+')) + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + try: + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + except: + pass + + try: + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + year = scrapertools.get_match(data, '<dt>Año</dt>.*?>(.*?)</dd>') + except: + year = "" + + if sinopsis == "": + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + print "lobeznito" + print rating_filma + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta serie no tiene críticas[/B][/COLOR]" + + ###Busqueda en tmdb + + url_tmdb = 
"http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false&first_air_date_year=" + year + + data_tmdb = scrapertools.cachePage(url_tmdb) + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + + ###Busqueda en bing el id de imdb de la serie + if len(matches) == 0: + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es" + data_tmdb = scrapertools.cachePage(url_tmdb) + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + if len(matches) == 0: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + try: + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + except: + pass + + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + imdb_id = "" + ###Busca id de tvdb y tmdb mediante imdb id + + urlremotetbdb = "https://api.themoviedb.org/3/find/" + imdb_id + "?api_key=" + api_key + "&external_source=imdb_id&language=es" + data_tmdb = scrapertools.cachePage(urlremotetbdb) + matches = scrapertools.find_multiple_matches(data_tmdb, + '"tv_results":.*?"id":(.*?),.*?"poster_path":(.*?),"popularity"') + + if len(matches) == 0: + id_tmdb = "" + fanart_3 = "" + extra = item.thumbnail + "|" + year + "|" + "no data" + "|" + "no data" + "|" + rating_filma + "|" + critica + "|" + "" + "|" + id_tmdb + show = "http://imgur.com/21Oty9A.jpg" + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + item.thumbnail + "|" + id_tmdb + fanart_info = "http://imgur.com/21Oty9A.jpg" + fanart_2 = "http://imgur.com/21Oty9A.jpg" + id_scraper = " " + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + " " + category = "" + posterdb = item.thumbnail + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart="http://imgur.com/21Oty9A.jpg", extra=extra, + category=category, show=show, folder=True)) + + for id_tmdb, fan in matches: + ###Busca id tvdb + urlid_tvdb = "https://api.themoviedb.org/3/tv/" + id_tmdb + "/external_ids?api_key=" + api_key + "&language=es" + data_tvdb = scrapertools.cachePage(urlid_tvdb) + id = scrapertools.find_single_match(data_tvdb, 'tvdb_id":(.*?),"tvrage_id"') + if id == "null": + id = "" + category = id + ###Busqueda nºepisodios y temporadas,status + url_status = "http://api.themoviedb.org/3/tv/" + id_tmdb + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_status = scrapertools.cachePage(url_status) + season_episodes = scrapertools.find_single_match(data_status, + '"(number_of_episodes":\d+,"number_of_seasons":\d+,)"') + season_episodes = re.sub(r'"', '', season_episodes) + season_episodes = re.sub(r'number_of_episodes', 'Episodios ', season_episodes) + season_episodes = re.sub(r'number_of_seasons', 'Temporadas', season_episodes) + season_episodes = re.sub(r'_', ' ', season_episodes) + status = scrapertools.find_single_match(data_status, '"status":"(.*?)"') + if status == "Ended": + status = "Finalizada" + else: + status = "En emisión" + 
status = status + " (" + season_episodes + ")"
+        status = re.sub(r',', '.', status)
+        #######
+        fan = re.sub(r'\\|"', '', fan)
+        try:
+            # rating from TheTVDB
+            url_rating_tvdb = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id + "/es.xml"
+            data = httptools.downloadpage(url_rating_tvdb).data
+            rating = scrapertools.find_single_match(data, '<Rating>(.*?)<')
+        except:
+            rating = ""
+        try:
+            rating = scrapertools.get_match(data, '"vote_average":(.*?),')
+        except:
+            rating = "Sin puntuación"
+
+        id_scraper = id_tmdb + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + rating + "|" + status  # +"|"+emision
+        posterdb = scrapertools.find_single_match(data_tmdb, '"poster_path":(.*?)","popularity"')
+
+        if "null" in posterdb:
+            posterdb = item.thumbnail
+        else:
+            posterdb = re.sub(r'\\|"', '', posterdb)
+            posterdb = "https://image.tmdb.org/t/p/original" + posterdb
+        if "null" in fan:
+            fanart = "http://imgur.com/21Oty9A.jpg"
+        else:
+            fanart = "https://image.tmdb.org/t/p/original" + fan
+
+        item.extra = fanart
+
+        url = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/images?api_key=" + api_key
+        data = httptools.downloadpage(url).data
+        data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+        patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+
+        if len(matches) == 0:
+            patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            if len(matches) == 0:
+                fanart_info = item.extra
+                fanart_3 = ""
+                fanart_2 = item.extra
+        for fanart_info, fanart_3, fanart_2 in matches:
+            fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info
+            fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3
+            fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2
+            if fanart == "http://imgur.com/21Oty9A.jpg":
+                fanart = fanart_info
+        url = "http://webservice.fanart.tv/v3/tv/" + id + "?api_key=" + api_fankey
+        data = httptools.downloadpage(url).data
+        data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+        patron = '"clearlogo":.*?"url": "([^"]+)"'
+        matches = re.compile(patron, re.DOTALL).findall(data)
+        if '"tvbanner"' in data:
+            tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"')
+            tfv = tvbanner
+        elif '"tvposter"' in data:
+            tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"')
+            tfv = tvposter
+        else:
+            tfv = posterdb
+        if '"tvthumb"' in data:
+            tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"')
+        if '"hdtvlogo"' in data:
+            hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"')
+        if '"hdclearart"' in data:
+            hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"')
+        if len(matches) == 0:
+            if '"hdtvlogo"' in data:
+                if "showbackground" in data:
+                    if '"hdclearart"' in data:
+                        thumbnail = hdtvlogo
+                        extra = hdtvclear + "|" + year
+                        show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb
+                    else:
+                        thumbnail = hdtvlogo
+                        extra = thumbnail + "|" + year
+                        show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb
+                    itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
+                                         server="torrent", thumbnail=thumbnail, fanart=item.extra,
+                                         category=category, extra=extra, show=show, folder=True))
+
+                else:
+                    if '"hdclearart"' in data:
+                        thumbnail = hdtvlogo
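# Sketch: id_scraper, extra and show pack metadata into "a|b|c" strings that the
# rest of the channel reads back with .split("|")[n]; a missing field then raises
# IndexError, silently swallowed by the bare excepts. A small accessor (hypothetical
# name, not used elsewhere in this file) makes that failure mode explicit:
def pipe_field(packed, index, default=""):
    parts = packed.split("|")
    return parts[index] if index < len(parts) else default

print(pipe_field("id123|serie|7.2", 1))                # serie
print(pipe_field("id123|serie", 5, "Sin puntuación"))  # Sin puntuación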
extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = "" + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=posterdb, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + for logo in matches: + if '"hdtvlogo"' in data: + thumbnail = hdtvlogo + elif not '"hdtvlogo"' in data: + if '"clearlogo"' in data: + thumbnail = logo + else: + thumbnail = item.thumbnail + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + if "showbackground" in data: + + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if "showbackground" in data: + + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = logo + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not '"clearart"' in data and not '"showbackground"' in data: + if '"hdclearart"' in data: + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + title_info = "[COLOR turquoise]Info[/COLOR]" + if not "series" in item.url: + thumbnail = posterdb + title_info = "[COLOR khaki]Info[/COLOR]" + if "series" in item.url: + title_info = "[COLOR skyblue]Info[/COLOR]" + if '"tvposter"' in data: + thumbnail = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + else: + thumbnail = posterdb + + if "tvbanner" in data: + category = tvbanner + else: + category = show + if '"tvthumb"' in data: + plot = item.plot + "|" + 
tvthumb
+        else:
+            plot = item.plot + "|" + item.thumbnail
+        if '"tvbanner"' in data:
+            plot = plot + "|" + tvbanner
+        elif '"tvthumb"' in data:
+            plot = plot + "|" + tvthumb
+        else:
+            plot = plot + "|" + item.thumbnail
+    else:
+        if '"moviethumb"' in data:
+            plot = item.plot + "|" + thumb
+        else:
+            plot = item.plot + "|" + posterdb
+
+        if '"moviebanner"' in data:
+            plot = plot + "|" + banner
+        else:
+            if '"hdmovieclearart"' in data:
+                plot = plot + "|" + clear
+            else:
+                plot = plot + "|" + posterdb
+
+    id = id_scraper
+    extra = extra + "|" + id + "|" + title.encode('utf8')
+
+    itemlist.append(
+        Item(channel=item.channel, action="info", title=title_info, plot=plot, url=item.url, thumbnail=thumbnail,
+             fanart=fanart_info, extra=extra, category=category, show=show, viewmode="movie_with_plot", folder=False))
+
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+
+    if "serie" not in item.url:
+        thumbnail = item.category
+    else:
+        thumbnail = item.show.split("|")[4]
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    patron = '<form name="frm" id="frm" method="get" action="torrent.php">.*?'
+    patron += 'alt="([^<]+)".*?'
+    patron += '<p class="limpiar centro"><a class="torrent" href="([^"]+)"'
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    if len(matches) == 0:
+        itemlist.append(Item(channel=item.channel,
+                             title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrarlo por búsqueda...[/B][/COLOR]",
+                             thumbnail="http://s6.postimg.org/vhczf38ep/oops.png",
+                             fanart="http://s12.postimg.org/59o1c792l/oopstxibi.jpg", folder=False))
+
+    for scrapedtitle, scrapedurl in matches:
+
+        if "x" in scrapedtitle:
+            patron = '<form name="frm" id="frm" method="get" action="torrent.php">.*?alt=".*?(\d+)x(\d+)'
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            for temp, epi in matches:
+                plot = temp + "|" + epi
+
+        if "Temporada" in scrapedtitle:
+            patron = '<form name="frm" id="frm" method="get" action="torrent.php">.*?alt=".*?Temporada (\d+).*?\[Cap.\d(\d+)'
+            matches = re.compile(patron, re.DOTALL).findall(data)
+            for temp, epi in matches:
+                # the capture can arrive with the season digits glued to the episode
+                # ("101", "930", ...); keep only the last two digits, zero-padded
+                epi = epi[-2:].zfill(2)
+                plot = temp + "|" + epi
+
+        if "series" in item.url:
+
+            try:
+                # look up the file size and the container format
+                url = host + scrapedurl
+
+                torrents_path = config.get_videolibrary_path() + '/torrents'
+
+                if not os.path.exists(torrents_path):
+                    os.mkdir(torrents_path)
+                # Some OSes raise odd certificate errors; work around them
+                import ssl
+
+                ssl._create_default_https_context = ssl._create_unverified_context
+                urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0'
+                urllib.urlretrieve(url, torrents_path + "/temp.torrent")
+                pepe = open(torrents_path + "/temp.torrent", "rb").read()
+
+                if "used CloudFlare" in pepe:
+                    # retry through an anonymizer when CloudFlare blocks the direct download
+                    try:
+                        urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(),
+                                           torrents_path + "/temp.torrent")
+                        pepe = open(torrents_path + "/temp.torrent", "rb").read()
+                    except:
+                        pepe = ""
+
+                torrent = decode(pepe)
+
+                try:
+                    name = torrent["info"]["name"]
+                    sizet = torrent["info"]['length']
+                    sizet = convert_size(sizet)
+                except:
+                    name = "no disponible"
+                try:
+                    check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}")
+
+                    size = max([int(i) for i in check_video])
+
+                    # pick the file entry whose length equals the largest size found
+                    for entry in torrent["info"]["files"]:
+                        manolo = "%r - %d bytes" % ("/".join(entry["path"]), entry["length"])
+                        if str(size) in manolo:
+                            video = manolo
+                    size = convert_size(size)
+                    ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video)
+                    try:
+                        os.remove(torrents_path + "/temp.torrent")
+                    except:
+                        pass
+                except:
+                    size = sizet
+                    ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", name)
+                    try:
+                        os.remove(torrents_path + "/temp.torrent")
+                    except:
+                        pass
+            except:
+                size = "en estos momentos..."
+ ext_v = "no disponible" + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + size = "" + scrapedurl = "http://www.txibitsoft.com" + scrapedurl + title_tag = "[COLOR orange]Ver--[/COLOR]" + scrapedtitle = "[COLOR magenta][B] capìtulo" + " " + temp + "x" + epi + "[/B][/COLOR]" + scrapedtitle = title_tag + scrapedtitle + " " + "[COLOR mediumslateblue]( Video [/COLOR]" + "[COLOR mediumpurple]" + ext_v + " -- " + size + " )[/COLOR]" + itemlist.append( + Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent", + thumbnail=thumbnail, category=item.category, fanart=item.show.split("|")[0], folder=False)) + ###thumb temporada### + url = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/season/" + temp + "/images?api_key=" + api_key + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + for thumtemp in matches: + print "@@@@@" + thumtemp + thumbnail = "https://image.tmdb.org/t/p/original" + thumtemp + + extra = item.extra + "|" + temp + "|" + epi + title = "Info" + title = title.replace(title, "[COLOR skyblue]" + title + "[/COLOR]") + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title=title, url=item.url, thumbnail=thumbnail, + fanart=item.show.split("|")[1], show=item.show, extra=extra, folder=False)) + + else: + try: + # buscamos peso y formato + url = "http://www.txibitsoft.com" + scrapedurl + + torrents_path = config.get_videolibrary_path() + '/torrents' + + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) + + import ssl + + ssl._create_default_https_context = ssl._create_unverified_context + urllib.urlretrieve("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url, + torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + + if "used CloudFlare" in pepe: + try: + urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0' + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), + torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + + torrent = decode(pepe) + + try: + name = torrent["info"]["name"] + sizet = torrent["info"]['length'] + sizet = convert_size(sizet) + except: + name = "no disponible" + try: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}") + + size = max([int(i) for i in check_video]) + + for file in torrent["info"]["files"]: + manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) + if str(size) in manolo: + video = manolo + size = convert_size(size) + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = sizet + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", name) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = "en estos momentos..." 
+ ext_v = "no disponible" + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + size = "" + scrapedurl = "http://www.txibitsoft.com" + scrapedurl + title_tag = "[COLOR orange]Ver--[/COLOR]" + scrapedtitle = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", + scrapedtitle) + scrapedtitle = "[COLOR magenta][B]" + scrapedtitle + "[/B][/COLOR]" + scrapedtitle = title_tag + scrapedtitle + " " + "[COLOR mediumslateblue]( Video [/COLOR]" + "[COLOR mediumpurple]" + ext_v + " -- " + size + " )[/COLOR]" + itemlist.append( + Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent", + thumbnail=item.extra, fanart=item.show, folder=False)) + + return itemlist + + +def info(item): + logger.info() + itemlist = [] + url = item.url + id = item.extra + + if "serie" in item.url: + try: + rating_tmdba_tvdb = item.extra.split("|")[6] + if item.extra.split("|")[6] == "": + rating_tmdba_tvdb = "Sin puntuación" + except: + rating_tmdba_tvdb = "Sin puntuación" + else: + rating_tmdba_tvdb = item.extra.split("|")[3] + rating_filma = item.extra.split("|")[4] + print "eztoquee" + print rating_filma + print rating_tmdba_tvdb + + filma = "http://s6.postimg.org/6yhe5fgy9/filma.png" + + try: + if "serie" in item.url: + title = item.extra.split("|")[8] + + else: + title = item.extra.split("|")[6] + title = title.replace("%20", " ") + title = "[COLOR yellow][B]" + title + "[/B][/COLOR]" + except: + title = item.title + + try: + if "." in rating_tmdba_tvdb: + check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).') + else: + check_rat_tmdba = rating_tmdba_tvdb + if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8: + rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10: + rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + else: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + print "lolaymaue" + except: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + try: + check_rat_filma = scrapertools.get_match(rating_filma, '(\d)') + print "paco" + print check_rat_filma + if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8: + print "dios" + print check_rat_filma + rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]" + elif int(check_rat_filma) >= 8: + + print check_rat_filma + rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]" + else: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + print "rojo??" 
+ print check_rat_filma + except: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + + try: + if not "serie" in item.url: + url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_plot = scrapertools.cache_page(url_plot) + plot, tagline = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",.*?"tagline":(".*?")') + if plot == "": + plot = item.show.split("|")[2] + + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\", "", plot) + + else: + plot = item.show.split("|")[2] + plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" + plot = re.sub(r"\\", "", plot) + + if item.extra.split("|")[7] != "": + tagline = item.extra.split("|")[7] + # tagline= re.sub(r',','.',tagline) + else: + tagline = "" + except: + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Esta pelicula no tiene informacion..." + plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") + photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + info = "" + + if "serie" in item.url: + check2 = "serie" + icon = "http://s6.postimg.org/hzcjag975/tvdb.png" + foto = item.show.split("|")[1] + if item.extra.split("|")[5] != "": + critica = item.extra.split("|")[5] + else: + critica = "Esta serie no tiene críticas..." + + photo = item.extra.split("|")[0].replace(" ", "%20") + try: + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + except: + tagline = "" + + else: + critica = item.extra.split("|")[5] + if "%20" in critica: + critica = "No hay críticas" + icon = "http://imgur.com/SenkyxF.png" + photo = item.extra.split("|")[0].replace(" ", "%20") + foto = item.show.split("|")[1] + if foto == item.thumbnail: + foto = "" + + try: + if tagline == "\"\"": + tagline = " " + except: + tagline = " " + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + check2 = "pelicula" + # Tambien te puede interesar + peliculas = [] + if "serie" in item.url: + + url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = scrapertools.cachePage(url_tpi) + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),"popularity"') + + else: + url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = scrapertools.cachePage(url_tpi) + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),"popularity"') + + for idp, peli, thumb in tpi: + + thumb = re.sub(r'"|}', '', thumb) + if "null" in thumb: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + else: + thumb = "https://image.tmdb.org/t/p/original" + thumb + peliculas.append([idp, peli, thumb]) + + check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow") + infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline, + 'rating': rating} + item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma, + critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/jProvTt.png") + from channels import infoplus + infoplus.start(item_info, peliculas) + + +def info_capitulos(item): + logger.info() + + url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" + 
item.extra.split("|")[ + 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es" + + if "/0" in url: + url = url.replace("/0", "/") + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[ + 2] + "/" + item.extra.split("|")[3] + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?<Overview>(.*?)</Overview>.*?<Rating>(.*?)</Rating>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." + plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + + else: + + for name_epi, info, rating in matches: + if "<filename>episodes" in data: + foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') + fanart = "http://thetvdb.com/banners/" + foto + else: + fanart = "http://imgur.com/ZiEAVOD.png" + plot = info + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/PXJEqBn.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + else: + for name_epi, info, fanart, rating in matches: + if info == "" or info == "\\": + info = "Sin informacion del capítulo aún..." 
+ plot = info + plot = re.sub(r'/n', '', plot) + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + image = re.sub(r'"|}', '', image) + if "null" in image: + image = "http://imgur.com/ZiEAVOD.png" + else: + image = "https://image.tmdb.org/t/p/original" + image + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/PXJEqBn.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/H2hMPTP.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% 
time=800 condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def browser(url): + import mechanize + + # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) + br.addheaders = [('User-agent', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): + i = 0 + while i < len(text): + m = match(text, i) + s = m.group(m.lastindex) + i = m.end() + if m.lastindex == 2: + yield "s" + yield text[i:i + int(s)] + i = i + int(s) + else: + yield s + + +def decode_item(next, token): + if token == "i": + # integer: "i" value "e" + data = int(next()) + if next() != "e": + raise ValueError + elif token == "s": + # string: "s" value (virtual tokens) + data = next() + elif token == "l" or token == "d": + # container: "l" (or "d") values "e" + data = [] + tok = next() + while tok != "e": + data.append(decode_item(next, tok)) + tok = next() + if token == "d": + data = dict(zip(data[0::2], data[1::2])) + else: + raise ValueError + return data + + +def decode(text): + try: + src = tokenize(text) + data = decode_item(src.next, src.next()) + for token in src: # look for more tokens + raise SyntaxError("trailing junk") + except (AttributeError, ValueError, StopIteration): + try: + data = data + except: + data = src + + return data + + +def convert_size(size): + import math + if (size == 0): + return '0B' + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size, 1024))) + p = math.pow(1024, i) + s = round(size / p, 2) + return '%s %s' % (s, size_name[i]) diff --git a/plugin.video.alfa/channels/ultrapeliculashd.json 
b/plugin.video.alfa/channels/ultrapeliculashd.json new file mode 100755 index 00000000..24a4c8b6 --- /dev/null +++ b/plugin.video.alfa/channels/ultrapeliculashd.json @@ -0,0 +1,50 @@ +{ + "id": "ultrapeliculashd", + "name": "UltraPeliculasHD", + "language": "es", + "active": true, + "adult": false, + "banner": "https://s9.postimg.org/5yxsq205r/ultrapeliculashd_banner.png", + "thumbnail": "https://s13.postimg.org/d042quw9j/ultrapeliculashd.png", + "version": 1, + "changes": [ + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "09/05/2017", + "description": "Fix por cambio de estructra de la pagina" + } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/ultrapeliculashd.py b/plugin.video.alfa/channels/ultrapeliculashd.py new file mode 100755 index 00000000..aa8097c4 --- /dev/null +++ b/plugin.video.alfa/channels/ultrapeliculashd.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +host = 'http://www.ultrapeliculashd.com' + +tgenero = {"ACCIÓN": "https://s3.postimg.org/y6o9puflv/accion.png,", + "ANIMACIÓN": "https://s13.postimg.org/5on877l87/animacion.png", + "AVENTURA": "https://s10.postimg.org/6su40czih/aventura.png", + "CIENCIA FICCIÓN": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "COMEDIA": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "CRIMEN": "https://s4.postimg.org/6z27zhirx/crimen.png", + "DRAMA": "https://s16.postimg.org/94sia332d/drama.png", + "ESTRENOS": "https://s21.postimg.org/fy69wzm93/estrenos.png", + "FAMILIA": "https://s7.postimg.org/6s7vdhqrf/familiar.png", + "FANTASÍA": "https://s13.postimg.org/65ylohgvb/fantasia.png", + "GUERRA": "https://s4.postimg.org/n1h2jp2jh/guerra.png", + "INFANTIL": "https://s23.postimg.org/g5rmazozv/infantil.png", + "MISTERIO": "https://s1.postimg.org/w7fdgf2vj/misterio.png", + "ROMANCE": "https://s15.postimg.org/fb5j8cl63/romance.png", + "SUSPENSO": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "TERROR": "https://s7.postimg.org/yi0gij3gb/terror.png" + } + +thumbletras = {'#': 'https://s32.postimg.org/drojt686d/image.png', + 'a': 'https://s32.postimg.org/llp5ekfz9/image.png', + 'b': 'https://s32.postimg.org/y1qgm1yp1/image.png', + 'c': 'https://s32.postimg.org/vlon87gmd/image.png', + 'd': 'https://s32.postimg.org/3zlvnix9h/image.png', + 'e': 'https://s32.postimg.org/bgv32qmsl/image.png', + 'f': 'https://s32.postimg.org/y6u7vq605/image.png', + 'g': 'https://s32.postimg.org/9237ib6jp/image.png', + 'h': 'https://s32.postimg.org/812yt6pk5/image.png', + 'i': 'https://s32.postimg.org/6nbbxvqat/image.png', + 'j': 'https://s32.postimg.org/axpztgvdx/image.png', + 'k': 'https://s32.postimg.org/976yrzdut/image.png', + 'l': 'https://s32.postimg.org/fmal2e9yd/image.png', + 'm': 'https://s32.postimg.org/m19lz2go5/image.png', + 'n': 
'https://s32.postimg.org/b2ycgvs2t/image.png', + 'o': 'https://s32.postimg.org/c6igsucpx/image.png', + 'p': 'https://s32.postimg.org/jnro82291/image.png', + 'q': 'https://s32.postimg.org/ve5lpfv1h/image.png', + 'r': 'https://s32.postimg.org/nmovqvqw5/image.png', + 's': 'https://s32.postimg.org/zd2t89jol/image.png', + 't': 'https://s32.postimg.org/wk9lo8jc5/image.png', + 'u': 'https://s32.postimg.org/w8s5bh2w5/image.png', + 'v': 'https://s32.postimg.org/e7dlrey91/image.png', + 'w': 'https://s32.postimg.org/fnp49k15x/image.png', + 'x': 'https://s32.postimg.org/dkep1w1d1/image.png', + 'y': 'https://s32.postimg.org/um7j3zg85/image.png', + 'z': 'https://s32.postimg.org/jb4vfm9d1/image.png' + } + +tcalidad = {'1080P': 'https://s21.postimg.org/4h1s0t1wn/hd1080.png', + '720P': 'https://s12.postimg.org/lthu7v4q5/hd720.png', "HD": "https://s27.postimg.org/m2dhhkrur/image.png"} + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(item.clone(title="Todas", + action="lista", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + url=host + '/movies/' + )) + + itemlist.append(item.clone(title="Generos", + action="generos", + url=host, + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png' + )) + + itemlist.append(item.clone(title="Alfabetico", + action="seccion", + url=host, + thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', + fanart='https://s17.postimg.org/fwi1y99en/a-z.png', + extra='alfabetico' + )) + + itemlist.append(item.clone(title="Buscar", + action="search", + url=host + '/?s=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + if item.extra != 'buscar': + patron = '<article id=post-.*? class=item movies><div class=poster><a href=(.*?)><img src=(.*?) ' + patron += 'alt=(.*?)>.*?quality>.*?<.*?<\/h3><span>(.*?)<\/span>' + else: + patron = '<article><div class=image>.*?<a href=(.*?)\/><img src=(.*?) alt=(.*?) \/>.*?year>(.*?)<\/span>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches: + url = scrapedurl + thumbnail = scrapedthumbnail + contentTitle = re.sub(r'\d{4}', '', scrapedtitle) + contentTitle = contentTitle.replace('|', '') + contentTitle = contentTitle.strip(' ') + title = scrapertools.decodeHtmlentities(contentTitle) + year = scrapedyear + fanart = '' + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.title, url=url, + thumbnail=thumbnail, fanart=fanart, contentTitle=contentTitle, infoLabels={'year': year})) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + # Paginacion + + if itemlist != []: + actual_page_url = item.url + next_page = scrapertools.find_single_match(data, '<div class=pag_b><a href=(.*?) >Siguiente<\/a>') + if next_page != '': + itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png')) + return itemlist + + +def generos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + patron = '<li class=cat-item cat-item-.*?><a href=(.*?) 
>(.*?)<\/a> <i>(.*?)<\/i><\/li>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedtitle, cantidad in matches:
+        thumbnail = ''
+        fanart = ''
+        if scrapedtitle in tgenero:
+            thumbnail = tgenero[scrapedtitle]
+        title = scrapedtitle + ' (' + cantidad + ')'
+        url = scrapedurl
+        if scrapedtitle not in ['PRÓXIMAMENTE', 'EN CINE']:
+            itemlist.append(item.clone(action="lista",
+                                       title=title,
+                                       fulltitle=item.title,
+                                       url=url,
+                                       thumbnail=thumbnail,
+                                       fanart=fanart
+                                       ))
+    return itemlist
+
+
+def seccion(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
+    if item.extra == 'year':
+        patron = '<li><a href=(.*?\/fecha-estreno.*?)>(.*?)<\/a>'
+    else:
+        patron = '<li><a href=(.*?) >(.*?)<\/a><\/li>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedtitle in matches:
+        thumbnail = ''
+        if scrapedtitle.lower() in thumbletras:
+            thumbnail = thumbletras[scrapedtitle.lower()]
+        fanart = ''
+        title = scrapedtitle
+        url = scrapedurl
+
+        itemlist.append(
+            Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail,
+                 fanart=fanart))
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
+
+    patron = '<iframe class=metaframe rptss src=(.*?) frameborder=0 allowfullscreen><\/iframe>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for videoitem in matches:
+        itemlist.extend(servertools.find_video_items(data=videoitem))
+
+    for videoitem in itemlist:
+        videoitem.channel = item.channel
+        videoitem.action = 'play'
+        videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
+        videoitem.infoLabels = item.infoLabels
+        videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
+        if 'youtube' in videoitem.url:
+            videoitem.title = '[COLOR orange]Trailer en Youtube[/COLOR]'
+
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(
+            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
+                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto
+    try:
+        if texto != '':
+            item.extra = 'buscar'
+            return lista(item)
+        else:
+            return []
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+def newest(categoria):
+    logger.info()
+    itemlist = []
+    item = Item()
+    item.extra = 'estrenos/'
+    try:
+        if categoria == 'peliculas':
+            item.url = host + '/category/estrenos/'
+
+        elif categoria == 'infantiles':
+            item.url = host + '/category/infantil/'
+
+        itemlist = lista(item)
+        if itemlist[-1].title == 'Siguiente >>>':
+            itemlist.pop()
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/unsoloclic.json b/plugin.video.alfa/channels/unsoloclic.json
new file mode 100755
index 00000000..c41c0fea
--- /dev/null
+++ b/plugin.video.alfa/channels/unsoloclic.json
@@ -0,0 +1,24 @@
+{
+    "id": "unsoloclic",
+    "name": "Unsoloclic",
+    "active": true,
+    "adult": false,
+    "language": "es",
+    "banner": "unsoloclic.png",
+    "thumbnail": "unsoloclic.png",
+    "version": 1,
"changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ], + "categories": [ + "movie", + "tvshow" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/unsoloclic.py b/plugin.video.alfa/channels/unsoloclic.py new file mode 100755 index 00000000..cf20c2aa --- /dev/null +++ b/plugin.video.alfa/channels/unsoloclic.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + item.url = "http://unsoloclic.info" + return novedades(item) + + +def novedades(item): + logger.info() + itemlist = [] + + # Descarga la página + data = scrapertools.cachePage(item.url) + ''' + <div class="post-45732 post type-post status-publish format-standard hentry category-2012 category-blu-ray category-mkv-hd720p" id="post-45732"> + <h2 class="title"><a href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="bookmark" title="Permanent Link to Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD">Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD</a></h2> + <div class="postdate"><img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/date.png" /> noviembre 5th, 2012 + <!-- + <img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/user.png" /> unsoloclic + --> + </div> + <div class="entry"> + <p><a href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="attachment wp-att-45737"><img src="http://unsoloclic.info/wp-content/uploads/2012/11/Ek-Tha-Tiger-2012.jpg" alt="" title="Ek Tha Tiger (2012)" width="500" height="629" class="aligncenter size-full wp-image-45737" /></a></p> + <h2 style="text-align: center;"></h2> + <div class="readmorecontent"> + <a class="readmore" href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="bookmark" title="Permanent Link to Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD">Seguir Leyendo</a> + </div> + </div> + </div><!--/post-45732--> + ''' + ''' + <div class="post-45923 post type-post status-publish format-standard hentry category-2012 category-blu-ray category-comedia category-drama category-mkv category-mkv-hd720p category-romance tag-chris-messina tag-jenna-fischer tag-lee-kirk tag-the-giant-mechanical-man-pelicula tag-topher-grace" id="post-45923"> + <h2 class="title"><a href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/" rel="bookmark" title="Permanent Link to The Giant Mechanical Man (2012) BluRay 720p HD">The Giant Mechanical Man (2012) BluRay 720p HD</a></h2> + <div class="postdate"><img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/date.png" /> diciembre 24th, 2012 + <!-- + <img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/user.png" /> deportv + --> + </div> + <div class="entry"> + <p style="text-align: center;"><a href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/"><img class="aligncenter size-full wp-image-45924" title="Giant Michanical Man Pelicula Descargar" src="http://unsoloclic.info/wp-content/uploads/2012/12/Giant-Michanical-Man-Pelicula-Descargar.jpg" alt="" width="380" height="500" /></a></p> + <p style="text-align: center;"> + <div class="readmorecontent"> + <a class="readmore" href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/" rel="bookmark" title="Permanent Link to The Giant Mechanical Man (2012) BluRay 
720p HD">Seguir Leyendo</a> + </div> + </div> + </div><!--/post-45923--> + ''' + patron = '<div class="post[^"]+" id="post-\d+">[^<]+' + patron += '<h2 class="title"><a href="([^"]+)" rel="bookmark" title="[^"]+">([^<]+)</a></h2>[^<]+' + patron += '<div class="postdate">.*?</div>[^<]+' + patron += '<div class="entry">[^<]+' + patron += '<p[^<]+<a[^<]+<img.*?src="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + scrapedplot = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + ''' + <a href="http://unsoloclic.info/page/2/" >« Peliculas anteriores</a> + ''' + patron = '<a href="([^"]+)" >\«\; Peliculas anteriores</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for match in matches: + scrapedtitle = ">> Página siguiente" + scrapedplot = "" + scrapedurl = urlparse.urljoin(item.url, match) + scrapedthumbnail = "" + logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") + itemlist.append(Item(channel=item.channel, action="novedades", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True)) + + return itemlist + + +def findvideos(item): + logger.info() + data = scrapertools.cache_page(item.url) + itemlist = [] + + # <a href="http://67cfb0db.linkbucks.com"><img title="billionuploads" src="http://unsoloclic.info/wp-content/uploads/2012/11/billonuploads2.png" alt="" width="380" height="50" /></a></p> + # <a href="http://1bd02d49.linkbucks.com"><img class="colorbox-57103" title="Freakeshare" alt="" src="http://unsoloclic.info/wp-content/uploads/2013/01/freakshare.png" width="390" height="55" /></a></p> + patron = '<a href="(http.//[a-z0-9]+.linkbucks.c[^"]+)[^>]+><img.*?title="([^"]+)".*?src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + for url, servertag, serverthumb in matches: + itemlist.append( + Item(channel=item.channel, action="play", server="linkbucks", title=servertag + " [linkbucks]", url=url, + thumbnail=serverthumb, plot=item.plot, folder=False)) + + from core import servertools + itemlist.extend(servertools.find_video_items(data=data)) + for videoitem in itemlist: + if videoitem.server != "linkbucks": + videoitem.channel = item.channel + videoitem.action = "play" + videoitem.folder = False + videoitem.title = "[" + videoitem.server + "]" + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + if item.server == "linkbucks": + logger.info("Es linkbucks") + + # Averigua el enlace + from servers.decrypters import linkbucks + location = linkbucks.get_long_url(item.url) + logger.info("location=" + location) + + # Extrae la URL de saltar el anuncio en adf.ly + if location.startswith("http://adf"): + # Averigua el enlace + from servers.decrypters import adfly + location = adfly.get_long_url(location) + logger.info("location=" + location) + + from core import servertools + itemlist = servertools.find_video_items(data=location) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.folder = False + + else: + itemlist.append(item) + + return itemlist diff --git a/plugin.video.alfa/channels/url.json b/plugin.video.alfa/channels/url.json new file mode 100755 index 00000000..51209dc6 
--- /dev/null +++ b/plugin.video.alfa/channels/url.json @@ -0,0 +1,19 @@ +{ + "id": "url", + "name": "Tengo una URL", + "active": false, + "adult": false, + "thumbnail": "url.png", + "banner": "url.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/url.py b/plugin.video.alfa/channels/url.py new file mode 100755 index 00000000..e1db1428 --- /dev/null +++ b/plugin.video.alfa/channels/url.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="search", + title="Entra aquí y teclea la URL [Enlace a servidor online/descarga]")) + itemlist.append( + Item(channel=item.channel, action="search", title="Entra aquí y teclea la URL [Enlace directo a un vídeo]")) + itemlist.append(Item(channel=item.channel, action="search", + title="Entra aquí y teclea la URL [Búsqueda de enlaces en una url]")) + + return itemlist + + +# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro +def search(item, texto): + logger.info("texto=" + texto) + + if not texto.startswith("http://"): + texto = "http://" + texto + + itemlist = [] + + if "servidor" in item.title: + itemlist = servertools.find_video_items(data=texto) + for item in itemlist: + item.channel = "url" + item.action = "play" + elif "directo" in item.title: + itemlist.append( + Item(channel=item.channel, action="play", url=texto, server="directo", title="Ver enlace directo")) + else: + data = scrapertools.downloadpage(texto) + itemlist = servertools.find_video_items(data=data) + for item in itemlist: + item.channel = "url" + item.action = "play" + + if len(itemlist) == 0: + itemlist.append(Item(channel=item.channel, action="search", title="No hay ningún vídeo compatible en esa URL")) + + return itemlist diff --git a/plugin.video.alfa/channels/vepelis.json b/plugin.video.alfa/channels/vepelis.json new file mode 100755 index 00000000..be41cc37 --- /dev/null +++ b/plugin.video.alfa/channels/vepelis.json @@ -0,0 +1,34 @@ +{ + "id": "vepelis", + "name": "VePelis", + "active": true, + "adult": false, + "language": "es", + "banner": "vepelis.png", + "thumbnail": "vepelis.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "default": false, + "enabled": true, + "id": "include_in_global_search", + "label": "Incluir en busqueda global", + "type": "bool", + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/vepelis.py b/plugin.video.alfa/channels/vepelis.py new file mode 100755 index 00000000..cfe59178 --- /dev/null +++ b/plugin.video.alfa/channels/vepelis.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Ultimas Agregadas", action="listado2", + url="http://www.vepelis.com/pelicula/ultimas-peliculas", + extra="http://www.vepelis.com/pelicula/ultimas-peliculas")) + itemlist.append(Item(channel=item.channel, title="Estrenos en DVD", action="listado2", + url="http://www.vepelis.com/pelicula/ultimas-peliculas/estrenos-dvd", + extra="http://www.vepelis.com/pelicula/ultimas-peliculas/estrenos-dvd")) + itemlist.append(Item(channel=item.channel, title="Peliculas en Cartelera", action="listado2", + url="http://www.vepelis.com/pelicula/ultimas-peliculas/cartelera", + extra="http://www.vepelis.com/pelicula/ultimas-peliculas/cartelera")) + itemlist.append(Item(channel=item.channel, title="Ultimas Actualizadas", action="listado2", + url="http://www.vepelis.com/pelicula/ultimas-peliculas/ultimas/actualizadas", + extra="http://www.vepelis.com/pelicula/ultimas-peliculas/ultimas/actualizadas")) + itemlist.append(Item(channel=item.channel, title="Por Genero", action="generos", url="http://www.vepelis.com/")) + itemlist.append( + Item(channel=item.channel, title="Por Orden Alfabetico", action="alfabetico", url="http://www.vepelis.com/")) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="http://www.vepelis.com/")) + return itemlist + + +def listarpeliculas(item): + logger.info() + + # Descarga la página + data = scrapertools.cachePage(item.url) + extra = item.extra + + # Extrae las entradas de la pagina seleccionada + '''<td class="DarkText" align="center" valign="top" width="100px" height="160px" style="background-color:#1e1e1e;" onmouseover="this.style.backgroundColor='#000000'" onmouseout="this.style.backgroundColor='#1e1e1e'"><p style="margin-bottom: 3px;border-bottom:#ABABAB 1px solid"> + <a href="http://www.peliculasaudiolatino.com/movies/Larry_Crowne.html"><img src="http://www.peliculasaudiolatino.com/poster/85x115/peliculas/movieimg/movie1317696842.jpg" alt="Larry Crowne" border="0" height="115" width="85"></a>''' + patron = '<td class=.*?<a ' + patron += 'href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + for match in matches: + scrapedurl = match[0] + scrapedtitle = match[2] + scrapedtitle = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8") + scrapedthumbnail = match[1] + scrapedplot = "" + logger.info(scrapedtitle) + + # Añade al listado + itemlist.append( + Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, extra=extra, folder=True)) + + # Extrae la marca de siguiente página + patron = 'Anterior.*? 
:: <a href="/../../.*?/page/([^"]+)">Siguiente ' + matches = re.compile(patron, re.DOTALL).findall(data) + for match in matches: + if len(matches) > 0: + scrapedurl = extra + match + scrapedtitle = "!Pagina Siguiente" + scrapedthumbnail = "" + scrapedplot = "" + + itemlist.append( + Item(channel=item.channel, action="listarpeliculas", title=scrapedtitle, fulltitle=scrapedtitle, + url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, extra=extra, folder=True)) + + return itemlist + + +def findvideos(item): + logger.info() + # Descarga la página + data = scrapertools.cachePage(item.url) + title = item.title + scrapedthumbnail = item.thumbnail + itemlist = [] + patron = '<li><a href="#ms.*?">([^"]+)</a></li>.*?<iframe src="(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + # itemlist.append( Item(channel=item.channel, action="play", title=title , fulltitle=item.fulltitle, url=item.url , thumbnail=scrapedthumbnail , folder=False) ) + + for match in matches: + url = match[1] + title = "SERVIDOR: " + match[0] + title = unicode(title, "iso-8859-1", errors="replace").encode("utf-8") + itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url, + thumbnail=scrapedthumbnail, folder=False)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + + from core import servertools + itemlist = servertools.find_video_items(data=item.url) + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.action = "play" + videoitem.folder = False + + return itemlist + # data2 = scrapertools.cache_page(item.url) + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/mv.php?url=","http://www.megavideo.com/?v=") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videobb.php?url=","http://www.videobb.com/watch_video.php?v=") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidbux.php?url=","http://www.vidbux.com/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidxden.php?url=","http://www.vidxden.com/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videozer.php?url=","http://www.videozer.com/video/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/v/pl/play.php?url=","http://www.putlocker.com/embed/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/v/mv/play.php?url=","http://www.modovideo.com/frame.php?v=") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/v/ss/play.php?url=","http://www.sockshare.com/embed/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/v/vb/play.php?url=","http://vidbull.com/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/sockshare.php?url=","http://www.sockshare.com/embed/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/moevide.php?url=","http://moevideo.net/?page=video&uid=") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/novamov.php?url=","http://www.novamov.com/video/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/movshare.php?url=","http://www.movshare.net/video/") + # data2 = data2.replace("http://www.peliculasaudiolatino.com/show/divxstage.php?url=","http://www.divxstage.net/video/") + # listavideos = servertools.findvideos(data2) + + +# for video in listavideos: +# invalid = video[1] +# invalid = invalid[0:8] +# if invalid!= "FN3WE43K" and invalid!="9CC3F8&e": +# scrapedtitle = item.title+video[0] +# videourl = item.url +# server = video[2] +# if 
(DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]") +# logger.info("url=" + item.url) + +# Add to the XBMC listing +# itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , fulltitle=item.fulltitle, url=videourl , server=server , folder=False) ) +# itemlist.append( Item(channel=item.channel, action="play" , title=item.title , url=item.url, thumbnail="", plot="", server=item.url)) + + +# return itemlist + +def generos(item): + logger.info() + itemlist = [] + + # Download the page + data = scrapertools.cachePage(item.url) + + patron = '>.*?<li><a title="(.*?)" href="(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for match in matches: + scrapedurl = urlparse.urljoin("", match[1]) + scrapedurl = scrapedurl.replace(".html", "/page/0.html") + extra = scrapedurl.replace("/page/0.html", "/page/") + scrapedtitle = match[0] + scrapedthumbnail = "" + scrapedplot = "" + logger.info(scrapedtitle) + + if scrapedtitle == "Eroticas +18": + if config.get_setting("adult_mode") != 0: + itemlist.append(Item(channel=item.channel, action="listado2", title="Eroticas +18", + url="http://www.myhotamateurvideos.com", thumbnail=scrapedthumbnail, + plot=scrapedplot, extra="", folder=True)) + else: + if scrapedtitle != "" and len(scrapedtitle) < 20 and scrapedtitle != "Iniciar Sesion": + itemlist.append(Item(channel=item.channel, action="listado2", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot, extra=extra, folder=True)) + + itemlist = sorted(itemlist, key=lambda Item: Item.title) + return itemlist + + +def alfabetico(item): + logger.info() + + extra = item.url + itemlist = [] + itemlist.append( + Item(channel=item.channel, action="listado2", title="0-9", url="http://www.vepelis.com/letra/09.html", + extra="http://www.vepelis.com/letra/09.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="A", url="http://www.vepelis.com/letra/a.html", + extra="http://www.vepelis.com/letra/a.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="B", url="http://www.vepelis.com/letra/b.html", + extra="http://www.vepelis.com/letra/b.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="C", url="http://www.vepelis.com/letra/c.html", + extra="http://www.vepelis.com/letra/c.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="D", url="http://www.vepelis.com/letra/d.html", + extra="http://www.vepelis.com/letra/d.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="E", url="http://www.vepelis.com/letra/e.html", + extra="http://www.vepelis.com/letra/e.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="F", url="http://www.vepelis.com/letra/f.html", + extra="http://www.vepelis.com/letra/f.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="G", url="http://www.vepelis.com/letra/g.html", + extra="http://www.vepelis.com/letra/g.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="H", url="http://www.vepelis.com/letra/h.html", + extra="http://www.vepelis.com/letra/h.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="I", url="http://www.vepelis.com/letra/i.html", + extra="http://www.vepelis.com/letra/i.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="J", url="http://www.vepelis.com/letra/j.html", +
extra="http://www.vepelis.com/letra/j.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="K", url="http://www.vepelis.com/letra/k.html", + extra="http://www.vepelis.com/letra/k.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="L", url="http://www.vepelis.com/letra/l.html", + extra="http://www.vepelis.com/letra/l.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="M", url="http://www.vepelis.com/letra/m.html", + extra="http://www.vepelis.com/letra/m.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="N", url="http://www.vepelis.com/letra/n.html", + extra="http://www.vepelis.com/letra/n.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="O", url="http://www.vepelis.com/letra/o.html", + extra="http://www.vepelis.com/letra/o.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="P", url="http://www.vepelis.com/letra/p.html", + extra="http://www.vepelis.com/letra/p.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="Q", url="http://www.vepelis.com/letra/q.html", + extra="http://www.vepelis.com/letra/q.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="R", url="http://www.vepelis.com/letra/r.html", + extra="http://www.vepelis.com/letra/r.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="S", url="http://www.vepelis.com/letra/s.html", + extra="http://www.vepelis.com/letra/s.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="T", url="http://www.vepelis.com/letra/t.html", + extra="http://www.vepelis.com/letra/t.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="U", url="http://www.vepelis.com/letra/u.html", + extra="http://www.vepelis.com/letra/u.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="V", url="http://www.vepelis.com/letra/v.html", + extra="http://www.vepelis.com/letra/v.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="W", url="http://www.vepelis.com/letra/w.html", + extra="http://www.vepelis.com/letra/w.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="X", url="http://www.vepelis.com/letra/x.html", + extra="http://www.vepelis.com/letra/x.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="Y", url="http://www.vepelis.com/letra/y.html", + extra="http://www.vepelis.com/letra/y.html")) + itemlist.append(Item(channel=item.channel, action="listado2", title="Z", url="http://www.vepelis.com/letra/z.html", + extra="http://www.vepelis.com/letra/z.html")) + + return itemlist + + +def listado2(item): + logger.info() + extra = item.extra + itemlist = [] + + # Descarga la página + data = scrapertools.cachePage(item.url) + + patron = '<h2 class="titpeli.*?<a href="([^"]+)" title="([^"]+)".*?peli_img_img">.*?<img src="([^"]+)".*?<strong>Idioma</strong>:.*?/>([^"]+)</div>.*?<strong>Calidad</strong>: ([^"]+)</div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + for match in matches: + scrapedurl = match[0] # urlparse.urljoin("",match[0]) + scrapedtitle = match[1] + ' - ' + match[4] + scrapedtitle = unicode(scrapedtitle, "iso-8859-1", errors="replace").encode("utf-8") + scrapedthumbnail = match[2] + # scrapedplot = match[0] + # itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , 
plot=scrapedplot , folder=True) ) + itemlist.append( + Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, folder=True)) + + # if extra<>"": + # Extract the next-page marker + # patron = 'page=(.*?)"><span><b>' + patron = '<span><b>(.*?)</b></span>' + matches = re.compile(patron, re.DOTALL).findall(data) + # if DEBUG: scrapertools.printMatches(matches) + for match in matches: + # match is the current page number as a string, so convert the whole string + nu = int(match) + 1 + scrapedurl = extra + "?page=" + str(nu) + scrapedtitle = "!Pagina Siguiente ->" + scrapedthumbnail = "" + scrapedplot = "" + itemlist.append( + Item(channel=item.channel, action="listado2", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, extra=extra, folder=True)) + + return itemlist + + +def search(item, texto): + logger.info() + itemlist = [] + + texto = texto.replace(" ", "+") + try: + # Series + item.url = "http://www.vepelis.com/buscar/?q=%s" + item.url = item.url % texto + item.extra = "" + itemlist.extend(listado2(item)) + itemlist = sorted(itemlist, key=lambda Item: Item.title) + + return itemlist + + # Catch the exception so a failing channel does not interrupt the global search + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] diff --git a/plugin.video.alfa/channels/ver-pelis.json b/plugin.video.alfa/channels/ver-pelis.json new file mode 100755 index 00000000..cfe43098 --- /dev/null +++ b/plugin.video.alfa/channels/ver-pelis.json @@ -0,0 +1,42 @@ +{ + "id": "ver-pelis", + "name": "Ver-pelis", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://imgur.com/lmYQgOu.png", + "version": 1, + "changes": [ + { + "date": "26/04/2016", + "description": "Release" + }, + { + "date": "28/06/2017", + "description": "Corrección código y algunas mejoras" + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/ver-pelis.py b/plugin.video.alfa/channels/ver-pelis.py new file mode 100755 index 00000000..48e74f0b --- /dev/null +++ b/plugin.video.alfa/channels/ver-pelis.py @@ -0,0 +1,438 @@ +# -*- coding: utf-8 -*- + +import re +import unicodedata +from threading import Thread + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +__modo_grafico__ = config.get_setting('modo_grafico', "ver-pelis") + + +# For Bing lookups while avoiding bans + +def browser(url): + import mechanize + + # Use a mechanize Browser to work around problems with the Bing search + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 +
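# max_time=1 makes mechanize honour only meta-refreshes that ask for a pause of at most one second, so a slow refresh loop cannot stall the scrape (mechanize is not in the standard library and is assumed here to be importable from the addon's bundled libs). +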
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?) + # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] + # Open some site, let's pick a random one, the first that pops in mind + r = br.open(url) + response = r.read() + print response + if "img,divreturn" in response: + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + print "prooooxy" + response = r.read() + + return response + + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + itemlist = [] + global i + i = 0 + itemlist.append( + item.clone(title="[COLOR oldlace][B]Películas[/B][/COLOR]", action="scraper", url="http://ver-pelis.me/ver/", + thumbnail="http://imgur.com/36xALWc.png", fanart="http://imgur.com/53dhEU4.jpg", + contentType="movie")) + itemlist.append(item.clone(title="[COLOR oldlace][B]Películas en Español[/B][/COLOR]", action="scraper", + url="http://ver-pelis.me/ver/espanol/", thumbnail="http://imgur.com/36xALWc.png", + fanart="http://imgur.com/53dhEU4.jpg", contentType="movie")) + + itemlist.append(itemlist[-1].clone(title="[COLOR orangered][B]Buscar[/B][/COLOR]", action="search", + thumbnail="http://imgur.com/ebWyuGe.png", fanart="http://imgur.com/53dhEU4.jpg", + contentType="tvshow")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://ver-pelis.me/ver/buscar?s=" + texto + item.extra = "search" + if texto != '': + return scraper(item) + + +def scraper(item): + logger.info() + itemlist = [] + url_next_page = "" + global i + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = scrapertools.find_multiple_matches(data, + '<a class="thumb cluetip".*?href="([^"]+)".*?src="([^"]+)" alt="([^"]+)".*?"res">([^"]+)</span>') + if len(patron) > 20: + if item.next_page != 20: + url_next_page = item.url + patron = patron[:20] + next_page = 20 + item.i = 0 + else: + patron = patron[item.i:][:20] + next_page = 20 + + url_next_page = item.url + + for url, thumb, title, cuality in patron: + title = re.sub(r"Imagen", "", title) + title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if + unicodedata.category(c) != 'Mn')).encode("ascii", "ignore") + titulo = "[COLOR floralwhite]" + title + "[/COLOR]" + " " + "[COLOR crimson][B]" + cuality + "[/B][/COLOR]" + title = re.sub(r"!|\/.*", "", title).strip() + + if item.extra != "search": + item.i += 1 + new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title, + contentTitle=title, contentType="movie", library=True) + new_item.infoLabels['year'] = get_year(url) + itemlist.append(new_item) + + ## Pagination + if url_next_page: + itemlist.append(item.clone(title="[COLOR crimson]Siguiente >>[/COLOR]", url=url_next_page, next_page=next_page, + thumbnail="http://imgur.com/w3OMy2f.png", i=item.i)) + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for item in itemlist: + if "Siguiente >>" not in item.title: + if "0." in str(item.infoLabels['rating']): + item.infoLabels['rating'] = "[COLOR indianred]Sin puntuación[/COLOR]" + else: + item.infoLabels['rating'] = "[COLOR orange]" + str(item.infoLabels['rating']) + "[/COLOR]" + item.title = item.title + " " + str(item.infoLabels['rating']) + except: + pass + + for item_tmdb in itemlist: + logger.info(str(item_tmdb.infoLabels['tmdb_id'])) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + # Fetch artwork on a background thread so the listing is not blocked + th = Thread(target=get_art, args=[item]) + th.setDaemon(True) + th.start() + data = httptools.downloadpage(item.url).data + data_post = scrapertools.find_single_match(data, "type: 'POST'.*?id: (.*?),slug: '(.*?)'") + if data_post: + post = 'id=' + data_post[0] + '&slug=' + data_post[1] + data_info = httptools.downloadpage('http://ver-pelis.me/ajax/cargar_video.php', post=post).data + enlaces = scrapertools.find_multiple_matches(data_info, + "</i> (\w+ \w+).*?<a onclick=\"load_player\('([^']+)','([^']+)', ([^']+),.*?REPRODUCIR\">([^']+)</a>") + for server, id_enlace, name, number, idioma_calidad in enlaces: + + if "SUBTITULOS" in idioma_calidad and "P" not in idioma_calidad: + idioma_calidad = idioma_calidad.replace("SUBTITULOS", "VO") + idioma_calidad = idioma_calidad.replace("VO", "[COLOR orangered] VO[/COLOR]") + elif "SUBTITULOS" in idioma_calidad and "P" in idioma_calidad: + idioma_calidad = "[COLOR indianred] " + idioma_calidad + "[/COLOR]" + + elif "LATINO" in idioma_calidad: + idioma_calidad = idioma_calidad.replace("LATINO", "[COLOR red]LATINO[/COLOR]") + elif "Español" in idioma_calidad: + idioma_calidad = idioma_calidad.replace("Español", "[COLOR crimson]ESPAÑOL[/COLOR]") + if "HD" in idioma_calidad: + idioma_calidad = idioma_calidad.replace("HD", "[COLOR crimson] HD[/COLOR]") + elif "720" in idioma_calidad: + idioma_calidad = idioma_calidad.replace("720", "[COLOR firebrick] 720[/COLOR]") + elif "TS" in idioma_calidad: + idioma_calidad = idioma_calidad.replace("TS", "[COLOR brown] TS[/COLOR]") + + elif "CAM" in idioma_calidad: + idioma_calidad = idioma_calidad.replace("CAM", "[COLOR darkkhaki] CAM[/COLOR]") + + url = "http://ver-pelis.me/ajax/video.php?id=" + id_enlace + "&slug=" + name + "&quality=" + number + + if "Ultra" not in server: + server = "[COLOR cyan][B]" + server + "[/B][/COLOR]" + extra = "yes" + else: + server = "[COLOR yellow][B]" + server + "[/B][/COLOR]" + extra = "" + title = server.strip() + " " + idioma_calidad + itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart, + thumbnail=item.thumbnail, fulltitle=item.title, extra=extra, folder=True)) + if item.library and config.get_videolibrary_support() and len(itemlist) > 0: + infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], + 'title': item.infoLabels['title']} + itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", + action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, + text_color="0xFFf7f7f7", + thumbnail='http://imgur.com/gPyN1Tf.png')) + else: + itemlist.append( + Item(channel=item.channel, action="", title="[COLOR red][B]Upps!..Archivo no encontrado...[/B][/COLOR]", + thumbnail=item.thumbnail)) + return itemlist + + +def play(item): + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'\\', '', data) + item.url = scrapertools.find_single_match(data, 'src="([^"]+)"') + data = httptools.downloadpage(item.url).data + + if item.extra != "yes": + patron = 
'"label":(.*?),.*?"type":"(.*?)",.*?"file":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if not matches: + patron = '"label":(.*?),.*?"file":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for dato_a, type, dato_b in matches: + if 'http' in dato_a: + url = dato_a + calidad = dato_b + else: + url = dato_b + calidad = dato_a + url = url.replace('\\', '') + type = type.replace('\\', '') + itemlist.append( + Item(channel=item.channel, url=url, action="play", title=item.fulltitle + " (" + dato_a + ")", + folder=False)) + else: + + url = scrapertools.find_single_match(data, 'window.location="([^"]+)"') + + videolist = servertools.find_video_items(data=url) + for video in videolist: + itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, + title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]", action="play", + folder=False)) + + return itemlist + + +def fanartv(item, id_tvdb, id, images={}): + headers = [['Content-Type', 'application/json']] + from core import jsontools + if item.contentType == "movie": + url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ + % id + else: + url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb + try: + data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) + if data and not "error message" in data: + for key, value in data.items(): + if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: + images[key] = value + else: + images = [] + + except: + images = [] + return images + + +def get_art(item): + logger.info() + id = item.infoLabels['tmdb_id'] + check_fanart = item.infoLabels['fanart'] + if item.contentType != "movie": + tipo_ps = "tv" + else: + tipo_ps = "movie" + if not id: + year = item.extra + otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, year=year, tipo=tipo_ps) + id = otmdb.result.get("id") + if id == None: + otmdb = tmdb.Tmdb(texto_buscado=item.fulltitle, tipo=tipo_ps) + id = otmdb.result.get("id") + if id == None: + if item.contentType == "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + item.fulltitle.replace(' ', '+'), year) + + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + item.fulltitle.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, idioma_busqueda="es") + id = otmdb.result.get("id") + + if id == None: + if "(" in item.fulltitle: + title = scrapertools.find_single_match(item.fulltitle, '\(.*?\)') + if item.contentType != "movie": + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| 
|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", + data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + else: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", + data) + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>') + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + try: + imdb_id = scrapertools.get_match(subdata_imdb, + '<a href=.*?http.*?imdb.com/.*?/title/(.*?)/.*?"') + except: + imdb_id = "" + otmdb = tmdb.Tmdb(external_id=imdb_id, external_source="imdb_id", tipo=tipo_ps, + idioma_busqueda="es") + id = otmdb.result.get("id") + + # id_tvdb must exist even when a tmdb id was found, since fanartv() receives it below + id_tvdb = "" + if not id: + fanart = item.fanart + imagenes = [] + itmdb = tmdb.Tmdb(id_Tmdb=id, tipo=tipo_ps) + images = itmdb.result.get("images") + if images: + for key, value in images.iteritems(): + for detail in value: + imagenes.append('http://image.tmdb.org/t/p/original' + detail["file_path"]) + + if len(imagenes) >= 4: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[1] + "|" + imagenes[3] + elif imagenes[2] != check_fanart: + item.extra = imagenes[2] + "|" + imagenes[3] + else: + item.extra = imagenes[3] + "|" + imagenes[3] + elif len(imagenes) == 3: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + + + else: + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + elif imagenes[2] != check_fanart: + item.extra = imagenes[1] + "|" + imagenes[2] + else: + item.extra = imagenes[1] + "|" + imagenes[1] + elif len(imagenes) == 2: + if imagenes[0] != check_fanart: + item.fanart = imagenes[0] + else: + item.fanart = imagenes[1] + if imagenes[1] != check_fanart and imagenes[1] != item.fanart: + item.extra = imagenes[0] + "|" + imagenes[1] + else: + item.extra = imagenes[1] + "|" + imagenes[0] + elif len(imagenes) == 1: + item.extra = imagenes[0] + "|" + imagenes[0] + else: + item.extra = item.fanart + "|" + item.fanart + + images_fanarttv = fanartv(item, id_tvdb, id) + if images_fanarttv: + if item.contentType == "movie": + if images_fanarttv.get("moviedisc"): + item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url") + elif images_fanarttv.get("hdmovielogo"): + item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url") + elif images_fanarttv.get("moviethumb"): + item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url") + elif images_fanarttv.get("moviebanner"): + item.thumbnail = images_fanarttv.get("moviebanner")[0].get("url") + else: + item.thumbnail = item.thumbnail + else: + if images_fanarttv.get("hdtvlogo"): + item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url") + elif images_fanarttv.get("clearlogo"): + item.thumbnail = images_fanarttv.get("clearlogo")[0].get("url") + + if images_fanarttv.get("tvbanner"): + item.extra = item.extra + "|" + 
images_fanarttv.get("tvbanner")[0].get("url") + elif images_fanarttv.get("tvthumb"): + item.extra = item.extra + "|" + images_fanarttv.get("tvthumb")[0].get("url") + else: + item.extra = item.extra + "|" + item.thumbnail + else: + item.extra = item.extra + "|" + item.thumbnail + + +def get_year(url): + data = httptools.downloadpage(url).data + year = scrapertools.find_single_match(data, '<p><strong>Año:</strong>(.*?)</p>') + if year == "": + year = " " + return year diff --git a/plugin.video.alfa/channels/verpeliculasnuevas.json b/plugin.video.alfa/channels/verpeliculasnuevas.json new file mode 100755 index 00000000..cd9b7b88 --- /dev/null +++ b/plugin.video.alfa/channels/verpeliculasnuevas.json @@ -0,0 +1,79 @@ +{ + "id": "verpeliculasnuevas", + "name": "VerPeliculasNuevas", + "active": true, + "adult": false, + "language": "es", + "compatible": { + "addon_version": "4.3" + }, + "banner": "https://s23.postimg.org/4uyyz6w4b/verpeliculasnuevas_banner.png", + "thumbnail": "https://s27.postimg.org/atgs5erab/verpeliculasnuevas.png", + "version": 1, + "changes": [ + { + "date": "23/06/2017", + "description": "añadidos servidores favoritos en find_videos" + }, + { + "date": "06/06/2017", + "description": "compatibilidad con AutoPlay" + }, + { + "date": "25/05/2017", + "description": "cambios esteticos" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "06/01/2017", + "description": "Release." + } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "Español", + "VOS" + ] + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/verpeliculasnuevas.py b/plugin.video.alfa/channels/verpeliculasnuevas.py new file mode 100755 index 00000000..40b6f65d --- /dev/null +++ b/plugin.video.alfa/channels/verpeliculasnuevas.py @@ -0,0 +1,392 @@ +# -*- coding: utf-8 -*- + +import re + +from channels import autoplay +from channels import filtertools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +host = 'http://verpeliculasnuevas.com' + +IDIOMAS = {'latino': 'Latino', 'castellano': 'Español', 'sub': 'VOS'} +list_language = IDIOMAS.values() + +taudio = {'latino': '[COLOR limegreen]LATINO[/COLOR]', + 'castellano': '[COLOR yellow]ESPAÑOL[/COLOR]', + 'sub': '[COLOR red]ORIGINAL SUBTITULADO[/COLOR]', + 'castellanolatinosub': '[COLOR orange]MULTI[/COLOR]', + 'castellanolatino': '[COLOR orange]MULTI[/COLOR]' + } + +thumbaudio = {'latino': 'http://flags.fmcdn.net/data/flags/normal/mx.png', + 'castellano': 'http://flags.fmcdn.net/data/flags/normal/es.png', + 'sub': 'https://s32.postimg.org/nzstk8z11/sub.png' + } + +list_quality = ['HQ', + 'HD', + 'HD-1080', + 'DVD', + 'CAM' + ] +list_servers = [ + 'openload', + 'powvideo', + 
'streamplay', + 'streaminto', + 'netu', + 'vidabc', + 'thevideos', + 'yourupload', + 'thevideome', + 'directo', + 'netutv' +] + +tcalidad = {'hq': '[COLOR limegreen]HQ[/COLOR]', + 'hd': '[COLOR limegreen]HD[/COLOR]', + 'hd-1080': '[COLOR limegreen]HD-1080[/COLOR]', + 'dvd': '[COLOR limegreen]DVD[/COLOR]', + 'cam': '[COLOR red]CAM[/COLOR]' + } + +thumbcalidad = {'hd-1080': 'https://s21.postimg.org/4h1s0t1wn/hd1080.png', + 'dvd': 'https://s1.postimg.org/m89hus1tb/dvd.png', + 'cam': 'https://s11.postimg.org/ad4o5wpz7/cam.png', + 'hq': 'https://s23.postimg.org/j76ldf8qz/image.png', + 'hd': 'https://s27.postimg.org/m2dhhkrur/image.png' + } + +thumbletras = {'0-9': 'https://s32.postimg.org/drojt686d/image.png', '1': 'https://s32.postimg.org/drojt686d/image.png', + 'a': 'https://s32.postimg.org/llp5ekfz9/image.png', 'b': 'https://s32.postimg.org/y1qgm1yp1/image.png', + 'c': 'https://s32.postimg.org/vlon87gmd/image.png', 'd': 'https://s32.postimg.org/3zlvnix9h/image.png', + 'e': 'https://s32.postimg.org/bgv32qmsl/image.png', 'f': 'https://s32.postimg.org/y6u7vq605/image.png', + 'g': 'https://s32.postimg.org/9237ib6jp/image.png', 'h': 'https://s32.postimg.org/812yt6pk5/image.png', + 'i': 'https://s32.postimg.org/6nbbxvqat/image.png', 'j': 'https://s32.postimg.org/axpztgvdx/image.png', + 'k': 'https://s32.postimg.org/976yrzdut/image.png', 'l': 'https://s32.postimg.org/fmal2e9yd/image.png', + 'm': 'https://s32.postimg.org/m19lz2go5/image.png', 'n': 'https://s32.postimg.org/b2ycgvs2t/image.png', + "ñ": "https://s30.postimg.org/ayy8g02xd/image.png", 'o': 'https://s32.postimg.org/c6igsucpx/image.png', + 'p': 'https://s32.postimg.org/jnro82291/image.png', 'q': 'https://s32.postimg.org/ve5lpfv1h/image.png', + 'r': 'https://s32.postimg.org/nmovqvqw5/image.png', 's': 'https://s32.postimg.org/zd2t89jol/image.png', + 't': 'https://s32.postimg.org/wk9lo8jc5/image.png', 'u': 'https://s32.postimg.org/w8s5bh2w5/image.png', + 'v': 'https://s32.postimg.org/e7dlrey91/image.png', 'w': 'https://s32.postimg.org/fnp49k15x/image.png', + 'x': 'https://s32.postimg.org/dkep1w1d1/image.png', 'y': 'https://s32.postimg.org/um7j3zg85/image.png', + 'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'} + +tgenero = {"comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png", + "suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png", + "drama": "https://s16.postimg.org/94sia332d/drama.png", + "accion": "https://s3.postimg.org/y6o9puflv/accion.png", + "aventura": "https://s10.postimg.org/6su40czih/aventura.png", + "romance": "https://s15.postimg.org/fb5j8cl63/romance.png", + "thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png", + "ciencia-ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png", + "terror": "https://s7.postimg.org/yi0gij3gb/terror.png", + "documental": "https://s16.postimg.org/7xjj4bmol/documental.png", + "musical": "https://s29.postimg.org/bbxmdh9c7/musical.png", + "fantastico": "https://s10.postimg.org/pbkbs6j55/fantastico.png", + "deporte": "https://s13.postimg.org/xuxf5h06v/deporte.png", + "infantil": "https://s23.postimg.org/g5rmazozv/infantil.png", + "animacion": "https://s13.postimg.org/5on877l87/animacion.png" + } + +patrones = ['', '<span class="clms">Sinopsis:<\/span>([^<]+)<div class="info_movie">'] + + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append( + item.clone(title="Todas", + action="lista", + thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', + 
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', + extra='peliculas/', + url=host + )) + + itemlist.append( + itemlist[-1].clone(title="Generos", + action="menuseccion", + thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', + fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', + url=host, + extra='/genero' + )) + + itemlist.append( + itemlist[-1].clone(title="Alfabetico", + action="menuseccion", + thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png', + fanart='https://s17.postimg.org/fwi1y99en/a-z.png', + url=host, extra='/tag' + )) + + itemlist.append( + itemlist[-1].clone(title="Audio", + action="menuseccion", + thumbnail='https://s27.postimg.org/avs17iuw3/audio.png', + fanart='https://s27.postimg.org/avs17iuw3/audio.png', + url=host, + extra='/audio' + )) + + itemlist.append( + itemlist[-1].clone(title="Calidad", + action="menuseccion", + thumbnail='https://s13.postimg.org/6nzv8nlkn/calidad.png', + fanart='https://s13.postimg.org/6nzv8nlkn/calidad.png', + extra='/calidad' + )) + + itemlist.append( + itemlist[-1].clone(title="Año", + action="menuseccion", + thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png', + fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png', + url=host, + extra='/fecha-estreno' + )) + + itemlist.append( + itemlist[-1].clone(title="Buscar", + action="search", + url=host + '?s=', + thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', + fanart='https://s30.postimg.org/pei7txpa9/buscar.png' + )) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + + +def menuseccion(item): + logger.info() + itemlist = [] + seccion = item.extra + data = httptools.downloadpage(item.url).data + + if seccion == '/audio': + patron = "<a href='\/audio([^']+)' title='lista de películas en.*?'>(?:Español|Latino|Subtitulado)<\/a>" + elif seccion == '/calidad': + patron = "<a href='\/calidad([^']+)' title='lista de películas en.*?'>(?:HD-1080|HD-Real|DvD|HQ|CAM)<\/a>" + elif seccion == '/fecha-estreno': + patron = "<a href='\/fecha-estreno([^']+)' title='lista de películas del.*?'>.*?<\/a>" + elif seccion == '/genero': + patron = '<a href="\/genero([^"]+)">.*?<\/a><\/li>' + else: + patron = "<a href='\/tag([^']+)' title='lista de películas.*?'>.*?<\/a>" + + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl in matches: + + url = host + seccion + scrapedurl + titulo = scrapedurl.replace('/', '') + + if seccion == '/audio': + title = taudio[titulo.lower()] + thumbnail = thumbaudio[titulo] + elif seccion == '/calidad': + title = tcalidad[titulo.lower()] + thumbnail = thumbcalidad[titulo] + elif seccion == '/tag': + title = titulo.upper() + if titulo in thumbletras: + thumbnail = thumbletras[titulo] + else: + thumbnail = '' + else: + title = titulo.upper() + if titulo in tgenero: + thumbnail = tgenero[titulo] + else: + thumbnail = '' + + itemlist.append( + Item(channel=item.channel, + action='lista', + title=title, + url=url, + thumbnail=thumbnail + )) + + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r'"|\n|\r|\t| |<br>', "", data) + + patron = "peli><a href=([^ ]+) title=(.*?)><img src=([^ ]+) alt=.*?><div class=([^>]+)>.*?<p>.*?<\/p>.*?flags ([" \ + "^']+)'" + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcalidad, scrapedidioma in matches: + year = scrapertools.find_single_match(scrapedtitle, '.*?\((\d{4})\)') + scrapedtitle = scrapertools.find_single_match(scrapedtitle, 
'(.*?)\(\.*?') + url = scrapedurl + thumbnail = scrapedthumbnail + scrapedcalidad = scrapedcalidad.replace("'", "") + scrapedcalidad = scrapedcalidad.lower() + + if scrapedcalidad in tcalidad: + scrapedcalidad = tcalidad[scrapedcalidad] + else: + scrapedcalidad = '[COLOR orange]MULTI[/COLOR]' + + if scrapedidioma in taudio: + scrapedidioma = taudio[scrapedidioma] + else: + scrapedidioma = '[COLOR orange]MULTI[/COLOR]' + + title = scrapedtitle + ' | ' + scrapedcalidad + ' | ' + scrapedidioma + ' | ' + fanart = '' + plot = '' + + itemlist.append( + Item(channel=item.channel, + action='findvideos', + title=title, + url=url, + thumbnail=thumbnail, + plot=plot, + fanart=fanart, + contentTitle=scrapedtitle, + extra=item.extra, + infoLabels={'year': year}, + show=scrapedtitle, + list_language=list_language, + context=autoplay.context + )) + + # #Paginacion + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + itemlist = fail_tmdb(itemlist) + if itemlist != []: + actual_page_url = item.url + next_page = scrapertools.find_single_match(data, + "class=previouspostslink' href='([^']+)'>Siguiente ›<\/a>") + if next_page != '': + itemlist.append( + Item(channel=item.channel, + action="lista", + title='Siguiente >>>', + url=next_page, + thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', + extra=item.extra + )) + + return itemlist + + +def fail_tmdb(itemlist): + logger.info() + realplot = '' + for item in itemlist: + if item.infoLabels['plot'] == '': + data = httptools.downloadpage(item.url).data + if item.thumbnail == '': + item.thumbnail = scrapertools.find_single_match(data, patrones[0]) + realplot = scrapertools.find_single_match(data, patrones[1]) + item.plot = scrapertools.remove_htmltags(realplot) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + + if texto != '': + return lista(item) + else: + return [] + + +def findvideos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"'|\n|\r|\t| |<br>", "", data) + + patron = 'class="servidor" alt=""> ([^<]+)<\/span><span style="width: 40px;">([^<]+)<\/span><a class="verLink" ' \ + 'rel="nofollow" href="([^"]+)" target="_blank"> <img title="Ver online gratis"' + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedidioma, scrapedcalidad, scrapedurl in matches: + + scrapedidioma = scrapertools.decodeHtmlentities(scrapedidioma) + + scrapedcalidad = scrapertools.decodeHtmlentities(scrapedcalidad) + if scrapedidioma.lower() == 'español': + scrapedidioma = 'castellano' + scrapedidioma = scrapedidioma.lower() + idioma = taudio[scrapedidioma.lower()] + calidad = tcalidad[scrapedcalidad.lower()] + url = scrapedurl + itemlist.append( + item.clone(action='play', + idioma=idioma, + calidad=calidad, + url=url, + language=IDIOMAS[scrapedidioma.lower()], + quality=scrapedcalidad + )) + + itemlist = servertools.get_servers_itemlist(itemlist, lambda + i: item.contentTitle + ' | ' + i.calidad + ' | ' + i.idioma + ' (' + i.server + ')', True) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + 
contentTitle=item.contentTitle + )) + + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + # categoria='peliculas' + try: + if categoria == 'peliculas': + item.url = host + elif categoria == 'infantiles': + item.url = host + '/genero/infantil/' + itemlist = lista(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/verseriesonlinetv.json b/plugin.video.alfa/channels/verseriesonlinetv.json new file mode 100755 index 00000000..d8b5201c --- /dev/null +++ b/plugin.video.alfa/channels/verseriesonlinetv.json @@ -0,0 +1,38 @@ +{ + "id": "verseriesonlinetv", + "name": "Verseriesonlinetv", + "active": true, + "adult": false, + "language": "es", + "banner": "verseriesonlinetv.png", + "thumbnail": "http://s6.postimg.org/gl0ok4t01/verserieslogo.png", + "version": 1, + "changes": [ + { + "date": "17/12/2016", + "description": "Mejora código y adaptación Infoplus" + }, + { + "date": "04/04/2017", + "description": "Migración a Httptools" + }, + { + "date": "28/06/2017", + "description": "Corrección código y algunas mejoras" + } + ], + "categories": [ + "tvshow", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/verseriesonlinetv.py b/plugin.video.alfa/channels/verseriesonlinetv.py new file mode 100755 index 00000000..848f451a --- /dev/null +++ b/plugin.video.alfa/channels/verseriesonlinetv.py @@ -0,0 +1,1036 @@ +# -*- coding: utf-8 -*- + +import os +import re +import urllib2 + +import xbmc +import xbmcgui +from core import config +from core import logger +from core import scrapertools, httptools +from core import servertools +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def browser(url): + import mechanize + + # Use a mechanize Browser to work around problems with the Bing search + br = mechanize.Browser() + # Browser options + br.set_handle_equiv(False) + br.set_handle_gzip(True) + br.set_handle_redirect(True) + br.set_handle_referer(False) + br.set_handle_robots(False) + # Follows refresh 0 but not hangs on refresh > 0 + br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) + + # Want debugging messages? + # br.set_debug_http(True) + # br.set_debug_redirects(True) + # br.set_debug_responses(True) + + # User-Agent (this is cheating, ok?)
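+ # A fixed desktop Safari User-Agent is sent so anonymouse.org treats the request as a normal browser; if the response lacks the expected ".ftrH" page marker checked further down, the code retries through the ssl-proxy.my-addr.org mirror.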
+ br.addheaders = [('User-agent', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] + # br.addheaders =[('Cookie','SRCHD=D=4210979&AF=NOFORM; domain=.bing.com; expires=Wednesday, 09-Nov-06 23:12:40 GMT; MUIDB=36F71C46589F6EAD0BE714175C9F68FC; domain=www.bing.com; expires=15 de enero de 2018 08:43:26 GMT+1')] + + # Open some site, let's pick a random one, the first that pops in mind + r = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url) + response = r.read() + if not ".ftrH,.ftrHd,.ftrD>" in response: + print "proooxy" + r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) + response = r.read() + return response + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="[COLOR chartreuse][B]Series[/B][/COLOR]", action="scraper", + url="http://www.verseriesonline.tv/series", + thumbnail="http://s6.postimg.org/6hpa9tzgx/verseriesthumb.png", + fanart="http://s6.postimg.org/71zpys3bl/verseriesfan2.jpg")) + + itemlist.append(Item(channel=item.channel, title="[COLOR chartreuse][B]Buscar[/B][/COLOR]", action="search", url="", + thumbnail="http://s6.postimg.org/5gp1kpihd/verseriesbuscthumb.png", + fanart="http://s6.postimg.org/7vgx54yq9/verseriesbuscfan.jpg", extra="search")) + + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = "http://www.verseriesonline.tv/series?s=" + texto + + try: + return scraper(item) + # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def scraper(item): + logger.info() + itemlist = [] + ###Borra customkeys + + + # Descarga la página + data = dhe(httptools.downloadpage(item.url).data) + + patron = '<li class="item">.*?<a class="poster" href="([^"]+)".*?<img src="([^"]+)" alt="([^<]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + title_fan = scrapedtitle.strip() + + # Busqueda del año y puntuacion + urlyear = scrapedurl + data2 = httptools.downloadpage(scrapedurl).data + year = scrapertools.get_match(data2, '<h1>.*?<span>\((.*?)\)</span></h1>') + points = scrapertools.get_match(data2, '<div class="number">.*?<b>(.*?)</b>') + if points == "": + points = "No puntuada" + + scrapedtitle = scrapedtitle + " (" + "[COLOR orange][B]" + points + "[/B][/COLOR]" + ")" + show = title_fan + "|" + year + + scrapedtitle = scrapedtitle.replace(scrapedtitle, "[COLOR springgreen]" + scrapedtitle + "[/COLOR]") + itemlist.append( + Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart", thumbnail=scrapedthumbnail, + fanart="http://s6.postimg.org/8pyvdfh75/verseriesfan.jpg", show=show, plot=title_fan, folder=True)) + + ## Paginación + # <span class='current'>1</span><a href='http://www.bricocine.com/c/hd-microhd/page/2/' + + # Si falla no muestra ">> Página siguiente" + try: + + next_page = scrapertools.get_match(data, + "<span class='current'>\d+</span><a class=\"page larger\" href=\"([^\"]+)\"") + + title = "[COLOR floralwhite]Pagina siguiente>>[/COLOR]" + itemlist.append(Item(channel=item.channel, title=title, url=next_page, action="scraper", + fanart="http://s6.postimg.org/8pyvdfh75/verseriesfan.jpg", + thumbnail="http://virtualmarketingpro.com/app/webroot/img/vmp/arrows/Green%20Arrow%20(26).png", + folder=True)) + 
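# get_match raises when the "current page" marker has no successor (e.g. on the last page), so the bare except below simply omits the next-page item. +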
except: + pass + + return itemlist + + +def fanart(item): + # Vamos a sacar todos los fanarts y arts posibles + logger.info() + itemlist = [] + url = item.url + data = dhe(httptools.downloadpage(item.url).data) + data = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| ", "", data) + try: + sinopsis = scrapertools.get_match(data, '<div class="sinopsis">.*?</b>(.*?)</div>') + if " . Aquí podrán encontrar la información de toda la serie incluyendo sus temporadas y episodios." in sinopsis: + sinopsis = "" + else: + sinopsis = re.sub( + '.. Aquí podrán encontrar la información de toda la serie incluyendo sus temporadas y episodios.', '.', + sinopsis) + except: + sinopsis = "" + + title_fan = item.show.split("|")[0] + title = title_fan.decode('utf8').encode('latin1') + title = title.replace(' ', '%20') + item.title = re.sub(r"\(.*?\)", "", item.title) + year = item.show.split("|")[1] + + url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&ggenre=TV_SE&fromyear={1}&toyear={1}".format( + title, year) + data = httptools.downloadpage(url).data + + url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"') + if url_filmaf: + url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf + data = httptools.downloadpage(url_filmaf).data + else: + + try: + url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year) + data = browser(url_bing) + data = re.sub(r'\n|\r|\t|\s{2}| ', '', data) + + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + except: + pass + if sinopsis == "": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + print "lobeznito" + print rating_filma + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta serie no tiene críticas[/B][/COLOR]" + + ###Busqueda en tmdb + + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false&first_air_date_year=" + year + data_tmdb = 
httptools.downloadpage(url_tmdb).data + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + + ###Busqueda en bing el id de imdb de la serie + if len(matches) == 0: + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es" + data_tmdb = httptools.downloadpage(url_tmdb).data + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + if len(matches) == 0: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + try: + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + except: + pass + + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + imdb_id = "" + ###Busca id de tvdb y tmdb mediante imdb id + + urlremotetbdb = "https://api.themoviedb.org/3/find/" + imdb_id + "?api_key=" + api_key + "&external_source=imdb_id&language=es" + data_tmdb = httptools.downloadpage(urlremotetbdb).data + matches = scrapertools.find_multiple_matches(data_tmdb, + '"tv_results":.*?"id":(.*?),.*?"poster_path":(.*?),') + + if len(matches) == 0: + id_tmdb = "" + fanart_3 = "" + extra = item.thumbnail + "|" + year + "|" + "no data" + "|" + "no data" + "|" + rating_filma + "|" + critica + "|" + "" + "|" + id_tmdb + show = item.fanart + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + item.thumbnail + "|" + id_tmdb + fanart_info = item.fanart + fanart_2 = item.fanart + id_scraper = " " + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + " " + category = "" + posterdb = item.thumbnail + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, category=category, + show=show, folder=True)) + + for id_tmdb, fan in matches: + ###Busca id tvdb + urlid_tvdb = "https://api.themoviedb.org/3/tv/" + id_tmdb + "/external_ids?api_key=" + api_key + "&language=es" + data_tvdb = httptools.downloadpage(urlid_tvdb).data + id = scrapertools.find_single_match(data_tvdb, 'tvdb_id":(.*?),"tvrage_id"') + if id == "null": + id = "" + category = id + ###Busqueda nºepisodios y temporadas,status + url_status = "http://api.themoviedb.org/3/tv/" + id_tmdb + "?api_key=" + api_key + "&append_to_response=credits&language=es" + data_status = httptools.downloadpage(url_status).data + season_episodes = scrapertools.find_single_match(data_status, + '"(number_of_episodes":\d+,"number_of_seasons":\d+,)"') + season_episodes = re.sub(r'"', '', season_episodes) + season_episodes = re.sub(r'number_of_episodes', 'Episodios ', season_episodes) + season_episodes = re.sub(r'number_of_seasons', 'Temporadas', season_episodes) + season_episodes = re.sub(r'_', ' ', season_episodes) + status = scrapertools.find_single_match(data_status, '"status":"(.*?)"') + if status == "Ended": + status = "Finalizada" + else: + status = "En emisión" + status = status + " (" + season_episodes + ")" + status = re.sub(r',', '.', status) + ####### + + fan = re.sub(r'\\|"', '', fan) + + try: + # rating tvdb + url_rating_tvdb = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id 
+ "/es.xml" + print "pepote" + print url_rating_tvdb + data = httptools.downloadpage(url_rating_tvdb).data + rating = scrapertools.find_single_match(data, '<Rating>(.*?)<') + except: + ratintg_tvdb = "" + try: + rating = scrapertools.get_match(data, '"vote_average":(.*?),') + except: + + rating = "Sin puntuación" + + id_scraper = id_tmdb + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + rating + "|" + status # +"|"+emision + posterdb = scrapertools.find_single_match(data_tmdb, '"poster_path":(.*?)",') + + if "null" in posterdb: + posterdb = item.thumbnail + else: + posterdb = re.sub(r'\\|"', '', posterdb) + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + if "null" in fan: + fanart = "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg" + else: + fanart = "https://image.tmdb.org/t/p/original" + fan + + if fanart == "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg": + fanart_info = fanart + fanart_2 = fanart + fanart_3 = fanart + fanart_4 = fanart + else: + url = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/images?api_key=" + api_key + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + file_path = scrapertools.find_multiple_matches(data, '"file_path":"(.*?)"') + if len(file_path) >= 5: + fanart_info = "https://image.tmdb.org/t/p/original" + file_path[1] + fanart_2 = "https://image.tmdb.org/t/p/original" + file_path[2] + fanart_3 = "https://image.tmdb.org/t/p/original" + file_path[3] + fanart_4 = "https://image.tmdb.org/t/p/original" + file_path[4] + if fanart == "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg": + fanart = "https://image.tmdb.org/t/p/original" + fanart_info + elif len(file_path) == 4: + fanart_info = "https://image.tmdb.org/t/p/original" + file_path[1] + fanart_2 = "https://image.tmdb.org/t/p/original" + file_path[2] + fanart_3 = "https://image.tmdb.org/t/p/original" + file_path[3] + fanart_4 = "https://image.tmdb.org/t/p/original" + file_path[1] + if fanart == "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg": + fanart = "https://image.tmdb.org/t/p/original" + fanart_info + elif len(file_path) == 3: + fanart_info = "https://image.tmdb.org/t/p/original" + file_path[1] + fanart_2 = "https://image.tmdb.org/t/p/original" + file_path[2] + fanart_3 = "https://image.tmdb.org/t/p/original" + file_path[1] + fanart_4 = "https://image.tmdb.org/t/p/original" + file_path[0] + if fanart == "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg": + fanart = "https://image.tmdb.org/t/p/original" + fanart_info + elif len(file_path) == 2: + fanart_info = "https://image.tmdb.org/t/p/original" + file_path[1] + fanart_2 = "https://image.tmdb.org/t/p/original" + file_path[0] + fanart_3 = "https://image.tmdb.org/t/p/original" + file_path[1] + fanart_4 = "https://image.tmdb.org/t/p/original" + file_path[1] + if fanart == "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg": + fanart = "https://image.tmdb.org/t/p/original" + fanart_info + else: + fanart_info = fanart + fanart_2 = fanart + fanart_3 = fanart + fanart_4 = fanart + + url = "http://webservice.fanart.tv/v3/tv/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"clearlogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"tvbanner"' in data: + tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') + tfv = tvbanner + elif '"tvposter"' in data: + tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + tfv = 
tvposter + else: + tfv = posterdb + if '"tvthumb"' in data: + tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') + if '"hdtvlogo"' in data: + hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') + if '"hdclearart"' in data: + hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') + if len(matches) == 0: + if '"hdtvlogo"' in data: + if "showbackground" in data: + + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart, category=category, + extra=extra, show=show, folder=True)) + + + else: + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + else: + extra = "" + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + itemlist.append( + Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, server="torrent", + thumbnail=posterdb, fanart=fanart, extra=extra, show=show, category=category, folder=True)) + + for logo in matches: + if '"hdtvlogo"' in data: + thumbnail = hdtvlogo + elif not '"hdtvlogo"' in data: + if '"clearlogo"' in data: + thumbnail = logo + else: + thumbnail = item.thumbnail + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + if "showbackground" in data: + + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + else: + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + if "showbackground" in data: + + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + else: + extra = logo + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", 
url=item.url, + server="torrent", thumbnail=thumbnail, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + if not '"clearart"' in data and not '"showbackground"' in data: + if '"hdclearart"' in data: + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + else: + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4 + itemlist.append( + Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, server="torrent", + thumbnail=thumbnail, fanart=fanart, extra=extra, show=show, category=category, folder=True)) + title = "Info" + title_info = title.replace(title, "[COLOR seagreen]" + title + "[/COLOR]") + + if '"tvposter"' in data: + thumbnail = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + else: + thumbnail = posterdb + + if "tvbanner" in data: + category = tvbanner + else: + category = show + if '"tvthumb"' in data: + plot = item.plot + "|" + tvthumb + else: + plot = item.plot + "|" + item.thumbnail + if '"tvbanner"' in data: + plot = plot + "|" + tvbanner + elif '"tvthumb"' in data: + plot = plot + "|" + tvthumb + else: + plot = plot + "|" + item.thumbnail + + id = id_scraper + + extra = extra + "|" + id + "|" + title.encode('utf8') + + itemlist.append(Item(channel=item.channel, action="info", title=title_info, url=item.url, thumbnail=thumbnail, + fanart=fanart_info, extra=extra, category=category, plot=plot, show=show, + viewmode="movie_with_plot", folder=False)) + + return itemlist + + +def temporadas(item): + logger.info() + + itemlist = [] + + data = dhe(httptools.downloadpage(item.url).data) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + if "Temporada 0" in data: + bloque_temporadas = 'Temporada 0.*?(<h3 class="three fourths col-xs-12 pad0">.*?<div class="col-md-4 padl0">)' + matchestemporadas = re.compile(bloque_temporadas, re.DOTALL).findall(data) + + for bloque_temporadas in matchestemporadas: + patron = '<h3 class="three fourths col-xs-12 pad0">.*?href="([^"]+)" title="([^<]+)"' + matches = re.compile(patron, re.DOTALL).findall(bloque_temporadas) + + else: + patron = '<h3 class="three fourths col-xs-12 pad0">.*?href="([^"]+)" title="([^<]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + itemlist.append(Item(channel=item.channel, title="[COLOR gold][B]No hay resultados...[/B][/COLOR]", + thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", + fanart="http://pic.raise5.com/user_pictures/user-1423992581-237429.jpg", folder=False)) + for scrapedurl, scrapedtitle in matches: + ###Busqueda poster temporada tmdb + scrapedtitle = scrapedtitle.replace(scrapedtitle, "[COLOR springgreen]" + scrapedtitle + "[/COLOR]") + temporada = scrapertools.get_match(scrapedtitle, 'Temporada (\d+)') + scrapedtitle = scrapedtitle.replace("Temporada", "[COLOR darkorange]Temporada[/COLOR]") + + ###Busca poster de temporada Tmdb + urltmdb_temp = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/season/" + temporada + "/images?api_key=" + api_key + data = httptools.downloadpage(urltmdb_temp).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + for temp in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + temp + extra = 
item.extra + "|" + temporada + + itemlist.append( + Item(channel=item.channel, title=scrapedtitle, action="capitulos", url=scrapedurl, thumbnail=thumbnail, + fanart=item.show.split("|")[0], show=item.show, extra=extra, category=item.category, folder=True)) + + return itemlist + + +def capitulos(item): + logger.info() + + itemlist = [] + + data = dhe(httptools.downloadpage(item.url).data) + patron = '<div class="item_episodio col-xs-3 ">.*?href="([^"]+)" title="([^<]+)".*?<img src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + itemlist.append( + Item(channel=item.channel, title="[COLOR coral][B]" + "no hay capítulos...".upper() + "[/B][/COLOR]", + thumbnail="http://s6.postimg.org/wa269heq9/verseriesnohaythumb.png", + fanart="http://s6.postimg.org/4nzeosvdd/verseriesnothingfan.jpg", folder=False)) + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + scrapedtitle = re.sub(r"(.*?Temporada \d+)", "", scrapedtitle).strip() + capitulo = re.sub(r"Capitulo", "", scrapedtitle).strip() + scrapedtitle = scrapedtitle.replace(scrapedtitle, "[COLOR limegreen]" + scrapedtitle + "[/COLOR]") + extra = item.extra + "|" + capitulo + + itemlist.append(Item(channel=item.channel, title=scrapedtitle, action="findvideos", url=scrapedurl, + thumbnail=item.show.split("|")[4], fanart=item.show.split("|")[1], show=item.show, + extra=extra, category=item.category, folder=True)) + title = "Info" + title = title.replace(title, "[COLOR darkseagreen]" + title + "[/COLOR]") + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title=title, url=item.url, thumbnail=scrapedthumbnail, + fanart=item.show.split("|")[1], extra=extra, show=item.show, category=item.category, folder=False)) + + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + + patron = '<td><a href="([^"]+)".*?<img src="([^"]+)" title="([^<]+)" .*?<td>([^<]+)</td>.*?<td>([^<]+)</td>' + matches = re.compile(patron, re.DOTALL).findall(data) + print matches + for scrapedurl, scrapedthumbnail, scrapedserver, scrapedidioma, scrapedcalidad in matches: + + server = scrapertools.get_match(scrapedserver, '(.*?)[.]') + icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", + "server_" + server + ".png") + icon_server = re.sub(r"tv|com|net|", "", icon_server) + icon_server = icon_server.replace('streamin', 'streaminto') + icon_server = icon_server.replace('ul', 'uploadedto') + + if not os.path.exists(icon_server): + icon_server = scrapedthumbnail + + scrapedserver = scrapedserver.replace(scrapedserver, + "[COLOR darkorange][B]" + "[" + scrapedserver + "]" + "[/B][/COLOR]") + scrapedidioma = scrapedidioma.replace(scrapedidioma, + "[COLOR lawngreen][B]" + "--" + scrapedidioma + "--" + "[/B][/COLOR]") + scrapedcalidad = scrapedcalidad.replace(scrapedcalidad, + "[COLOR floralwhite][B]" + scrapedcalidad + "[/B][/COLOR]") + + title = scrapedserver + scrapedidioma + scrapedcalidad + itemlist.append(Item(channel=item.channel, title=title, action="play", url=scrapedurl, thumbnail=icon_server, + fanart=item.show.split("|")[6], extra=item.thumbnail, folder=True)) + + return itemlist + + +def play(item): + logger.info() + import xbmc + xbmc.executebuiltin('Action(reloadkeymaps)') + + itemlist = servertools.find_video_items(data=item.url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.thumbnail = item.extra + videoitem.extra = item.extra + videoitem.channel = item.channel + + 
return itemlist
+
+
+def info(item):
+    logger.info()
+    itemlist = []
+    url = item.url
+    id = item.extra
+
+    if "serie" in item.url:
+        try:
+            rating_tmdba_tvdb = item.extra.split("|")[6]
+            if item.extra.split("|")[6] == "":
+                rating_tmdba_tvdb = "Sin puntuación"
+        except:
+            rating_tmdba_tvdb = "Sin puntuación"
+    else:
+        rating_tmdba_tvdb = item.extra.split("|")[3]
+    rating_filma = item.extra.split("|")[4]
+
+    filma = "http://s6.postimg.org/6yhe5fgy9/filma.png"
+
+    try:
+        if "serie" in item.url:
+            title = item.extra.split("|")[8]
+        else:
+            title = item.extra.split("|")[6]
+            title = title.replace("%20", " ")
+        title = "[COLOR yellow][B]" + title + "[/B][/COLOR]"
+    except:
+        title = item.title
+
+    try:
+        if "." in rating_tmdba_tvdb:
+            check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).')
+        else:
+            check_rat_tmdba = rating_tmdba_tvdb
+        if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8:
+            rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+        elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10:
+            rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+        else:
+            rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+    except:
+        rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
+    if "10." in rating:
+        rating = re.sub(r'10\.\d+', '10', rating)
+    try:
+        check_rat_filma = scrapertools.get_match(rating_filma, '(\d)')
+        if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8:
+            rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]"
+        elif int(check_rat_filma) >= 8:
+            rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]"
+        else:
+            rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
+    except:
+        rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
+
+    try:
+        if not "serie" in item.url:
+            url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
+                1] + "?api_key=" + api_key + "&append_to_response=credits&language=es"
+            data_plot = httptools.downloadpage(url_plot).data
+            plot, tagline = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",.*?"tagline":(".*?")')
+            if plot == "":
+                plot = item.show.split("|")[2]
+
+            plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
+            plot = re.sub(r"\\", "", plot)
+
+        else:
+            plot = item.show.split("|")[2]
+            plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
+            plot = re.sub(r"\\", "", plot)
+
+        if item.extra.split("|")[7] != "":
+            tagline = item.extra.split("|")[7]
+            # tagline= re.sub(r',','.',tagline)
+        else:
+            tagline = ""
+    except:
+        title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
+        plot = "Esta película no tiene información..."
+        plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
+        photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
+        foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
+        info = ""
+        rating = ""
+        rating_filma = ""
+
+    if "serie" in item.url:
+        check2 = "serie"
+
+        icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
+        foto = item.show.split("|")[1]
+        if not "image.tmdb" in foto:
+            foto = ""
+        if item.extra.split("|")[5] != "":
+            critica = item.extra.split("|")[5]
+        else:
+            critica = "Esta serie no tiene críticas..."
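+        # Miniatura para la ventana de info: solo se usa si apunta a un png válido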
+ + photo = item.extra.split("|")[0].replace(" ", "%20") + if not ".png" in photo: + photo = "" + try: + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + except: + tagline = "" + + else: + + critica = item.extra.split("|")[5] + if "%20" in critica: + critica = "No hay críticas" + icon = "http://imgur.com/SenkyxF.png" + + photo = item.extra.split("|")[0].replace(" ", "%20") + foto = item.show.split("|")[1] + + try: + if tagline == "\"\"": + tagline = " " + except: + tagline = " " + tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" + check2 = "pelicula" + # Tambien te puede interesar + peliculas = [] + if "serie" in item.url: + + url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = httptools.downloadpage(url_tpi).data + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),"popularity"') + + else: + url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ + 1] + "/recommendations?api_key=" + api_key + "&language=es" + data_tpi = httptools.downloadpage(url_tpi).data + tpi = scrapertools.find_multiple_matches(data_tpi, + 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),"popularity"') + + for idp, peli, thumb in tpi: + + thumb = re.sub(r'"|}', '', thumb) + if "null" in thumb: + thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" + else: + thumb = "https://image.tmdb.org/t/p/original" + thumb + peliculas.append([idp, peli, thumb]) + + check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow") + infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline, + 'rating': rating} + item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma, + critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/zKjAjzB.png") + from channels import infoplus + infoplus.start(item_info, peliculas) + + +def info_capitulos(item): + logger.info() + + url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" + item.extra.split("|")[ + 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es" + + if "/0" in url: + url = url.replace("/0", "/") + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[ + 2] + "/" + item.extra.split("|")[3] + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?<Overview>(.*?)</Overview>.*?<Rating>(.*?)</Rating>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." 
+ plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + + else: + + for name_epi, info, rating in matches: + if "<filename>episodes" in data: + foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') + fanart = "http://thetvdb.com/banners/" + foto + else: + fanart = "http://imgur.com/ZiEAVOD.png" + plot = info + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/zKjAjzB.png" + + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + else: + for name_epi, info, fanart, rating in matches: + if info == "" or info == "\\": + info = "Sin informacion del capítulo aún..." + plot = info + plot = re.sub(r'/n', '', plot) + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + image = re.sub(r'"|}', '', image) + if "null" in image: + image = "http://imgur.com/ZiEAVOD.png" + else: + image = "https://image.tmdb.org/t/p/original" + image + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/zKjAjzB.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." 
in rating: + rating = re.sub(r'10\.\d+', '10', rating) + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/mpMQp6c.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) + self.addControl(self.rating) + self.rating.setText(self.getRating) + self.rating.setAnimations( + [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) + xbmc.sleep(200) + + try: + self.plot.autoScroll(7000, 6000, 30000) + except: + + xbmc.executebuiltin( + 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') + self.plot.setText(self.getPlot) + + def get(self): + self.show() + + def onAction(self, action): + if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: + self.close() + + +def test(): + return True + + +def translate(to_translate, to_langage="auto", langage="auto"): + '''Return the translation using google translate + you must shortcut the langage you define (French = fr, English = en, Spanish = es, etc...) 
+ if you don't define anything it will detect it or use english by default + Example: + print(translate("salut tu vas bien?", "en")) + hello you alright?''' + agents = { + 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} + before_trans = 'class="t0">' + link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_langage, langage, to_translate.replace(" ", "+")) + request = urllib2.Request(link, headers=agents) + page = urllib2.urlopen(request).read() + result = page[page.find(before_trans) + len(before_trans):] + result = result.split("<")[0] + return result + + +if __name__ == '__main__': + to_translate = 'Hola como estas?' + print("%s >> %s" % (to_translate, translate(to_translate))) + print("%s >> %s" % (to_translate, translate(to_translate, 'fr'))) +# should print Hola como estas >> Hello how are you +# and Hola como estas? >> Bonjour comment allez-vous? diff --git a/plugin.video.alfa/channels/verseriesynovelas.json b/plugin.video.alfa/channels/verseriesynovelas.json new file mode 100755 index 00000000..5ee0e6cb --- /dev/null +++ b/plugin.video.alfa/channels/verseriesynovelas.json @@ -0,0 +1,89 @@ +{ + "id": "verseriesynovelas", + "name": "Ver Series y Novelas", + "active": false, + "adult": false, + "language": "es", + "banner": "verseriesynovelas.png", + "thumbnail": "http://i.imgur.com/ZhQknRE.png", + "version": 1, + "changes": [ + { + "date": "17/05/2017", + "description": "Canal desactivado por cambio de dominio, a la espera de si lo arreglan" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "16/02/2017", + "description": "Ligeros cambios para mejorar la carga de enlaces" + }, + { + "date": "11/10/2016", + "description": "Reparado por cambios en la web, ahora necesita registro" + } + ], + "categories": [ + "latino", + "vos", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Series", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "verseriesynovelasuser", + "type": "text", + "label": "@30014", + "default": "", + "enabled": true, + "visible": true + }, + { + "id": "verseriesynovelaspassword", + "label": "@30015", + "type": "text", + "default": "", + "enabled": "!eq(-1,'')", + "hidden": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/verseriesynovelas.py b/plugin.video.alfa/channels/verseriesynovelas.py new file mode 100755 index 00000000..051ce49d --- /dev/null +++ b/plugin.video.alfa/channels/verseriesynovelas.py @@ -0,0 +1,394 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +# Configuracion del canal +__modo_grafico__ = config.get_setting('modo_grafico', 'verseriesynovelas') +__perfil__ = config.get_setting('perfil', 'verseriesynovelas') + +# Fijar perfil de 
color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] + + +def login(check_login=True): + logger.info() + + try: + user = config.get_setting("verseriesynovelasuser", "verseriesynovelas") + password = config.get_setting("verseriesynovelaspassword", "verseriesynovelas") + if user == "" and password == "": + return False, "Para ver los enlaces de este canal es necesario registrarse en www.verseriesynovelas.tv" + elif user == "" or password == "": + return False, "Usuario o contraseña en blanco. Revisa tus credenciales" + if check_login: + data = httptools.downloadpage("http://www.verseriesynovelas.tv/").data + if user in data: + return True, "" + + post = "log=%s&pwd=%s&redirect_to=http://www.verseriesynovelas.tv/wp-admin/&action=login" % (user, password) + data = httptools.downloadpage("http://www.verseriesynovelas.tv/iniciar-sesion", post=post).data + if "La contraseña que has introducido" in data: + logger.error("Error en el login") + return False, "Contraseña errónea. Comprueba tus credenciales" + elif "Nombre de usuario no válido" in data: + logger.error("Error en el login") + return False, "Nombre de usuario no válido. Comprueba tus credenciales" + else: + logger.info("Login correcto") + return True, "" + except: + import traceback + logger.error(traceback.format_exc()) + return False, "Error durante el login. Comprueba tus credenciales" + + +def mainlist(item): + logger.info() + itemlist = [] + item.text_color = color1 + + logueado, error_message = login() + + if not logueado: + itemlist.append(item.clone(title=error_message, action="", text_color="darkorange")) + else: + itemlist.append( + item.clone(title="Nuevos Capítulos", action="novedades", fanart="http://i.imgur.com/9loVksV.png", + url="http://www.verseriesynovelas.tv/archivos/nuevo")) + itemlist.append(item.clone(title="Últimas Series", action="ultimas", fanart="http://i.imgur.com/9loVksV.png", + url="http://www.verseriesynovelas.tv/")) + itemlist.append( + item.clone(title="Lista de Series A-Z", action="indices", fanart="http://i.imgur.com/9loVksV.png", + url="http://www.verseriesynovelas.tv/")) + itemlist.append(item.clone(title="Categorías", action="indices", fanart="http://i.imgur.com/9loVksV.png", + url="http://www.verseriesynovelas.tv/")) + itemlist.append(item.clone(title="", action="")) + itemlist.append(item.clone(title="Buscar...", action="search", fanart="http://i.imgur.com/9loVksV.png")) + itemlist.append(item.clone(title="Configurar canal...", action="configuracion", text_color="gold", folder=False)) + + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def indices(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + + if "Categorías" in item.title: + bloque = scrapertools.find_single_match(data, '<span>Seleccion tu categoria</span>(.*?)</section>') + matches = scrapertools.find_multiple_matches(bloque, '<li.*?<a href="([^"]+)">(.*?)</a>') + for url, title in matches: + itemlist.append(item.clone(action="ultimas", title=title, url=url)) + else: + bloque = scrapertools.find_single_match(data, '<ul class="alfabetico">(.*?)</ul>') + matches = scrapertools.find_multiple_matches(bloque, '<li.*?<a href="([^"]+)".*?>(.*?)</a>') + for url, title in matches: + 
itemlist.append(item.clone(action="ultimas", title=title, url=url)) + + return itemlist + + +def search(item, texto): + logger.info() + item.url = "http://www.verseriesynovelas.tv/archivos/h1/?s=" + texto + if "Buscar..." in item.title: + return ultimas(item, texto) + else: + try: + return busqueda(item, texto) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def busqueda(item, texto=""): + logger.info() + itemlist = [] + item.text_color = color2 + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + + bloque = scrapertools.find_single_match(data, '<ul class="list-paginacion">(.*?)</section>') + bloque = scrapertools.find_multiple_matches(bloque, '<li><a href=(.*?)</li>') + for match in bloque: + patron = '([^"]+)".*?<img class="fade" src="([^"]+)".*?<h2>(.*?)</h2>' + matches = scrapertools.find_multiple_matches(match, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + # fix para el buscador para que no muestre entradas con texto que no es correcto + if unicode(texto, "utf8").lower().encode("utf8") not in \ + unicode(scrapedtitle, "utf8").lower().encode("utf8"): + continue + + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace(" online", "") + titleinfo = re.sub(r'(?i)((primera|segunda|tercera|cuarta|quinta|sexta) temporada)', "Temporada", + scrapedtitle) + titleinfo = titleinfo.split("Temporada")[0].strip() + titleinfo = re.sub(r'(\(\d{4}\))|(\(\d{4}\s*-\s*\d{4}\))', '', titleinfo) + + itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, show=titleinfo, + contentType="tvshow", contentTitle=titleinfo)) + # Paginación + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">') + if next_page != "": + itemlist.append(item.clone(title=">> Siguiente", url=next_page)) + + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'series': + item.channel = "verseriesynovelas" + item.extra = "newest" + item.url = "http://www.verseriesynovelas.tv/archivos/nuevo" + item.action = "novedades" + itemlist = novedades(item) + + if itemlist[-1].action == "novedades": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def novedades(item): + logger.info() + itemlist = [] + item.text_color = color2 + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + + bloque = scrapertools.find_single_match(data, '<section class="list-galeria">(.*?)</section>') + bloque = scrapertools.find_multiple_matches(bloque, '<li><a href=(.*?)</a></li>') + for match in bloque: + patron = '([^"]+)".*?<img class="fade" src="([^"]+)".*?title="(?:ver |)([^"]+)"' + matches = scrapertools.find_multiple_matches(match, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + titleinfo = scrapertools.decodeHtmlentities(scrapedtitle) + try: + titleinfo = re.split("Temporada", titleinfo, flags=re.IGNORECASE)[0] + except: + try: + titleinfo = re.split("Capitulo", titleinfo, flags=re.IGNORECASE)[0] + except: + pass + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) + " " + if item.extra != "newest": + contentTitle = titleinfo + else: + contentTitle = re.sub(r'(?i)(temporada |episodios 
|capítulo |capitulo )', '', scrapedtitle) + + if "ES.png" in match: + scrapedtitle += "[CAST]" + if "SUB.png" in match: + scrapedtitle += "[VOSE]" + if "LA.png" in match: + scrapedtitle += "[LAT]" + if "EN.png" in match: + scrapedtitle += "[V.O]" + itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, fulltitle=titleinfo, show=titleinfo, + contentTitle=contentTitle, context=["buscar_trailer"], contentType="tvshow")) + + if item.extra != "newest": + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + # Paginación + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">') + if next_page != "": + itemlist.append(item.clone(title=">> Siguiente", url=next_page, text_color=color3)) + + return itemlist + + +def ultimas(item, texto=""): + logger.info() + itemlist = [] + item.text_color = color2 + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + + bloque = scrapertools.find_single_match(data, '<ul class="list-paginacion">(.*?)</section>') + bloque = scrapertools.find_multiple_matches(bloque, '<li><a href=(.*?)</li>') + for match in bloque: + patron = '([^"]+)".*?<img class="fade" src="([^"]+)".*?<h2>(.*?)</h2>' + matches = scrapertools.find_multiple_matches(match, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + # fix para el buscador para que no muestre entradas con texto que no es correcto + if unicode(texto, "utf8").lower().encode("utf8") not in \ + unicode(scrapedtitle, "utf8").lower().encode("utf8"): + continue + + scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace(" online", "") + titleinfo = re.sub(r'(?i)((primera|segunda|tercera|cuarta|quinta|sexta) temporada)', "Temporada", + scrapedtitle) + titleinfo = titleinfo.split("Temporada")[0].strip() + titleinfo = re.sub(r'(\(\d{4}\))|(\(\d{4}\s*-\s*\d{4}\))', '', titleinfo) + + itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, fulltitle=titleinfo, + contentTitle=titleinfo, context=["buscar_trailer"], show=titleinfo, + contentType="tvshow")) + + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + # Paginación + next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">') + if next_page != "": + itemlist.append(item.clone(title=">> Siguiente", url=next_page, text_color=color3)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = data.replace("\n", "").replace("\t", "") + + plot = scrapertools.find_single_match(data, '<p><p>(.*?)</p>') + item.plot = scrapertools.htmlclean(plot) + bloque = scrapertools.find_multiple_matches(data, '<td data-th="Temporada"(.*?)</div>') + for match in bloque: + matches = scrapertools.find_multiple_matches(match, '.*?href="([^"]+)".*?title="([^"]+)"') + for scrapedurl, scrapedtitle in matches: + try: + season, episode = scrapertools.find_single_match(scrapedtitle, '(\d+)(?:×|x)(\d+)') + item.infoLabels['season'] = season + item.infoLabels['episode'] = episode + contentType = "episode" + except: + try: + episode = scrapertools.find_single_match(scrapedtitle, '(?i)(?:Capitulo|Capítulo|Episodio)\s*(\d+)') + item.infoLabels['season'] = "1" + item.infoLabels['episode'] = episode + contentType = "episode" + except: + contentType = "tvshow" + + 
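+            # Limpia el título y añade la etiqueta de idioma según la bandera que muestra la web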
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle) + " "
+            scrapedtitle = scrapedtitle.replace('Temporada', '')
+            if "ES.png" in match:
+                scrapedtitle += "[CAST]"
+            if "SUB.png" in match:
+                scrapedtitle += "[VOSE]"
+            if "LA.png" in match:
+                scrapedtitle += "[LAT]"
+            if "EN.png" in match:
+                scrapedtitle += "[V.O]"
+
+            itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
+                                       fulltitle=scrapedtitle, contentType=contentType))
+
+    itemlist.reverse()
+    if itemlist and item.extra != "episodios":
+        try:
+            from core import tmdb
+            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
+        except:
+            pass
+        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
+                                   text_color="magenta"))
+        if item.category != "" and config.get_videolibrary_support():
+            itemlist.append(Item(channel=item.channel, title="Añadir esta temporada a la videoteca", url=item.url,
+                                 action="add_serie_to_library", extra="episodios", text_color="green", show=item.show))
+
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    item.text_color = color3
+
+    if item.extra == "newest":
+        try:
+            from core import tmdb
+            tmdb.set_infoLabels_item(item, __modo_grafico__)
+        except:
+            pass
+
+    data = httptools.downloadpage(item.url).data
+    if "valida el captcha" in data:
+        logueado, error = login(check_login=False)
+        data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
+
+    bloque = scrapertools.find_multiple_matches(data, '<tr><td data-th="Idioma">(.*?)</div>')
+    for match in bloque:
+        patron = 'data-th="Calidad">(.*?)<.*?' \
+                 '"Servidor".*?src="http://www.google.com/s2/favicons\?domain=(.*?)\.' \
+                 '.*?<td data-th="Enlace"><a href="(http://www.verseriesynovelas.tv/link/enlaces.php.*?)"'
+        matches = scrapertools.find_multiple_matches(match, patron)
+        for quality, server, url in matches:
+            video_data = httptools.downloadpage(url).data
+            url_redirect = scrapertools.find_single_match(video_data,
+                                                          'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
+            location = httptools.downloadpage(url_redirect, follow_redirects=False, only_headers=True).headers[
+                "location"]
+
+            title = "Ver vídeo en %s [" + quality + "]"
+            if "Español.png" in match:
+                title += " [CAST]"
+            if "VOS.png" in match:
+                title += " [VOSE]"
+            if "Latino.png" in match:
+                title += " [LAT]"
+            if "VO.png" in match:
+                title += " [V.O]"
+            # El %s del título lo sustituye después get_servers_itemlist por el nombre del servidor
+            itemlist.append(item.clone(action="play", title=title, url=location))
+
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
+
+    if not itemlist:
+        itemlist.append(item.clone(action="", title="No se ha encontrado ningún enlace"))
+    if item.extra != "episodios":
+        url_lista = scrapertools.find_single_match(data, '<a class="regresar" href="([^"]+)"')
+        if url_lista != "":
+            itemlist.append(item.clone(action="episodios", title="Ir a la Lista de Capítulos", url=url_lista,
+                                       text_color="red", context=""))
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/vertelenovelas.json b/plugin.video.alfa/channels/vertelenovelas.json
new file mode 100755
index 00000000..60bc382d
--- /dev/null
+++ b/plugin.video.alfa/channels/vertelenovelas.json
@@ -0,0 +1,25 @@
+{
+    "id": "vertelenovelas",
+    "name": "Ver Telenovelas",
+    "active": true,
+    "adult": false,
+    "language": "es",
+    "thumbnail": "vertelenovelas.png",
+    "banner": "vertelenovelas.png",
+    "version": 1,
+    "date": "15/02/2017",
+    "changes": "Fix findvideos.",
+    "categories": [
+ "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/vertelenovelas.py b/plugin.video.alfa/channels/vertelenovelas.py new file mode 100755 index 00000000..a8050cce --- /dev/null +++ b/plugin.video.alfa/channels/vertelenovelas.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, title="Catálogo", action="series", url="http://www.vertelenovelas.cc/", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, title="Buscar", action="search")) + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = "http://www.vertelenovelas.cc/ajax/autocompletex.php?q=" + texto + + try: + return series(item) + + # Se captura la excepciÛn, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def series(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + + patron = '<article.*?</article>' + matches = re.compile(patron, re.DOTALL).findall(data) + + for match in matches: + title = scrapertools.find_single_match(match, '<span>([^<]+)</span>') + if title == "": + title = scrapertools.find_single_match(match, '<a href="[^"]+" class="title link">([^<]+)</a>') + url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, '<a href="([^"]+)"')) + thumbnail = scrapertools.find_single_match(match, '<div data-src="([^"]+)"') + if thumbnail == "": + thumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"') + logger.debug("title=[" + title + "], url=[" + url + "]") + itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail)) + + next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">') + if next_page_url != "": + itemlist.append(Item(channel=item.channel, action="series", title=">> Pagina siguiente", + url=urlparse.urljoin(item.url, next_page_url), viewmode="movie", thumbnail="", plot="", + folder=True)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = scrapertools.find_single_match(data, '<h2>Cap(.*?)</ul>') + patron = '<li><a href="([^"]+)"><span>([^<]+)</span></a>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.htmlclean(scrapedtitle) + plot = "" + thumbnail = "" + url = urlparse.urljoin(item.url, scrapedurl) + + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + folder=True, fulltitle=title)) + + return itemlist + + +def findvideos(item): + logger.info() + data = httptools.downloadpage(item.url).data + + pattern = 'data-id="([^"]+)"' + list_servers = re.compile(pattern, re.DOTALL).findall(data) + + logger.debug("llist_servers %s" % list_servers) + list_urls = [] + + for _id in list_servers: + post = 
"id=%s" % _id + data = httptools.downloadpage("http://www.vertelenovelas.cc/goto/", post=post).data + list_urls.append(scrapertools.find_single_match(data, 'document\.location = "([^"]+)";')) + + from core import servertools + itemlist = servertools.find_video_items(data=", ".join(list_urls)) + for videoitem in itemlist: + # videoitem.title = item.title + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/videolibrary.json b/plugin.video.alfa/channels/videolibrary.json new file mode 100755 index 00000000..a98b0999 --- /dev/null +++ b/plugin.video.alfa/channels/videolibrary.json @@ -0,0 +1,300 @@ +{ + "id": "videolibrary", + "name": "Videoteca", + "active": false, + "adult": false, + "language": "es", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/16", + "description": "Eliminado código innecesario." + }, + { + "date": "30/04/2016", + "description": "Version inicial" + } + ], + "settings": [ + { + "id": "update", + "type": "list", + "label": "Actualizar la videoteca", + "default": 1, + "visible": true, + "lvalues": [ + "Nunca", + "Al iniciar Kodi", + "Una sola vez al día", + "Al iniciar Kodi y al menos una vez al día" + ] + }, + { + "id": "update_wait", + "type": "list", + "label": " Esperar antes de actualizar al iniciar kodi", + "default": 0, + "visible": true, + "enabled": "eq(-1,Al iniciar Kodi)|eq(-1,Al iniciar Kodi y al menos una vez al día)", + "lvalues": [ + "No", + "10 seg", + "20 seg", + "30 seg", + "60 seg" + ] + }, + { + "id": "everyday_delay", + "type": "list", + "label": " Iniciar actualización programada a partir de las", + "default": 1, + "visible": true, + "enabled": "eq(-2,Una sola vez al día)|eq(-2,Al iniciar Kodi y al menos una vez al día)", + "lvalues": [ + "00:00", + "04:00", + "08:00", + "12:00" + ] + }, + { + "id": "updatetvshows_interval", + "type": "list", + "label": " Buscar nuevos episodios en las series activas", + "default": 0, + "visible": true, + "enabled": "!eq(-3,Nunca)", + "lvalues": [ + "Siempre", + "Según su emisión" + ] + }, + { + "id": "search_new_content", + "type": "list", + "label": " Realizar búsqueda de contenido en", + "default": 0, + "enabled": "!eq(-4,Nunca)", + "lvalues": [ + "La carpeta de cada serie", + "Toda la videoteca" + ] + }, + { + "id": "window_type", + "type": "list", + "label": "Mostrar los enlaces en", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Ventana convencional", + "Ventana emergente" + ] + }, + { + "id": "max_links", + "type": "list", + "label": " Numero máximo de enlaces a mostrar (recomendable para equipos lentos)", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Todos", + "30", + "60", + "90", + "120", + "150", + "180", + "210" + ] + }, + { + "id": "white_list_order", + "type": "bool", + "label": " Ordenar segun el orden de la lista blanca", + "enabled": true, + "visible": true, + "default": false + }, + { + "id": "quit_channel_name", + "type": "bool", + "label": " Quitar el nombre del canal del principio", + "enabled": true, + "visible": true, + "default": false + }, + { + "id": "replace_VD", + "type": "bool", + "label": " Ventana emergente: Reemplazar \"Ver en\" por \"[V]\" y \"Descargar en\" por \"[D]\"", + "enabled": true, + "visible": true, + "default": false + }, + { + "id": "db_mode", + "type": "list", + "label": "Ubicación de Base de datos", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Local", + "Remota" + ] + }, + { + "id": 
"xbmc_host", + "type": "text", + "label": " Nombre Servidor", + "visible": true, + "enabled": "eq(-1,Remota)" + }, + { + "id": "xbmc_puerto", + "type": "text", + "label": " Puerto Servidor", + "enabled": "!eq(-1,'')", + "visible": true + }, + { + "id": "mark_as_watched", + "type": "bool", + "label": "Marcar automáticamente como visto", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "watched_setting", + "type": "list", + "label": " Tiempo necesario del video", + "default": 3, + "visible": true, + "enabled": "eq(-1,true)", + "lvalues": [ + "5 min", + "30%", + "50%", + "80%", + "0 seg" + ] + }, + { + "id": "sync_trakt", + "type": "label", + "label": "Sincronización con Trakt", + "enabled": true, + "visible": true + }, + { + "id": "sync_trakt_watched", + "type": "bool", + "label": " Tras marcar como visto el episodio", + "default": false, + "visible": true, + "enabled": "eq(-3,true)" + }, + { + "id": "sync_trakt_notification", + "type": "bool", + "label": " Mostrar notificación", + "default": true, + "visible": true, + "enabled": "eq(-1,true)" + }, + { + "id": "sync_trakt_new_tvshow", + "type": "bool", + "label": " Al añadir una serie a la videoteca", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "sync_trakt_new_tvshow_wait", + "type": "bool", + "label": " Esperar a que se añada la serie a la videoteca", + "default": true, + "visible": true, + "enabled": "eq(-1,true)" + }, + { + "id": "show_all_seasons", + "type": "bool", + "label": "Mostrar la opción \"Todas las temporadas\"", + "default": true + }, + { + "id": "no_pile_on_seasons", + "type": "list", + "label": "No apilar temporadas de series", + "default": 1, + "lvalues": [ + "Nunca", + "Sólo si hay una temporada", + "Siempre" + ] + }, + { + "id": "ask_channel", + "type": "bool", + "label": "Mostrar cuadro de selección de canales", + "default": false + }, + { + "id": "original_title_folder", + "type": "list", + "label": "Crear directorios en el sistema usando", + "default": 0, + "lvalues": [ + "Título localizado", + "Título original" + ] + }, + { + "id": "lab_1", + "type": "label", + "label": "Al añadir contenido, obtener información de:", + "enabled": true, + "visible": true + }, + { + "id": "scraper_movies", + "type": "list", + "label": " Peliculas:", + "enabled": false, + "default": 0, + "lvalues": [ + "TheMovieDB.org", + "None" + ] + }, + { + "id": "scraper_tvshows", + "type": "list", + "label": " Series:", + "default": 0, + "lvalues": [ + "TheMovieDB.org", + "TheTvDB.com" + ] + }, + { + "id": "tvdb_retry_eng", + "type": "bool", + "label": " Si no hay resultados buscar también en Inglés", + "default": true, + "enabled": "eq(-1,TheTvDB.com)", + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/videolibrary.py b/plugin.video.alfa/channels/videolibrary.py new file mode 100755 index 00000000..6b86e488 --- /dev/null +++ b/plugin.video.alfa/channels/videolibrary.py @@ -0,0 +1,692 @@ +# -*- coding: utf-8 -*- + +import glob +import os + +from core import config +from core import filetools +from core import videolibrarytools +from core import logger +from core import scrapertools +from core.item import Item +from platformcode import platformtools + + +def mainlist(item): + logger.info() + + itemlist = list() + itemlist.append(Item(channel=item.channel, action="list_movies", title="Películas", + category="Videoteca de películas", + thumbnail=config.get_thumb("thumb_videolibrary_movie.png"))) + itemlist.append(Item(channel=item.channel, 
action="list_tvshows", title="Series", + category="Videoteca de series", + thumbnail=config.get_thumb("thumb_videolibrary_tvshow.png"))) + + return itemlist + + +def channel_config(item): + return platformtools.show_channel_settings(channelpath=os.path.join(config.get_runtime_path(), "channels", + item.channel), + caption="configuración -- Videoteca") + + +def list_movies(item): + logger.info() + itemlist = [] + + for f in glob.glob(filetools.join(videolibrarytools.MOVIES_PATH, u'/*/*.nfo')): + nfo_path = f + head_nfo, new_item = videolibrarytools.read_nfo(nfo_path) + + new_item.nfo = nfo_path + new_item.path = filetools.dirname(f) + new_item.thumbnail = new_item.contentThumbnail + new_item.text_color = "blue" + + if not filetools.exists(filetools.join(videolibrarytools.MOVIES_PATH, new_item.strm_path)): + # Si se ha eliminado el strm desde la bilbioteca de kodi, no mostrarlo + continue + + # Menu contextual: Marcar como visto/no visto + visto = new_item.library_playcounts.get(os.path.splitext(f)[0], 0) + new_item.infoLabels["playcount"] = visto + if visto > 0: + texto_visto = "Marcar película como no vista" + contador = 0 + else: + texto_visto = "Marcar película como vista" + contador = 1 + + # Menu contextual: Eliminar serie/canal + num_canales = len(new_item.library_urls) + if "downloads" in new_item.library_urls: + num_canales -= 1 + if num_canales > 1: + texto_eliminar = "Eliminar película/canal" + multicanal = True + else: + texto_eliminar = "Eliminar esta película" + multicanal = False + + new_item.context = [{"title": texto_visto, + "action": "mark_content_as_watched", + "channel": "videolibrary", + "playcount": contador}, + {"title": texto_eliminar, + "action": "delete", + "channel": "videolibrary", + "multicanal": multicanal}] + # ,{"title": "Cambiar contenido (PENDIENTE)", + # "action": "", + # "channel": "videolibrary"}] + # logger.debug("new_item: " + new_item.tostring('\n')) + itemlist.append(new_item) + + return sorted(itemlist, key=lambda it: it.title.lower()) + + +def list_tvshows(item): + logger.info() + itemlist = [] + + # Obtenemos todos los tvshow.nfo de la videoteca de SERIES recursivamente + for f in glob.glob(filetools.join(videolibrarytools.TVSHOWS_PATH, u'/*/tvshow.nfo')): + # logger.debug("file es %s" % f) + + head_nfo, item_tvshow = videolibrarytools.read_nfo(f) + item_tvshow.title = item_tvshow.contentTitle + item_tvshow.path = filetools.join(videolibrarytools.TVSHOWS_PATH, item_tvshow.path) + item_tvshow.nfo = f + + # Menu contextual: Marcar como visto/no visto + visto = item_tvshow.library_playcounts.get(item_tvshow.contentTitle, 0) + item_tvshow.infoLabels["playcount"] = visto + if visto > 0: + texto_visto = "Marcar serie como no vista" + contador = 0 + else: + texto_visto = "Marcar serie como vista" + contador = 1 + + # Menu contextual: Buscar automáticamente nuevos episodios o no + if item_tvshow.active and int(item_tvshow.active) > 0: + texto_update = "Buscar automáticamente nuevos episodios: Desactivar" + value = 0 + item_tvshow.text_color = "green" + else: + texto_update = "Buscar automáticamente nuevos episodios: Activar" + value = 1 + item_tvshow.text_color = "0xFFDF7401" + + # Menu contextual: Eliminar serie/canal + num_canales = len(item_tvshow.library_urls) + if "downloads" in item_tvshow.library_urls: + num_canales -= 1 + if num_canales > 1: + texto_eliminar = "Eliminar serie/canal" + multicanal = True + else: + texto_eliminar = "Eliminar esta serie" + multicanal = False + + item_tvshow.context = [{"title": texto_visto, + "action": 
"mark_content_as_watched", + "channel": "videolibrary", + "playcount": contador}, + {"title": texto_update, + "action": "mark_tvshow_as_updatable", + "channel": "videolibrary", + "active": value}, + {"title": texto_eliminar, + "action": "delete", + "channel": "videolibrary", + "multicanal": multicanal}, + {"title": "Buscar nuevos episodios ahora", + "action": "update_tvshow", + "channel": "videolibrary"}] + # ,{"title": "Cambiar contenido (PENDIENTE)", + # "action": "", + # "channel": "videolibrary"}] + + # logger.debug("item_tvshow:\n" + item_tvshow.tostring('\n')) + itemlist.append(item_tvshow) + + if itemlist: + itemlist = sorted(itemlist, key=lambda it: it.title.lower()) + + itemlist.append(Item(channel=item.channel, action="update_videolibrary", thumbnail=item.thumbnail, + title="Buscar nuevos episodios y actualizar videoteca", folder=False)) + + return itemlist + + +def get_seasons(item): + logger.info() + # logger.debug("item:\n" + item.tostring('\n')) + itemlist = [] + dict_temp = {} + + # Menu contextual: Releer tvshow.nfo + head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo) + + # Miramos las temporadas que estén marcadas como vistas + if not hasattr(item_nfo, 'library_playcounts'): + item_nfo.library_playcounts = {} + + if config.get_setting("no_pile_on_seasons", "videolibrary") == 2: # Siempre + return get_episodes(item) + + for f in glob.glob1(item.path, u'*.json'): + season = f.split('x')[0] + dict_temp[season] = "Temporada %s" % season + + if config.get_setting("no_pile_on_seasons", "videolibrary") == 1 and len(dict_temp) == 1: # Sólo si hay una temporada + return get_episodes(item) + else: + + # TODO mostrar los episodios de la unica temporada "no vista", en vez de mostrar el Item "temporada X" previo + # si está marcado "ocultar los vistos" en el skin, se ejecutaria esto + # se comprueba cada temporada en dict_temp si está visto. + # si hay una sola temporada y no_pile_on_seasons == 1, se devuelve get(episodios) + # si está todo visto, hacemos como actualmente <-- el else no se hace nada.. 
CREO + # if config.get_setting("no_pile_on_seasons", "videolibrary") == 1 and len(dict_temp_Visible) == 1: # Sólo si hay una temporada + + # Creamos un item por cada temporada + for season, title in dict_temp.items(): + new_item = item.clone(action="get_episodes", title=title, contentSeason=season, + filtrar_season=True) + + # Menu contextual: Marcar la temporada como vista o no + visto = item_nfo.library_playcounts.get("season %s" % season, 0) + new_item.infoLabels["playcount"] = visto + if visto > 0: + texto = "Marcar temporada como no vista" + value = 0 + else: + texto = "Marcar temporada como vista" + value = 1 + new_item.context = [{"title": texto, + "action": "mark_season_as_watched", + "channel": "videolibrary", + "playcount": value}] + + # logger.debug("new_item:\n" + new_item.tostring('\n')) + itemlist.append(new_item) + + if len(itemlist) > 1: + itemlist = sorted(itemlist, key=lambda it: int(it.contentSeason)) + + if config.get_setting("show_all_seasons", "videolibrary"): + new_item = item.clone(action="get_episodes", title="*Todas las temporadas") + new_item.infoLabels["playcount"] = 0 + itemlist.insert(0, new_item) + + return itemlist + + +def get_episodes(item): + logger.info() + # logger.debug("item:\n" + item.tostring('\n')) + itemlist = [] + + # Menu contextual: Releer tvshow.nfo + head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo) + + # Crear un item en la lista para cada strm encontrado + for f in glob.glob1(item.path, u'*.strm'): + season_episode = scrapertools.get_season_and_episode(f) + if not season_episode: + # El fichero no incluye el numero de temporada y episodio + continue + season, episode = season_episode.split("x") + # Si hay q filtrar por temporada, ignoramos los capitulos de otras temporadas + if item.filtrar_season and int(season) != int(item.contentSeason): + continue + + # Obtener los datos del season_episode.nfo + nfo_path = filetools.join(item.path, f).replace('.strm', '.nfo') + head_nfo, epi = videolibrarytools.read_nfo(nfo_path) + + # Fijar el titulo del capitulo si es posible + if epi.contentTitle: + title_episodie = epi.contentTitle.strip() + else: + title_episodie = "Temporada %s Episodio %s" % \ + (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2)) + + epi.contentTitle = "%sx%s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2)) + epi.title = "%sx%s - %s" % (epi.contentSeason, str(epi.contentEpisodeNumber).zfill(2), title_episodie) + + if item_nfo.library_filter_show: + epi.library_filter_show = item_nfo.library_filter_show + + # Menu contextual: Marcar episodio como visto o no + visto = item_nfo.library_playcounts.get(season_episode, 0) + epi.infoLabels["playcount"] = visto + if visto > 0: + texto = "Marcar episodio como no visto" + value = 0 + else: + texto = "Marcar episodio como visto" + value = 1 + epi.context = [{"title": texto, + "action": "mark_content_as_watched", + "channel": "videolibrary", + "playcount": value, + "nfo": item.nfo}] + + # logger.debug("epi:\n" + epi.tostring('\n')) + itemlist.append(epi) + + return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) + + +def findvideos(item): + logger.info() + # logger.debug("item:\n" + item.tostring('\n')) + + itemlist = [] + list_canales = {} + item_local = None + + if not item.contentTitle or not item.strm_path: + logger.debug("No se pueden buscar videos por falta de parametros") + return [] + + content_title = filter(lambda c: c not in ":*?<>|\/", item.contentTitle.strip().lower()) + + if item.contentType == 'movie': + 
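# Path layout note (inferred from the code below): for movies the .nfo shares
# the folder's basename (path_dir/<folder>/<folder>.nfo) next to the .strm,
# while for shows a single folder-level tvshow.nfo is used. The per-channel
# link files collected afterwards follow the "<title>[<channel>].json" naming
# that fd[:-6].split('[') relies on to recover the channel name.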
item.strm_path = filetools.join(videolibrarytools.MOVIES_PATH, item.strm_path) + path_dir = os.path.dirname(item.strm_path) + item.nfo = filetools.join(path_dir, os.path.basename(path_dir) + ".nfo") + else: + item.strm_path = filetools.join(videolibrarytools.TVSHOWS_PATH, item.strm_path) + path_dir = os.path.dirname(item.strm_path) + item.nfo = filetools.join(path_dir, 'tvshow.nfo') + + for fd in filetools.listdir(path_dir): + if fd.endswith('.json'): + contenido, nom_canal = fd[:-6].split('[') + if (contenido.startswith(content_title) or item.contentType == 'movie') and nom_canal not in \ + list_canales.keys(): + list_canales[nom_canal] = filetools.join(path_dir, fd) + + num_canales = len(list_canales) + # logger.debug(str(list_canales)) + if 'downloads' in list_canales: + json_path = list_canales['downloads'] + item_json = Item().fromjson(filetools.read(json_path)) + item_json.contentChannel = "local" + # Soporte para rutas relativas en descargas + if filetools.is_relative(item_json.url): + item_json.url = filetools.join(videolibrarytools.LIBRARY_PATH, item_json.url) + + del list_canales['downloads'] + + # Comprobar q el video no haya sido borrado + if filetools.exists(item_json.url): + item_local = item_json.clone(action='play') + itemlist.append(item_local) + else: + num_canales -= 1 + + filtro_canal = '' + if num_canales > 1 and config.get_setting("ask_channel", "videolibrary"): + opciones = ["Mostrar solo los enlaces de %s" % k.capitalize() for k in list_canales.keys()] + opciones.insert(0, "Mostrar todos los enlaces") + if item_local: + opciones.append(item_local.title) + + from platformcode import platformtools + index = platformtools.dialog_select(config.get_localized_string(30163), opciones) + if index < 0: + return [] + + elif item_local and index == len(opciones) - 1: + filtro_canal = 'downloads' + platformtools.play_video(item_local) + + elif index > 0: + filtro_canal = opciones[index].replace("Mostrar solo los enlaces de ", "") + itemlist = [] + + for nom_canal, json_path in list_canales.items(): + if filtro_canal and filtro_canal != nom_canal.capitalize(): + continue + + # Importamos el canal de la parte seleccionada + try: + channel = __import__('channels.%s' % nom_canal, fromlist=["channels.%s" % nom_canal]) + except ImportError: + exec "import channels." + nom_canal + " as channel" + + item_json = Item().fromjson(filetools.read(json_path)) + list_servers = [] + + try: + # FILTERTOOLS + # si el canal tiene filtro se le pasa el nombre que tiene guardado para que filtre correctamente. + if "list_language" in item_json: + # si se viene desde la videoteca del addon + if "library_filter_show" in item: + item_json.show = item.library_filter_show.get(nom_canal, "") + + # Ejecutamos find_videos, del canal o común + if hasattr(channel, 'findvideos'): + from core import servertools + list_servers = getattr(channel, 'findvideos')(item_json) + list_servers = servertools.filter_servers(list_servers) + else: + from core import servertools + list_servers = servertools.find_video_items(item_json) + except Exception, ex: + logger.error("Ha fallado la funcion findvideos para el canal %s" % nom_canal) + template = "An exception of type %s occured. 
Arguments:\n%r" + message = template % (type(ex).__name__, ex.args) + logger.error(message) + + # Cambiarle el titulo a los servers añadiendoles el nombre del canal delante y + # las infoLabels y las imagenes del item si el server no tiene + for server in list_servers: + if not server.action: # Ignorar las etiquetas + continue + + server.contentChannel = server.channel + server.channel = "videolibrary" + server.nfo = item.nfo + server.strm_path = item.strm_path + + # Se añade el nombre del canal si se desea + if config.get_setting("quit_channel_name", "videolibrary") == 0: + server.title = "%s: %s" % (nom_canal.capitalize(), server.title) + + server.infoLabels = item_json.infoLabels + + if not server.thumbnail: + server.thumbnail = item.thumbnail + + # logger.debug("server:\n%s" % server.tostring('\n')) + itemlist.append(server) + + # return sorted(itemlist, key=lambda it: it.title.lower()) + return itemlist + + +def play(item): + logger.info() + # logger.debug("item:\n" + item.tostring('\n')) + + if not item.contentChannel == "local": + channel = __import__('channels.%s' % item.contentChannel, fromlist=["channels.%s" % item.contentChannel]) + if hasattr(channel, "play"): + itemlist = getattr(channel, "play")(item) + + else: + itemlist = [item.clone()] + else: + itemlist = [item.clone(url=item.url, server="local")] + + # Para enlaces directo en formato lista + if isinstance(itemlist[0], list): + item.video_urls = itemlist + itemlist = [item] + + # Esto es necesario por si el play del canal elimina los datos + for v in itemlist: + if isinstance(v, Item): + v.nfo = item.nfo + v.strm_path = item.strm_path + v.infoLabels = item.infoLabels + if item.contentTitle: + v.title = item.contentTitle + else: + if item.contentType == "episode": + v.title = "Episodio %s" % item.contentEpisodeNumber + v.thumbnail = item.thumbnail + v.contentThumbnail = item.thumbnail + + return itemlist + + +def update_videolibrary(item): + logger.info() + + # Actualizar las series activas sobreescribiendo + import videolibrary_service + videolibrary_service.check_for_update(overwrite=True) + + # Eliminar las carpetas de peliculas que no contengan archivo strm + for raiz, subcarpetas, ficheros in filetools.walk(videolibrarytools.MOVIES_PATH): + strm = False + for f in ficheros: + if f.endswith(".strm"): + strm = True + break + + if ficheros and not strm: + logger.debug("Borrando carpeta de pelicula eliminada: %s" % raiz) + filetools.rmdirtree(raiz) + + +# metodos de menu contextual +def update_tvshow(item): + logger.info() + # logger.debug("item:\n" + item.tostring('\n')) + + heading = 'Actualizando serie....' 
+ p_dialog = platformtools.dialog_progress_bg('alfa', heading) + p_dialog.update(0, heading, item.contentSerieName) + + import videolibrary_service + if videolibrary_service.update(item.path, p_dialog, 1, 1, item, False) and config.is_xbmc(): + from platformcode import xbmc_videolibrary + xbmc_videolibrary.update(folder=filetools.basename(item.path)) + + p_dialog.close() + + +def mark_content_as_watched(item): + logger.info() + # logger.debug("item:\n" + item.tostring('\n')) + + if filetools.exists(item.nfo): + head_nfo, it = videolibrarytools.read_nfo(item.nfo) + + if item.contentType == 'movie': + name_file = os.path.splitext(os.path.basename(item.nfo))[0] + elif item.contentType == 'episode': + name_file = "%sx%s" % (item.contentSeason, str(item.contentEpisodeNumber).zfill(2)) + else: + name_file = item.contentTitle + + if not hasattr(it, 'library_playcounts'): + it.library_playcounts = {} + it.library_playcounts.update({name_file: item.playcount}) + + # se comprueba que si todos los episodios de una temporada están marcados, se marque tb la temporada + if item.contentType != 'movie': + it = check_season_playcount(it, item.contentSeason) + + # Guardamos los cambios en item.nfo + if filetools.write(item.nfo, head_nfo + it.tojson()): + item.infoLabels['playcount'] = item.playcount + + if item.contentType == 'tvshow': + # Actualizar toda la serie + new_item = item.clone(contentSeason=-1) + mark_season_as_watched(new_item) + + if config.is_xbmc() and item.contentType == 'episode': + from platformcode import xbmc_videolibrary + xbmc_videolibrary.mark_content_as_watched_on_kodi(item, item.playcount) + + platformtools.itemlist_refresh() + + +def mark_season_as_watched(item): + logger.info() + # logger.debug("item:\n" + item.tostring('\n')) + + # Obtener el diccionario de episodios marcados + tvshow_path = filetools.join(item.path, 'tvshow.nfo') + head_nfo, it = videolibrarytools.read_nfo(tvshow_path) + if not hasattr(it, 'library_playcounts'): + it.library_playcounts = {} + + # Marcamos cada uno de los episodios encontrados de esta temporada + episodios_marcados = 0 + for f in glob.glob1(item.path, u'*.strm'): + # if f.endswith(".strm"): + season_episode = scrapertools.get_season_and_episode(f) + if not season_episode: + # El fichero no incluye el numero de temporada y episodio + continue + season, episode = season_episode.split("x") + + if int(item.contentSeason) == -1 or int(season) == int(item.contentSeason): + name_file = os.path.splitext(os.path.basename(f))[0] + it.library_playcounts[name_file] = item.playcount + episodios_marcados += 1 + + if episodios_marcados: + if int(item.contentSeason) == -1: + # Añadimos todas las temporadas al diccionario item.library_playcounts + for k in it.library_playcounts.keys(): + if k.startswith("season"): + it.library_playcounts[k] = item.playcount + else: + # Añadimos la temporada al diccionario item.library_playcounts + it.library_playcounts["season %s" % item.contentSeason] = item.playcount + + # se comprueba que si todas las temporadas están vistas, se marque la serie como vista + it = check_tvshow_playcount(it, item.contentSeason) + + # Guardamos los cambios en tvshow.nfo + filetools.write(tvshow_path, head_nfo + it.tojson()) + item.infoLabels['playcount'] = item.playcount + + if config.is_xbmc(): + # Actualizamos la BBDD de Kodi + from platformcode import xbmc_videolibrary + xbmc_videolibrary.mark_season_as_watched_on_kodi(item, item.playcount) + + platformtools.itemlist_refresh() + + +def mark_tvshow_as_updatable(item): + logger.info() + head_nfo, 
it = videolibrarytools.read_nfo(item.nfo) + it.active = item.active + filetools.write(item.nfo, head_nfo + it.tojson()) + + platformtools.itemlist_refresh() + + +def delete(item): + def delete_all(_item): + filetools.rmdirtree(_item.path) + + if config.is_xbmc(): + import xbmc + # esperamos 3 segundos para dar tiempo a borrar los ficheros + xbmc.sleep(3000) + # TODO mirar por qué no funciona al limpiar en la videoteca de Kodi al añadirle un path + # limpiamos la videoteca de Kodi + from platformcode import xbmc_videolibrary + xbmc_videolibrary.clean() + + logger.info("Eliminados todos los enlaces") + platformtools.itemlist_refresh() + + # logger.info(item.contentTitle) + # logger.debug(item.tostring('\n')) + + if item.contentType == 'movie': + heading = "Eliminar película" + else: + heading = "Eliminar serie" + + if item.multicanal: + # Obtener listado de canales + opciones = ["Eliminar solo los enlaces de %s" % k.capitalize() for k in item.library_urls.keys() if + k != "downloads"] + opciones.insert(0, heading) + + index = platformtools.dialog_select(config.get_localized_string(30163), opciones) + + if index == 0: + # Seleccionado Eliminar pelicula/serie + delete_all(item) + + elif index > 0: + # Seleccionado Eliminar canal X + canal = opciones[index].replace("Eliminar solo los enlaces de ", "").lower() + + num_enlaces = 0 + for fd in filetools.listdir(item.path): + if fd.endswith(canal + '].json'): + if filetools.remove(filetools.join(item.path, fd)): + num_enlaces += 1 + + if num_enlaces > 0: + # Actualizar .nfo + head_nfo, item_nfo = videolibrarytools.read_nfo(item.nfo) + del item_nfo.library_urls[canal] + filetools.write(item.nfo, head_nfo + item_nfo.tojson()) + + msg_txt = "Eliminados %s enlaces del canal %s" % (num_enlaces, canal) + logger.info(msg_txt) + platformtools.dialog_notification(heading, msg_txt) + platformtools.itemlist_refresh() + + else: + if platformtools.dialog_yesno(heading, + "¿Realmente desea eliminar '%s' de su videoteca?" 
% item.infoLabels['title']): + delete_all(item) + + +def check_season_playcount(item, season): + logger.info() + + if season: + episodios_temporada = 0 + episodios_vistos_temporada = 0 + for key, value in item.library_playcounts.iteritems(): + if key.startswith("%sx" % season): + episodios_temporada += 1 + if value > 0: + episodios_vistos_temporada += 1 + + if episodios_temporada == episodios_vistos_temporada: + # se comprueba que si todas las temporadas están vistas, se marque la serie como vista + item.library_playcounts.update({"season %s" % season: 1}) + else: + # se comprueba que si todas las temporadas están vistas, se marque la serie como vista + item.library_playcounts.update({"season %s" % season: 0}) + + return check_tvshow_playcount(item, season) + + +def check_tvshow_playcount(item, season): + logger.info() + if season: + temporadas_serie = 0 + temporadas_vistas_serie = 0 + for key, value in item.library_playcounts.iteritems(): + if key == ("season %s" % season): + temporadas_serie += 1 + if value > 0: + temporadas_vistas_serie += 1 + + if temporadas_serie == temporadas_vistas_serie: + item.library_playcounts.update({item.title: 1}) + else: + item.library_playcounts.update({item.title: 0}) + + else: + playcount = item.library_playcounts.get(item.title, 0) + item.library_playcounts.update({item.title: playcount}) + + return item diff --git a/plugin.video.alfa/channels/vidz7.json b/plugin.video.alfa/channels/vidz7.json new file mode 100755 index 00000000..26cbfd3b --- /dev/null +++ b/plugin.video.alfa/channels/vidz7.json @@ -0,0 +1,33 @@ +{ + "id": "vidz7", + "name": "Vidz7", + "active": true, + "adult": true, + "language": "es", + "banner": "https://www.dropbox.com/s/182r0wby3ohnxkc/bannermenu.jpg?dl=1", + "thumbnail": "https://www.dropbox.com/s/7z31b4ixve2ge0l/thumbnail.png?dl=1", + "version": 1, + "changes": [ + { + "date": "29/04/2017", + "description": "Pequeña correción en la paginación" + }, + { + "date": "03/01/2017", + "description": "Versión inicial" + } + ], + "categories": [ + "adult" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/vidz7.py b/plugin.video.alfa/channels/vidz7.py new file mode 100755 index 00000000..4962b260 --- /dev/null +++ b/plugin.video.alfa/channels/vidz7.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import logger +from core import scrapertools +from core.item import Item + + +def mainlist(item): + logger.info() + itemlist = [] + itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos", url="http://www.vidz7.com/")) + itemlist.append( + Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.vidz7.com/category/")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar", + url="http://www.vidz7.com/?s=")) + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = "{0}{1}".format(item.url, texto) + try: + return lista(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + +def categorias(item): + logger.info() + itemlist = [] + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}", "", data) + patron = '<li><a 
href="([^"]+)">(.*?)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + for url, actriz in matches: + itemlist.append(Item(channel=item.channel, action="lista", title=actriz, url=url)) + + return itemlist + + +def lista(item): + logger.info() + + # Descarga la página + data = scrapertools.cache_page(item.url) + data = re.sub(r"\n|\r|\t|\s{2}", "", data) + + # Extrae las entradas de la pagina seleccionada + patron = "<a href='.*?.' class='thumb' style='background-image:url\(\"([^\"]+)\"\).*?.<h6><a class='hp' href='([^']+)'>(.*?)</a></h6>" + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedthumbnail, scrapedurl, scrapedtitle in matches: + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + url = urlparse.urljoin(item.url, scrapedurl) + title = scrapedtitle.strip() + + # Añade al listado + itemlist.append(Item(channel=item.channel, action="play", title=title, thumbnail=thumbnail, fanart=thumbnail, + fulltitle=title, url=url, + viewmode="movie", folder=True)) + + paginacion = scrapertools.find_single_match(data, + '<a class="active".*?.>\d+</a><a class="inactive" href ="([^"]+)">') + + if paginacion: + itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente", url=paginacion)) + + return itemlist + + +def play(item): + logger.info() + itemlist = [] + # Descarga la página + data = scrapertools.cachePage(item.url) + data = scrapertools.unescape(data) + logger.info(data) + from core import servertools + itemlist.extend(servertools.find_video_items(data=data)) + for videoitem in itemlist: + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + videoitem.action = "play" + videoitem.folder = False + videoitem.title = item.title + + return itemlist diff --git a/plugin.video.alfa/channels/vixto.json b/plugin.video.alfa/channels/vixto.json new file mode 100755 index 00000000..2107c89c --- /dev/null +++ b/plugin.video.alfa/channels/vixto.json @@ -0,0 +1,101 @@ +{ + "id": "vixto", + "name": "Vixto", + "active": true, + "adult": false, + "language": "es", + "banner": "vixto.png", + "thumbnail": "http://i.imgur.com/y4c4HT2.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "16/02/2017", + "description": "Correccion para el apartado de series" + }, + { + "date": "12/11/2016", + "description": "Primera version, sustituye a oranline" + } + ], + "categories": [ + "latino", + "movie", + "tvshow", + "vos" + ], + "settings": [ + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "Buscar información extra", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1" + ] + }, + { + "id": "filterlanguages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 3, + "enabled": true, + "visible": true, + "lvalues": [ + "VOSE", + "Latino", + "Español", + "No filtrar" + ] + }, + { + "id": "filterlinks", + "type": "list", + "label": "Mostrar enlaces de tipo...", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Solo Descarga", + "Solo Online", + "No filtrar" + ] + }, + { + "id": "orderlinks", + "type": "list", + "label": "Ordenar enlaces por...", + "default": 2, 
+ "enabled": true, + "visible": true, + "lvalues": [ + "Servidor", + "Idioma", + "Más recientes" + ] + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/vixto.py b/plugin.video.alfa/channels/vixto.py new file mode 100755 index 00000000..62ab7d2f --- /dev/null +++ b/plugin.video.alfa/channels/vixto.py @@ -0,0 +1,383 @@ +# -*- coding: utf-8 -*- + +import re + +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +# Configuracion del canal +__modo_grafico__ = config.get_setting('modo_grafico', "vixto") +__perfil__ = config.get_setting('perfil', "vixto") + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] +color1, color2, color3 = perfil[__perfil__] + +host = "http://www.vixto.net/" + + +def mainlist(item): + logger.info() + itemlist = list() + + itemlist.append(item.clone(title="Películas", text_color=color2, action="", + text_bold=True)) + itemlist.append(item.clone(action="listado", title=" Estrenos", text_color=color1, url=host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" + "0/Directors%20Chair.png")) + itemlist.append(item.clone(action="listado", title=" Novedades", text_color=color1, url=host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" + "0/Directors%20Chair.png")) + itemlist.append(item.clone(action="listado", title="Series - Novedades", text_color=color2, url=host, + thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/" + "0/TV%20Series.png", text_bold=True)) + + itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3, + url="http://www.vixto.net/buscar?q=")) + + itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) + return itemlist + + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + try: + return busqueda(item) + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%{0}".format(line)) + return [] + + +def newest(categoria): + logger.info() + itemlist = list() + item = Item() + try: + if categoria == 'peliculas': + item.url = host + itemlist = listado(item) + + if itemlist[-1].action == "listado": + itemlist.pop() + item.title = "Estrenos" + itemlist.extend(listado(item)) + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def listado(item): + logger.info() + itemlist = list() + + item.infoLabels['mediatype'] = "movie" + if "Estrenos" in item.title: + bloque_head = "ESTRENOS CARTELERA" + elif "Series" in item.title: + bloque_head = "RECIENTE SERIES" + item.infoLabels['mediatype'] = "tvshow" + else: + bloque_head = "RECIENTE PELICULAS" + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |\s{2}", "", data) + + # Extrae las entradas (carpetas) + bloque = 
scrapertools.find_single_match(data, bloque_head + '\s*</h2>(.*?)</section>') + patron = '<div class="".*?href="([^"]+)".*?src="([^"]+)".*?<div class="calZG">(.*?)</div>' \ + '(.*?)</div>.*?href.*?>(.*?)</a>' + matches = scrapertools.find_multiple_matches(bloque, patron) + + for scrapedurl, scrapedthumbnail, calidad, idiomas, scrapedtitle in matches: + title = scrapedtitle + langs = [] + if 'idio idi1' in idiomas: + langs.append("VOS") + if 'idio idi2' in idiomas: + langs.append("LAT") + if 'idio idi4' in idiomas: + langs.append("ESP") + if langs: + title += " [%s]" % "/".join(langs) + if calidad: + title += " %s" % calidad + + filtro_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w342", "") + filtro_list = {"poster_path": filtro_thumb} + filtro_list = filtro_list.items() + + if item.contentType == "tvshow": + new_item = item.clone(action="episodios", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, + fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list}, + contentTitle=scrapedtitle, context="buscar_trailer", text_color=color1, + show=scrapedtitle, text_bold=False) + else: + new_item = item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, + fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list}, text_bold=False, + contentTitle=scrapedtitle, context="buscar_trailer", text_color=color1) + + itemlist.append(new_item) + + if item.action == "listado": + try: + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + return itemlist + + +def busqueda(item): + logger.info() + itemlist = list() + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |\s{2}", "", data) + + # Extrae las entradas (carpetas) + bloque = scrapertools.find_single_match(data, '<h2>Peliculas</h2>(.*?)</div>') + bloque += scrapertools.find_single_match(data, '<h2>Series</h2>(.*?)</div>') + + patron = '<figure class="col-lg-2.*?href="([^"]+)".*?src="([^"]+)".*?<figcaption title="([^"]+)"' + matches = scrapertools.find_multiple_matches(bloque, patron) + + peliculas = False + series = False + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + new_item = Item(channel=item.channel, contentType="movie", url=scrapedurl, title=" " + scrapedtitle, + text_color=color1, context="buscar_trailer", fulltitle=scrapedtitle, + contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, action="findvideos") + + if "/peliculas/" in scrapedurl and not peliculas: + itemlist.append(Item(channel=item.channel, action="", title="Películas", text_color=color2)) + peliculas = True + if "/series/" in scrapedurl and not series: + itemlist.append(Item(channel=item.channel, action="", title="Series", text_color=color2)) + series = True + + if "/series/" in scrapedurl: + new_item.contentType = "tvshow" + new_item.show = scrapedtitle + new_item.action = "episodios" + + filtro_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w342", "") + filtro_list = {"poster_path": filtro_thumb} + new_item.infoLabels["filtro"] = filtro_list.items() + itemlist.append(new_item) + + try: + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + return itemlist + + +def episodios(item): + logger.info() + itemlist = list() + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |\s{2}", "", data) + + # Extrae las entradas (carpetas) + bloque = scrapertools.find_single_match(data, '<strong>Temporada:(.*?)</div>') + matches = scrapertools.find_multiple_matches(bloque, 
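# each (url, title) pair matched below is one season link scraped from the
# "<strong>Temporada:" block; the anchor text carries the season number,
# which is reused for infoLabels["season"] and the "Temporada %s" title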
'href="([^"]+)">(.*?)</a>') + + for scrapedurl, scrapedtitle in matches: + title = "Temporada %s" % scrapedtitle + + new_item = item.clone(action="", title=title, text_color=color2) + new_item.infoLabels["season"] = scrapedtitle + new_item.infoLabels["mediatype"] = "season" + data_season = httptools.downloadpage(scrapedurl).data + data_season = re.sub(r"\n|\r|\t| |\s{2}", "", data_season) + patron = '<li class="media">.*?href="([^"]+)"(.*?)<div class="media-body">.*?href.*?>' \ + '(.*?)</a>' + matches = scrapertools.find_multiple_matches(data_season, patron) + + elementos = [] + for url, status, title in matches: + if not "Enlaces Disponibles" in status: + continue + elementos.append(title) + item_epi = item.clone(action="findvideos", url=url, text_color=color1) + item_epi.infoLabels["season"] = scrapedtitle + episode = scrapertools.find_single_match(title, 'Capitulo (\d+)') + titulo = scrapertools.find_single_match(title, 'Capitulo \d+\s*-\s*(.*?)$') + item_epi.infoLabels["episode"] = episode + item_epi.infoLabels["mediatype"] = "episode" + item_epi.title = "%sx%s %s" % (scrapedtitle, episode.zfill(2), titulo) + + itemlist.insert(0, item_epi) + if elementos: + itemlist.insert(0, new_item) + + if item.infoLabels["tmdb_id"] and itemlist: + try: + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + except: + pass + + if itemlist: + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", text_color="green", + filtro=True, action="add_serie_to_library", fulltitle=item.fulltitle, + extra="episodios", url=item.url, infoLabels=item.infoLabels, show=item.show)) + else: + itemlist.append(item.clone(title="Serie sin episodios disponibles", action="", text_color=color3)) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = list() + + try: + filtro_idioma = config.get_setting("filterlanguages", item.channel) + filtro_enlaces = config.get_setting("filterlinks", item.channel) + except: + filtro_idioma = 3 + filtro_enlaces = 2 + + dict_idiomas = {'Castellano': 2, 'Latino': 1, 'Subtitulada': 0} + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |\s{2}", "", data) + + if not item.infoLabels["tmdb_id"]: + year = scrapertools.find_single_match(data, 'Lanzamiento.*?(\d{4})') + + if year != "": + item.infoLabels['filtro'] = "" + item.infoLabels['year'] = int(year) + + # Ampliamos datos en tmdb + try: + tmdb.set_infoLabels_item(item, __modo_grafico__) + except: + pass + + if not item.infoLabels['plot']: + plot = scrapertools.find_single_match(data, '<p class="plot">(.*?)</p>') + item.infoLabels['plot'] = plot + + if filtro_enlaces != 0: + list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Ver Online", item) + if list_enlaces: + itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1, + text_bold=True)) + itemlist.extend(list_enlaces) + if filtro_enlaces != 1: + list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Descarga Directa", item) + if list_enlaces: + itemlist.append(item.clone(action="", title="Enlaces Descarga", text_color=color1, + text_bold=True)) + itemlist.extend(list_enlaces) + + # Opción "Añadir esta película a la videoteca de XBMC" + if itemlist and item.contentType == "movie": + contextual = config.is_xbmc() + itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", + text_color="magenta", contextual=contextual)) + if item.extra != "findvideos": + if 
config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color="green", + filtro=True, action="add_pelicula_to_library", fulltitle=item.fulltitle, + extra="findvideos", url=item.url, infoLabels=item.infoLabels, + contentType=item.contentType, contentTitle=item.contentTitle, show=item.show)) + elif not itemlist and item.contentType == "movie": + itemlist.append(item.clone(title="Película sin enlaces disponibles", action="", text_color=color3)) + + return itemlist + + +def bloque_enlaces(data, filtro_idioma, dict_idiomas, tipo, item): + logger.info() + + lista_enlaces = list() + bloque = scrapertools.find_single_match(data, tipo + '(.*?)</table>') + patron = '<td class="sape">\s*<i class="idioma-([^"]+)".*?href="([^"]+)".*?</p>.*?<td>([^<]+)</td>' \ + '.*?<td class="desaparecer">(.*?)</td>' + matches = scrapertools.find_multiple_matches(bloque, patron) + filtrados = [] + for language, scrapedurl, calidad, orden in matches: + language = language.strip() + server = scrapertools.find_single_match(scrapedurl, 'http(?:s|)://(?:www.|)(\w+).') + if server == "ul": + server = "uploadedto" + if server == "streamin": + server = "streaminto" + if server == "waaw": + server = "netutv" + + if servertools.is_server_enabled(server): + try: + servers_module = __import__("servers." + server) + title = " Mirror en " + server + " (" + language + ") (Calidad " + calidad.strip() + ")" + if filtro_idioma == 3 or item.filtro: + lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2, + url=scrapedurl, idioma=language, orden=orden)) + else: + idioma = dict_idiomas[language] + if idioma == filtro_idioma: + lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", + url=scrapedurl, server=server, idioma=language, orden=orden)) + else: + if language not in filtrados: + filtrados.append(language) + except: + pass + + order = config.get_setting("orderlinks", item.channel) + if order == 0: + lista_enlaces.sort(key=lambda item: item.server) + elif order == 1: + lista_enlaces.sort(key=lambda item: item.idioma) + else: + lista_enlaces.sort(key=lambda item: item.orden, reverse=True) + + if filtro_idioma != 3: + if len(filtrados) > 0: + title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados) + lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3, + filtro=True)) + + return lista_enlaces + + +def play(item): + logger.info() + itemlist = list() + enlace = servertools.findvideosbyserver(item.url, item.server) + itemlist.append(item.clone(url=enlace[0][1])) + + return itemlist diff --git a/plugin.video.alfa/channels/vseries.json b/plugin.video.alfa/channels/vseries.json new file mode 100755 index 00000000..702417d1 --- /dev/null +++ b/plugin.video.alfa/channels/vseries.json @@ -0,0 +1,34 @@ +{ + "id": "vseries", + "name": "V Serie", + "active": false, + "adult": false, + "language": "es", + "banner": "vseries.png", + "thumbnail": "vseries.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/2016", + "description": "Eliminado código innecesario." 
+ } + ], + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/vseries.py b/plugin.video.alfa/channels/vseries.py new file mode 100755 index 00000000..42a2f8b7 --- /dev/null +++ b/plugin.video.alfa/channels/vseries.py @@ -0,0 +1,321 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + +from core import config +from core import jsontools +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + +DEFAULT_HEADERS = [] +DEFAULT_HEADERS.append( + ["User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12"]) + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url="")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Películas", url="http://vserie.com/peliculas", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://vserie.com/search")) + + return itemlist + + +def menuseries(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, action="novedades", title="Últimos episodios", url="http://vserie.com/series", + viewmode="movie")) + itemlist.append( + Item(channel=item.channel, action="series", title="Todas", url="http://vserie.com/series", viewmode="movie")) + + return itemlist + + +def search(item, texto): + logger.info() + + try: + if config.get_setting("zampaseriesaccount") == True: + login() + + if item.url == "": + item.url = "http://vserie.com/search" + + texto = texto.replace(" ", "+") + + # Mete el referer en item.extra + post = "s=" + texto + data = scrapertools.cache_page(item.url, post=post) + data = scrapertools.find_single_match(data, '<div id="resultados">(.*?)<div id="cargando">') + ''' + <div id="resultados"> + <h1>Resultados de la Busqueda para skyfall (1)</h1> + <div id="lista"> <ul> <li title="007 Skyfall" id="id-1"><a href="http://vserie.com/pelicula/2-007-skyfall"><img src="http://vserie.com/images/p_p2_s.png" alt=""></a></li> </ul> </div> + <div id="cargando"><i class="icon-spinner icon-spin"></i>Cargando más resultados</div> + </div> + ''' + patron = '<li title="([^"]+)"[^<]+<a href="([^"]+)"><img src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + for scrapedtitle, scrapedurl, scrapedthumbnail in matches: + if "/pelicula/" in scrapedurl: + title = scrapedtitle + url = scrapedurl + thumbnail = scrapedthumbnail + plot = "" + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, + plot=plot, show=title)) + else: + title = scrapedtitle + url = scrapedurl + thumbnail = scrapedthumbnail + plot = "" + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title)) + + return itemlist + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def novedades(item): + logger.info() + + if config.get_setting("zampaseriesaccount") == True: + login() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + data = 
scrapertools.find_single_match(data, 'ltimas Series Actualizadas</h2[^<]+<div id="listado">(.*?)</ul>') + logger.info("data=" + data) + + # Extrae las entradas (carpetas) + patron = '<li><a href="([^"]+)"><img src="([^"]+)[^<]+<h3>([^<]+)</h3></a>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + title = scrapertools.htmlclean(scrapedtitle) + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title)) + + return itemlist + + +def series(item, data=""): + logger.info() + + if config.get_setting("zampaseriesaccount") == True: + login() + + # Descarga la pagina + if data == "": + if item.extra == "": + data = scrapertools.cache_page(item.url) + else: + data = scrapertools.cache_page(item.url, post=item.extra) + logger.info("data=" + data) + + json_object = jsontools.load(data) + # {"resultado":{"40":"<li id=\"id-40\" title=\"The 100\"><a href=\"http:\/\/vserie.com\/serie\/175-the-100\"><img src=\"http:\/\/vserie.com\/images\/s_s175_s.png\" alt=\"The 100\"><\/a><\/li>","41":"<li id=\"id-41\" title=\"Teen Wolf\"><a href=\"http:\/\/vserie.com\/serie\/25-teen-wolf\"><img src=\"http:\/\/vserie.com\/images\/s_s25_s.png\" alt=\"Teen Wolf\"><\/a><\/li>","42":"<li id=\"id-42\" title=\"Surviving Jack\"><a href=\"http:\/\/vserie.com\/serie\/178-surviving-jack\"><img src=\"http:\/\/vserie.com\/images\/s_s178_s.png\" alt=\"Surviving Jack\"><\/a><\/li>","43":"<li id=\"id-43\" title=\"Supernatural\"><a href=\"http:\/\/vserie.com\/serie\/68-supernatural\"><img src=\"http:\/\/vserie.com\/images\/s_s68_s.png\" alt=\"Supernatural\"><\/a><\/li>","44":"<li id=\"id-44\" title=\"Suits\"><a href=\"http:\/\/vserie.com\/serie\/131-suits\"><img src=\"http:\/\/vserie.com\/images\/s_s131_s.png\" alt=\"Suits\"><\/a><\/li>","45":"<li id=\"id-45\" title=\"Star-Crossed\"><a href=\"http:\/\/vserie.com\/serie\/154-star-crossed\"><img src=\"http:\/\/vserie.com\/images\/s_s154_s.png\" alt=\"Star-Crossed\"><\/a><\/li>","46":"<li id=\"id-46\" title=\"Sons of Anarchy\"><a href=\"http:\/\/vserie.com\/serie\/46-sons-of-anarchy\"><img src=\"http:\/\/vserie.com\/images\/s_s46_s.png\" alt=\"Sons of Anarchy\"><\/a><\/li>","47":"<li id=\"id-47\" title=\"Sleepy Hollow\"><a href=\"http:\/\/vserie.com\/serie\/52-sleepy-hollow\"><img src=\"http:\/\/vserie.com\/images\/s_s52_s.png\" alt=\"Sleepy Hollow\"><\/a><\/li>","48":"<li id=\"id-48\" title=\"Skins\"><a href=\"http:\/\/vserie.com\/serie\/36-skins\"><img src=\"http:\/\/vserie.com\/images\/s_s36_s.png\" alt=\"Skins\"><\/a><\/li>","49":"<li id=\"id-49\" title=\"Sirens\"><a href=\"http:\/\/vserie.com\/serie\/172-sirens\"><img src=\"http:\/\/vserie.com\/images\/s_s172_s.png\" alt=\"Sirens\"><\/a><\/li>","50":"<li id=\"id-50\" title=\"Sin identidad\"><a href=\"http:\/\/vserie.com\/serie\/199-sin-identidad\"><img src=\"http:\/\/vserie.com\/images\/s_s199_s.png\" alt=\"Sin identidad\"><\/a><\/li>","51":"<li id=\"id-51\" title=\"Silicon Valley\"><a href=\"http:\/\/vserie.com\/serie\/179-silicon-valley\"><img src=\"http:\/\/vserie.com\/images\/s_s179_s.png\" alt=\"Silicon Valley\"><\/a><\/li>","52":"<li id=\"id-52\" title=\"Siberia\"><a href=\"http:\/\/vserie.com\/serie\/39-siberia\"><img 
src=\"http:\/\/vserie.com\/images\/s_s39_s.png\" alt=\"Siberia\"><\/a><\/li>","53":"<li id=\"id-53\" title=\"Sherlock\"><a href=\"http:\/\/vserie.com\/serie\/103-sherlock\"><img src=\"http:\/\/vserie.com\/images\/s_s103_s.png\" alt=\"Sherlock\"><\/a><\/li>","54":"<li id=\"id-54\" title=\"Shameless\"><a href=\"http:\/\/vserie.com\/serie\/142-shameless\"><img src=\"http:\/\/vserie.com\/images\/s_s142_s.png\" alt=\"Shameless\"><\/a><\/li>","55":"<li id=\"id-55\" title=\"Salem\"><a href=\"http:\/\/vserie.com\/serie\/186-salem\"><img src=\"http:\/\/vserie.com\/images\/s_s186_s.png\" alt=\"Salem\"><\/a><\/li>","56":"<li id=\"id-56\" title=\"Rosemary's Baby (La semilla del diablo)\"><a href=\"http:\/\/vserie.com\/serie\/198-rosemary-039-s-baby-la-semilla-del-diablo\"><img src=\"http:\/\/vserie.com\/images\/s_s198_s.png\" alt=\"Rosemary's Baby (La semilla del diablo)\"><\/a><\/li>","57":"<li id=\"id-57\" title=\"Ripper Street\"><a href=\"http:\/\/vserie.com\/serie\/100-ripper-street\"><img src=\"http:\/\/vserie.com\/images\/s_s100_s.png\" alt=\"Ripper Street\"><\/a><\/li>","58":"<li id=\"id-58\" title=\"Revolution\"><a href=\"http:\/\/vserie.com\/serie\/62-revolution\"><img src=\"http:\/\/vserie.com\/images\/s_s62_s.png\" alt=\"Revolution\"><\/a><\/li>","59":"<li id=\"id-59\" title=\"Revenge\"><a href=\"http:\/\/vserie.com\/serie\/67-revenge\"><img src=\"http:\/\/vserie.com\/images\/s_s67_s.png\" alt=\"Revenge\"><\/a><\/li>","60":"<li id=\"id-60\" title=\"Resurrection\"><a href=\"http:\/\/vserie.com\/serie\/167-resurrection\"><img src=\"http:\/\/vserie.com\/images\/s_s167_s.png\" alt=\"Resurrection\"><\/a><\/li>","61":"<li id=\"id-61\" title=\"Remedy\"><a href=\"http:\/\/vserie.com\/serie\/161-remedy\"><img src=\"http:\/\/vserie.com\/images\/s_s161_s.png\" alt=\"Remedy\"><\/a><\/li>","62":"<li id=\"id-62\" title=\"Reign\"><a href=\"http:\/\/vserie.com\/serie\/92-reign\"><img src=\"http:\/\/vserie.com\/images\/s_s92_s.png\" alt=\"Reign\"><\/a><\/li>","63":"<li id=\"id-63\" title=\"Ray Donovan\"><a href=\"http:\/\/vserie.com\/serie\/44-ray-donovan\"><img src=\"http:\/\/vserie.com\/images\/s_s44_s.png\" alt=\"Ray Donovan\"><\/a><\/li>","64":"<li id=\"id-64\" title=\"Ravenswood\"><a href=\"http:\/\/vserie.com\/serie\/93-ravenswood\"><img src=\"http:\/\/vserie.com\/images\/s_s93_s.png\" alt=\"Ravenswood\"><\/a><\/li>","65":"<li id=\"id-65\" title=\"Psych\"><a href=\"http:\/\/vserie.com\/serie\/203-psych\"><img src=\"http:\/\/vserie.com\/images\/s_s203_s.png\" alt=\"Psych\"><\/a><\/li>","66":"<li id=\"id-66\" title=\"Pretty Little Liars (Pequeñas mentirosas)\"><a href=\"http:\/\/vserie.com\/serie\/38-pretty-little-liars-peque-ntilde-as-mentirosas\"><img src=\"http:\/\/vserie.com\/images\/s_s38_s.png\" alt=\"Pretty Little Liars (Pequeñas mentirosas)\"><\/a><\/li>","67":"<li id=\"id-67\" title=\"Power\"><a href=\"http:\/\/vserie.com\/serie\/205-power\"><img src=\"http:\/\/vserie.com\/images\/s_s205_s.png\" alt=\"Power\"><\/a><\/li>","68":"<li id=\"id-68\" title=\"Person of Interest\"><a href=\"http:\/\/vserie.com\/serie\/59-person-of-interest\"><img src=\"http:\/\/vserie.com\/images\/s_s59_s.png\" alt=\"Person of Interest\"><\/a><\/li>","69":"<li id=\"id-69\" title=\"Perdidos (Lost)\"><a href=\"http:\/\/vserie.com\/serie\/112-perdidos-lost\"><img src=\"http:\/\/vserie.com\/images\/s_s112_s.png\" alt=\"Perdidos (Lost)\"><\/a><\/li>","70":"<li id=\"id-70\" title=\"Perception\"><a href=\"http:\/\/vserie.com\/serie\/164-perception\"><img src=\"http:\/\/vserie.com\/images\/s_s164_s.png\" 
alt=\"Perception\"><\/a><\/li>","71":"<li id=\"id-71\" title=\"Penny Dreadful\"><a href=\"http:\/\/vserie.com\/serie\/195-penny-dreadful\"><img src=\"http:\/\/vserie.com\/images\/s_s195_s.png\" alt=\"Penny Dreadful\"><\/a><\/li>","72":"<li id=\"id-72\" title=\"Peaky Blinders\"><a href=\"http:\/\/vserie.com\/serie\/97-peaky-blinders\"><img src=\"http:\/\/vserie.com\/images\/s_s97_s.png\" alt=\"Peaky Blinders\"><\/a><\/li>","73":"<li id=\"id-73\" title=\"Orphan Black\"><a href=\"http:\/\/vserie.com\/serie\/158-orphan-black\"><img src=\"http:\/\/vserie.com\/images\/s_s158_s.png\" alt=\"Orphan Black\"><\/a><\/li>","74":"<li id=\"id-74\" title=\"Orange Is the New Black\"><a href=\"http:\/\/vserie.com\/serie\/13-orange-is-the-new-black\"><img src=\"http:\/\/vserie.com\/images\/s_s13_s.png\" alt=\"Orange Is the New Black\"><\/a><\/li>"} + rows = json_object["resultado"] + + data = "" + for row in rows: + data = data + rows[row] + + logger.info("data=" + repr(data)) + + # Extrae las entradas (carpetas) + patron = 'title="([^"]+)"[^<]+<a href="(http.//vserie.com/serie/[^"]+)"><img src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedtitle, scrapedurl, scrapedthumbnail in matches: + title = scrapertools.htmlclean(scrapedtitle) + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title)) + + if not "/paginador/" in item.url: + itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente", + url="http://vserie.com/api/paginador/", extra="tipo=series&last=39", viewmode="movie")) + else: + actual = scrapertools.find_single_match(item.extra, "last\=(\d+)") + siguiente = str(int(actual) + 35) + itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente", + url="http://vserie.com/api/paginador/", extra="tipo=series&last=" + siguiente, + viewmode="movie")) + + return itemlist + + +def peliculas(item, data=""): + logger.info() + + if config.get_setting("zampaseriesaccount") == True: + login() + + # Descarga la pagina + if data == "": + if item.extra == "": + data = scrapertools.cache_page(item.url) + else: + data = scrapertools.cache_page(item.url, post=item.extra) + # logger.info("data="+data) + + json_object = jsontools.load(data) + # {"resultado":{"40":"<li id=\"id-40\" title=\"The 100\"><a href=\"http:\/\/vserie.com\/serie\/175-the-100\"><img src=\"http:\/\/vserie.com\/images\/s_s175_s.png\" alt=\"The 100\"><\/a><\/li>","41":"<li id=\"id-41\" title=\"Teen Wolf\"><a href=\"http:\/\/vserie.com\/serie\/25-teen-wolf\"><img src=\"http:\/\/vserie.com\/images\/s_s25_s.png\" alt=\"Teen Wolf\"><\/a><\/li>","42":"<li id=\"id-42\" title=\"Surviving Jack\"><a href=\"http:\/\/vserie.com\/serie\/178-surviving-jack\"><img src=\"http:\/\/vserie.com\/images\/s_s178_s.png\" alt=\"Surviving Jack\"><\/a><\/li>","43":"<li id=\"id-43\" title=\"Supernatural\"><a href=\"http:\/\/vserie.com\/serie\/68-supernatural\"><img src=\"http:\/\/vserie.com\/images\/s_s68_s.png\" alt=\"Supernatural\"><\/a><\/li>","44":"<li id=\"id-44\" title=\"Suits\"><a href=\"http:\/\/vserie.com\/serie\/131-suits\"><img src=\"http:\/\/vserie.com\/images\/s_s131_s.png\" alt=\"Suits\"><\/a><\/li>","45":"<li id=\"id-45\" title=\"Star-Crossed\"><a 
href=\"http:\/\/vserie.com\/serie\/154-star-crossed\"><img src=\"http:\/\/vserie.com\/images\/s_s154_s.png\" alt=\"Star-Crossed\"><\/a><\/li>","46":"<li id=\"id-46\" title=\"Sons of Anarchy\"><a href=\"http:\/\/vserie.com\/serie\/46-sons-of-anarchy\"><img src=\"http:\/\/vserie.com\/images\/s_s46_s.png\" alt=\"Sons of Anarchy\"><\/a><\/li>","47":"<li id=\"id-47\" title=\"Sleepy Hollow\"><a href=\"http:\/\/vserie.com\/serie\/52-sleepy-hollow\"><img src=\"http:\/\/vserie.com\/images\/s_s52_s.png\" alt=\"Sleepy Hollow\"><\/a><\/li>","48":"<li id=\"id-48\" title=\"Skins\"><a href=\"http:\/\/vserie.com\/serie\/36-skins\"><img src=\"http:\/\/vserie.com\/images\/s_s36_s.png\" alt=\"Skins\"><\/a><\/li>","49":"<li id=\"id-49\" title=\"Sirens\"><a href=\"http:\/\/vserie.com\/serie\/172-sirens\"><img src=\"http:\/\/vserie.com\/images\/s_s172_s.png\" alt=\"Sirens\"><\/a><\/li>","50":"<li id=\"id-50\" title=\"Sin identidad\"><a href=\"http:\/\/vserie.com\/serie\/199-sin-identidad\"><img src=\"http:\/\/vserie.com\/images\/s_s199_s.png\" alt=\"Sin identidad\"><\/a><\/li>","51":"<li id=\"id-51\" title=\"Silicon Valley\"><a href=\"http:\/\/vserie.com\/serie\/179-silicon-valley\"><img src=\"http:\/\/vserie.com\/images\/s_s179_s.png\" alt=\"Silicon Valley\"><\/a><\/li>","52":"<li id=\"id-52\" title=\"Siberia\"><a href=\"http:\/\/vserie.com\/serie\/39-siberia\"><img src=\"http:\/\/vserie.com\/images\/s_s39_s.png\" alt=\"Siberia\"><\/a><\/li>","53":"<li id=\"id-53\" title=\"Sherlock\"><a href=\"http:\/\/vserie.com\/serie\/103-sherlock\"><img src=\"http:\/\/vserie.com\/images\/s_s103_s.png\" alt=\"Sherlock\"><\/a><\/li>","54":"<li id=\"id-54\" title=\"Shameless\"><a href=\"http:\/\/vserie.com\/serie\/142-shameless\"><img src=\"http:\/\/vserie.com\/images\/s_s142_s.png\" alt=\"Shameless\"><\/a><\/li>","55":"<li id=\"id-55\" title=\"Salem\"><a href=\"http:\/\/vserie.com\/serie\/186-salem\"><img src=\"http:\/\/vserie.com\/images\/s_s186_s.png\" alt=\"Salem\"><\/a><\/li>","56":"<li id=\"id-56\" title=\"Rosemary's Baby (La semilla del diablo)\"><a href=\"http:\/\/vserie.com\/serie\/198-rosemary-039-s-baby-la-semilla-del-diablo\"><img src=\"http:\/\/vserie.com\/images\/s_s198_s.png\" alt=\"Rosemary's Baby (La semilla del diablo)\"><\/a><\/li>","57":"<li id=\"id-57\" title=\"Ripper Street\"><a href=\"http:\/\/vserie.com\/serie\/100-ripper-street\"><img src=\"http:\/\/vserie.com\/images\/s_s100_s.png\" alt=\"Ripper Street\"><\/a><\/li>","58":"<li id=\"id-58\" title=\"Revolution\"><a href=\"http:\/\/vserie.com\/serie\/62-revolution\"><img src=\"http:\/\/vserie.com\/images\/s_s62_s.png\" alt=\"Revolution\"><\/a><\/li>","59":"<li id=\"id-59\" title=\"Revenge\"><a href=\"http:\/\/vserie.com\/serie\/67-revenge\"><img src=\"http:\/\/vserie.com\/images\/s_s67_s.png\" alt=\"Revenge\"><\/a><\/li>","60":"<li id=\"id-60\" title=\"Resurrection\"><a href=\"http:\/\/vserie.com\/serie\/167-resurrection\"><img src=\"http:\/\/vserie.com\/images\/s_s167_s.png\" alt=\"Resurrection\"><\/a><\/li>","61":"<li id=\"id-61\" title=\"Remedy\"><a href=\"http:\/\/vserie.com\/serie\/161-remedy\"><img src=\"http:\/\/vserie.com\/images\/s_s161_s.png\" alt=\"Remedy\"><\/a><\/li>","62":"<li id=\"id-62\" title=\"Reign\"><a href=\"http:\/\/vserie.com\/serie\/92-reign\"><img src=\"http:\/\/vserie.com\/images\/s_s92_s.png\" alt=\"Reign\"><\/a><\/li>","63":"<li id=\"id-63\" title=\"Ray Donovan\"><a href=\"http:\/\/vserie.com\/serie\/44-ray-donovan\"><img src=\"http:\/\/vserie.com\/images\/s_s44_s.png\" alt=\"Ray Donovan\"><\/a><\/li>","64":"<li id=\"id-64\" 
title=\"Ravenswood\"><a href=\"http:\/\/vserie.com\/serie\/93-ravenswood\"><img src=\"http:\/\/vserie.com\/images\/s_s93_s.png\" alt=\"Ravenswood\"><\/a><\/li>","65":"<li id=\"id-65\" title=\"Psych\"><a href=\"http:\/\/vserie.com\/serie\/203-psych\"><img src=\"http:\/\/vserie.com\/images\/s_s203_s.png\" alt=\"Psych\"><\/a><\/li>","66":"<li id=\"id-66\" title=\"Pretty Little Liars (Pequeñas mentirosas)\"><a href=\"http:\/\/vserie.com\/serie\/38-pretty-little-liars-peque-ntilde-as-mentirosas\"><img src=\"http:\/\/vserie.com\/images\/s_s38_s.png\" alt=\"Pretty Little Liars (Pequeñas mentirosas)\"><\/a><\/li>","67":"<li id=\"id-67\" title=\"Power\"><a href=\"http:\/\/vserie.com\/serie\/205-power\"><img src=\"http:\/\/vserie.com\/images\/s_s205_s.png\" alt=\"Power\"><\/a><\/li>","68":"<li id=\"id-68\" title=\"Person of Interest\"><a href=\"http:\/\/vserie.com\/serie\/59-person-of-interest\"><img src=\"http:\/\/vserie.com\/images\/s_s59_s.png\" alt=\"Person of Interest\"><\/a><\/li>","69":"<li id=\"id-69\" title=\"Perdidos (Lost)\"><a href=\"http:\/\/vserie.com\/serie\/112-perdidos-lost\"><img src=\"http:\/\/vserie.com\/images\/s_s112_s.png\" alt=\"Perdidos (Lost)\"><\/a><\/li>","70":"<li id=\"id-70\" title=\"Perception\"><a href=\"http:\/\/vserie.com\/serie\/164-perception\"><img src=\"http:\/\/vserie.com\/images\/s_s164_s.png\" alt=\"Perception\"><\/a><\/li>","71":"<li id=\"id-71\" title=\"Penny Dreadful\"><a href=\"http:\/\/vserie.com\/serie\/195-penny-dreadful\"><img src=\"http:\/\/vserie.com\/images\/s_s195_s.png\" alt=\"Penny Dreadful\"><\/a><\/li>","72":"<li id=\"id-72\" title=\"Peaky Blinders\"><a href=\"http:\/\/vserie.com\/serie\/97-peaky-blinders\"><img src=\"http:\/\/vserie.com\/images\/s_s97_s.png\" alt=\"Peaky Blinders\"><\/a><\/li>","73":"<li id=\"id-73\" title=\"Orphan Black\"><a href=\"http:\/\/vserie.com\/serie\/158-orphan-black\"><img src=\"http:\/\/vserie.com\/images\/s_s158_s.png\" alt=\"Orphan Black\"><\/a><\/li>","74":"<li id=\"id-74\" title=\"Orange Is the New Black\"><a href=\"http:\/\/vserie.com\/serie\/13-orange-is-the-new-black\"><img src=\"http:\/\/vserie.com\/images\/s_s13_s.png\" alt=\"Orange Is the New Black\"><\/a><\/li>"} + rows = json_object["resultado"] + + data = "" + for row in rows: + # logger.info("rows[row]="+rows[row]) + data = data + rows[row] + + logger.info("data=" + repr(data)) + + # Extrae las entradas (carpetas) + patron = 'title="([^"]+)"[^<]+<a href="(http.//vserie.com/pelicula/[^"]+)"><img src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedtitle, scrapedurl, scrapedthumbnail in matches: + title = scrapertools.htmlclean(scrapedtitle) + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title)) + + if not "/paginador/" in item.url: + itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Página siguiente", + url="http://vserie.com/api/paginador/", extra="tipo=peliculas&last=40", viewmode="movie")) + else: + actual = scrapertools.find_single_match(item.extra, "last\=(\d+)") + siguiente = str(int(actual) + 35) + itemlist.append(Item(channel=item.channel, action="peliculas", title=">> Página siguiente", + url="http://vserie.com/api/paginador/", extra="tipo=peliculas&last=" + siguiente, + 
viewmode="movie")) + + return itemlist + + +def episodios(item): + logger.info() + + if config.get_setting("zampaseriesaccount") == True: + login() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + data = scrapertools.find_single_match(data, '<div id="listado">(.*?)</ul>'); + logger.info("data=" + data) + + # Extrae las entradas (carpetas) + patron = '<a href="([^"]+)">([^<]+)</a>' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for scrapedurl, scrapedtitle in matches: + title = scrapertools.htmlclean(scrapedtitle) + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=item.show)) + + return itemlist + + +def findvideos(item): + logger.info() + + if config.get_setting("zampaseriesaccount") == True: + login() + + # Descarga la pagina + data = scrapertools.cache_page(item.url) + # logger.info("data="+data) + + # Extrae las entradas (carpetas) + patron = '<tr[^<]+' + patron += '<td>([^<]*)</td[^<]+' + patron += '<td>([^<]*)</td[^<]+' + patron += '<td>([^<]*)</td[^<]+' + patron += '<td>([^<]*)</td[^<]+' + patron += '<td>[^<]*</td[^<]+' + patron += '<td>[^<]*</td[^<]+' + patron += '<td class="descarga"><a href="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for nombre_servidor, idioma, subs, calidad, scrapedurl in matches: + if subs.strip() == "": + subtitulos = "" + else: + subtitulos = scrapertools.htmlclean(" sub " + subs) + title = "Ver en " + nombre_servidor + " (" + scrapertools.htmlclean( + idioma) + subtitulos + ") (Calidad " + calidad.strip() + ")" + url = urlparse.urljoin(item.url, scrapedurl) + thumbnail = "" + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, extra=item.url, folder=False)) + + return itemlist + + +def play(item): + logger.info("url=" + item.url) + + if config.get_setting("zampaseriesaccount") == True: + login() + + headers = DEFAULT_HEADERS[:] + headers.append(["Referer", item.extra]) + + ''' + 21:32:07 T:4560547840 NOTICE: 'GET /serie/104-1x01-sincronizado/temporada-1/capitulo-1/17088" rel="nofollow" target="_blank" class="btn btn-success HTTP/1.1\r\nAccept-Encoding: identity\r\nReferer: http://vserie.com/serie/104-1x01-sincronizado/temporada-1/capitulo-1\r\nHost: vserie.com\r\nCookie: PHPSESSID=fed3f2fbe02705b186646e0a5b4692b8\r\nConnection: close\r\nUser-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12\r\n\r\n' + ''' + media_url = scrapertools.downloadpage(item.url, header_to_get="location", follow_redirects=False, headers=headers) + logger.info("media_url=" + media_url) + + itemlist = servertools.find_video_items(data=media_url) + + for videoitem in itemlist: + videoitem.title = item.title + videoitem.fulltitle = item.fulltitle + videoitem.thumbnail = item.thumbnail + videoitem.channel = item.channel + + return itemlist diff --git a/plugin.video.alfa/channels/wopelis.json b/plugin.video.alfa/channels/wopelis.json new file mode 100755 index 00000000..d017496e --- /dev/null +++ b/plugin.video.alfa/channels/wopelis.json @@ -0,0 +1,51 @@ +{ + "id": "wopelis", + "name": "WoPelis", + "active": true, + "adult": false, + "language": "es", + "banner": 
"https://github.com/master-1970/resources/raw/master/images/bannermenu/wopelis.png", + "fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/wopelis.png", + "thumbnail": "https://github.com/master-1970/resources/raw/master/images/squares/wopelis.png", + "version": 1, + "categories": [ + "movie", + "tvshow" + ], + "changes": [ + { + "date": "23/05/17", + "description": "Reparado findvideos, añadidos enlaces descargas" + }, + { + "date": "27/03/16", + "description": "Version inicial" + } + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Películas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Series", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/wopelis.py b/plugin.video.alfa/channels/wopelis.py new file mode 100755 index 00000000..50b454df --- /dev/null +++ b/plugin.video.alfa/channels/wopelis.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- + +import re + +from core import channeltools +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +HOST = 'http://www.wopelis.com' +__channel__ = 'wopelis' +parameters = channeltools.get_channel_parameters(__channel__) +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] +color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4'] + + +def mainlist(item): + logger.info() + itemlist = [] + item.url = HOST + item.text_color = color2 + item.fanart = fanart_host + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png" + url = HOST + "/galep.php?solo=cenlaces&empen=0" + itemlist.append(item.clone(title="Películas:", folder=False, text_color=color3, text_bold=True)) + itemlist.append(item.clone(title=" Recientes", action="listado", url=url)) + itemlist.append(item.clone(title=" Mas populares de la semana", action="listado", url=url + "&ord=popu")) + itemlist.append(item.clone(title=" Por géneros", action="generos", url=HOST + "/index.php")) + itemlist.append(item.clone(title=" Buscar película", action="search", url=url)) + + itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host)) + + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png" + url = HOST + "/gales.php?empen=0" + itemlist.append(item.clone(title="Series:", folder=False, text_color=color3, text_bold=True)) + itemlist.append(item.clone(title=" Nuevos episodios", action="listado", url=url + "&ord=reci")) + itemlist.append(item.clone(title=" Mas populares de la semana", action="listado", url=url + "&ord=popu")) + itemlist.append(item.clone(title=" Por géneros", action="generos", url=HOST + "/series.php")) + itemlist.append(item.clone(title=" Buscar serie", action="search", url=url + "&ord=popu")) + + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = HOST + "/galep.php?solo=cenlaces&empen=0" + + elif categoria == 'series': + item.url = HOST + "/gales.php?empen=0&ord=reci" + + 
else: + return [] + + itemlist = listado(item) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def search(item, texto): + logger.info("search:" + texto) + try: + if texto: + item.url = "%s&busqueda=%s" % (item.url, texto.replace(" ", "+")) + return listado(item) + else: + return [] + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def generos(item): + logger.info() + itemlist = [] + dict_gender = {"acción": "accion", "animación": "animacion", "ciencia ficción": "ciencia%20ficcion", + "fantasía": "fantasia", "música": "musica", "película de la televisión": "pelicula%20de%20tv"} + + data = downloadpage(item.url) + data = scrapertools.find_single_match(data, '<select name="gener">(.*?)</select>') + + for genero in scrapertools.find_multiple_matches(data, '<option value="([^"]+)'): + if genero != 'Todos': + if 'series' in item.url: + url = HOST + "/gales.php?empen=0&gener=%s" % genero + else: + url = HOST + "/galep.php?solo=cenlaces&empen=0&gener=%s" % genero + + thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/azul/%s.png" + thumbnail = thumbnail % dict_gender.get(genero.lower(), genero.lower()) + + itemlist.append(Item(channel=item.channel, action="listado", title=genero, url=url, text_color=color1, + contentType='movie', folder=True, + thumbnail=thumbnail)) # ,viewmode="movie_with_plot")) + + return sorted(itemlist, key=lambda i: i.title.lower()) + + +def listado(item): + logger.info(item) + itemlist = [] + + data = downloadpage(item.url) + + patron = '<a class="extended" href=".([^"]+).*?' + patron += '<img class="centeredPicFalse"([^>]+).*?' + patron += '<span class="year">(\d{4})</span>.*?' 
+ patron += '<span class="title">(.*?)</span>' + + for url, pic, year, title in scrapertools.find_multiple_matches(data, patron): + thumbnail = scrapertools.find_single_match(pic, 'src="([^"]+)') + if not thumbnail: + thumbnail = HOST + "/images/cover-notfound.png" + + new_item = Item(channel=__channel__, thumbnail=thumbnail, text_color=color2, infoLabels={"year": year}) + + if "galep.php" in item.url: + # movie + new_item.contentTitle = title + new_item.action = "findvideos" + new_item.url = HOST + url.replace('peli.php?id=', 'venlaces.php?npl=') + + + elif "gales.php" in item.url: + # tvshow + title = title.replace(' - 0x0', '') + new_item.contentSerieName = title + new_item.action = "temporadas" + new_item.url = HOST + url + if "ord=reci" in item.url: + # episode + season_episode = scrapertools.get_season_and_episode(title) + if season_episode: + new_item.contentSeason, new_item.contentEpisodeNumber = season_episode.split('x') + new_item.action = "get_episodio" + new_item.contentSerieName = title.split('-', 1)[1].strip() + + elif "gener=" in item.url and scrapertools.get_season_and_episode(title): + # Las series filtrada por genero devuelven capitulos y series completas + title = title.split('-', 1)[1].strip() + new_item.contentSerieName = title + + else: + return [] + + new_item.title = "%s (%s)" % (title, year) + + itemlist.append(new_item) + + if itemlist: + # Obtenemos los datos basicos mediante multihilos + tmdb.set_infoLabels(itemlist) + + # Si es necesario añadir paginacion + if len(itemlist) == 35: + empen = scrapertools.find_single_match(item.url, 'empen=(\d+)') + url_next_page = item.url.replace('empen=%s' % empen, 'empen=%s' % (int(empen) + 35)) + itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", + thumbnail=thumbnail_host, url=url_next_page, folder=True, + text_color=color3, text_bold=True)) + + return itemlist + + +def temporadas(item): + logger.info(item) + itemlist = [] + + data = downloadpage(item.url) + patron = '<div class="checkSeason" data-num="([^"]+)[^>]+>([^<]+)' + + for num_season, title in scrapertools.find_multiple_matches(data, patron): + itemlist.append(item.clone(contentSeason=num_season, title="%s - %s" % (item.contentSerieName, title), + action="episodios")) + + if itemlist: + # Obtenemos los datos de las temporadas mediante multihilos + tmdb.set_infoLabels(itemlist) + + if config.get_videolibrary_support(): + itemlist.append(item.clone(title="Añadir esta serie a la videoteca", + action="add_serie_to_library", extra="episodios", + text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) + + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + + data = downloadpage(item.url) + patron = '<div class="checkSeason" data-num="([^"]+)(.*?)</div></div></div>' + for num_season, data in scrapertools.find_multiple_matches(data, patron): + if item.contentSeason and item.contentSeason != int(num_season): + # Si buscamos los episodios de una temporada concreta y no es esta (num_season)... + continue + + patron = '<div class="info"><a href="..([^"]+).*?class="number">([^<]+)' + for url, num_episode in scrapertools.find_multiple_matches(data, patron): + if item.contentEpisodeNumber and item.contentEpisodeNumber != int(num_episode): + # Si buscamos un episodio concreto y no es este (num_episode)... 
+                continue
+
+            title = "%sx%s - %s" % (num_season, num_episode.strip().zfill(2), item.contentSerieName)
+            itemlist.append(item.clone(title=title, url=HOST + url, action="findvideos",
+                                       contentSeason=num_season, contentEpisodeNumber=num_episode))
+
+    if itemlist and hasattr(item, 'contentSeason'):
+        # Fetch this season's episode data using multiple threads
+        tmdb.set_infoLabels(itemlist)
+
+        for i in itemlist:
+            if i.infoLabels['title']:
+                # If the episode has a name of its own, append it to the item title
+                i.title = "%sx%s %s" % (
+                    i.infoLabels['season'], str(i.infoLabels['episode']).zfill(2), i.infoLabels['title'])
+
+    return itemlist
+
+
+def get_episodio(item):
+    logger.info()
+    itemlist = episodios(item)
+    if itemlist:
+        itemlist = findvideos(itemlist[0])
+
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    dic_langs = {'esp': 'Español', 'english': 'Ingles', 'japo': 'Japones', 'argentina': 'Latino', 'ntfof': ''}
+    dic_servers = {'ntfof': 'Servidor Desconocido', 'stramango': 'streamango', 'flasht': 'flashx'}
+
+    data1 = downloadpage(item.url)
+    patron = 'onclick="redir\(([^\)]+).*?'
+    patron += '<img style="float:left" src="./[^/]+/([^\.]+).+?'
+    patron += '<span[^>]+>([^<]+).*?'
+    patron += '<img(.*?)onerror'
+
+    if "Descarga:</h1>" in data1:
+        list_showlinks = [('Online:', 'Online:</h1>(.*?)Descarga:</h1>'),
+                          ('Download:', 'Descarga:</h1>(.*?)</section>')]
+    else:
+        list_showlinks = [('Online:', 'Online:</h1>(.*?)</section>')]
+
+    for t in list_showlinks:
+        data = scrapertools.find_single_match(data1, t[1])
+
+        if data:
+            itemlist.append(Item(title=t[0], text_color=color3, text_bold=True,
+                                 folder=False, thumbnail=thumbnail_host))
+
+        for redir, server, quality, langs in scrapertools.find_multiple_matches(data, patron):
+            redir = redir.split(",")
+            url = redir[0][1:-1]
+            id = redir[1][1:-1]
+            # type = redir[2][1:-1]
+            # url = url.split("','")[0]  # [2] = 0 movies, [2] = 1 tvshows
+
+            langs = scrapertools.find_multiple_matches(langs, 'src="./images/([^\.]+)')
+            idioma = dic_langs.get(langs[0], langs[0])
+            subtitulos = dic_langs.get(langs[1], langs[1])
+            if subtitulos:
+                idioma = "%s (Sub: %s)" % (idioma, subtitulos)
+
+            if server in dic_servers: server = dic_servers[server]
+
+            itemlist.append(
+                item.clone(url=url, action="play", language=idioma, contentQuality=quality, server=server,
+                           title="    %s: %s [%s]" % (server.capitalize(), idioma, quality)))
+
+    if itemlist and config.get_videolibrary_support() and "library" not in item.extra:
+        if item.contentType == 'movie':
+            itemlist.append(item.clone(title="Añadir película a la videoteca",
+                                       action="add_pelicula_to_library", text_color=color1,
+                                       contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
+        else:
+            # http://www.wopelis.com/serie.php?id=275641
+            item.url = "http://www.wopelis.com/serie.php?id=" + id
+            item.contentSeason = 0
+            item.contentEpisodeNumber = 0
+            # logger.error(item)
+            itemlist.append(item.clone(title="Añadir esta serie a la videoteca",
+                                       action="add_serie_to_library", extra="episodios###library",
+                                       text_color=color1, thumbnail=thumbnail_host))
+
+    return itemlist
+
+
+def play(item):
+    logger.info()
+    itemlist = []
+
+    # First try to match the video against the server already identified in findvideos() ...
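+    # findvideosbyserver() only tests item.url against the URL patterns of that
+    # one server, so it is much cheaper than a full scan. Judging by how
+    # "devuelve" is used below, it returns a list of (label, url, server_id)
+    # tuples: devuelve[0][1] is the playable URL and devuelve[0][2] the server id.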
+    devuelve = servertools.findvideosbyserver(item.url, item.server)
+    if not devuelve:
+        # ... and if it is not found there, fall back to scanning all available servers
+        devuelve = servertools.findvideos(item.url, skip=True)
+
+    if devuelve:
+        # logger.debug(devuelve)
+        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
+                             url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
+
+    return itemlist
+
+
+def downloadpage(url):
+    # Reuse the stored anti-bot cookie; build it with get_cookie() on first use
+    cookievalue = config.get_setting("cookie", "wopelis")
+    if not cookievalue:
+        data = httptools.downloadpage(url).data
+        cookievalue = get_cookie(data)
+
+    headers = {'Cookie': '%s' % cookievalue}
+    data = httptools.downloadpage(url, headers=headers).data
+    if "Hola bienvenido" in data:
+        # The stored cookie is no longer accepted (the site served its welcome
+        # page again), so rebuild it and retry
+        cookievalue = get_cookie(data)
+        headers = {'Cookie': '%s' % cookievalue}
+        data = httptools.downloadpage(url, headers=headers).data
+
+    return re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+
+
+def get_cookie(data):
+    # Replicate the JavaScript cookie challenge served by the site: the cookie
+    # name comes from the document.cookie assignment, and its value mixes the
+    # literal characters embedded in cok() with runs of random characters
+    # drawn from the "possible" alphabet
+    import random
+    cookievalue = ""
+    cookiename = scrapertools.find_single_match(data, 'document.cookie\s*=\s*"([^"]+)"')
+    cookiename = cookiename.replace("=", "")
+    posible = scrapertools.find_single_match(data, 'var possible\s*=\s*"([^"]+)"')
+    bloque = scrapertools.find_single_match(data, 'function cok(.*?);')
+    lengths = scrapertools.find_multiple_matches(bloque, '([\S]{1}\d+)')
+    for numero in lengths:
+        if numero.startswith("("):
+            for i in range(0, int(numero[1:])):
+                cookievalue += posible[int(random.random() * len(posible))]
+        else:
+            cookievalue += numero[1:]
+
+    cookievalue = "%s=%s" % (cookiename, cookievalue)
+    config.set_setting("cookie", cookievalue, "wopelis")
+
+    return cookievalue
diff --git a/plugin.video.alfa/channels/x18hentai.json b/plugin.video.alfa/channels/x18hentai.json
new file mode 100755
index 00000000..4a03b57e
--- /dev/null
+++ b/plugin.video.alfa/channels/x18hentai.json
@@ -0,0 +1,33 @@
+{
+  "id": "x18hentai",
+  "name": "18HentaiOnline",
+  "active": true,
+  "adult": true,
+  "language": "es",
+  "banner": "https://s32.postimg.org/lafs9vgxh/18hentaionline_banner.png",
+  "thumbnail": "https://s32.postimg.org/fui7jdg9x/18hentaionline.png",
+  "version": 1,
+  "categories": [
+    "adult"
+  ],
+  "changes": [
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "04/01/2017",
+      "description": "Release."
+ } + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/x18hentai.py b/plugin.video.alfa/channels/x18hentai.py new file mode 100755 index 00000000..4f93f8f0 --- /dev/null +++ b/plugin.video.alfa/channels/x18hentai.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +import re + +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item + +host = 'http://www.18hentaionline.eu/' +headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], + ['Referer', host]] + + +def mainlist(item): + logger.info() + + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Todos", action="todas", url=host, thumbnail='', fanart='')) + + itemlist.append( + Item(channel=item.channel, title="Sin Censura", action="todas", url=host + 'tag/sin-censura/', thumbnail='', + fanart='')) + + itemlist.append( + Item(channel=item.channel, title="Estrenos", action="todas", url=host + 'category/estreno/', thumbnail='', + fanart='')) + + itemlist.append( + Item(channel=item.channel, title="Categorias", action="categorias", url=host, thumbnail='', fanart='')) + + return itemlist + + +def todas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url, headers=headers).data + patron = '<h3><a href="([^"]+)" title="([^"]+)">.*?<\/a><\/h3>.*?' + patron += '<.*?>.*?' + patron += '<a.*?img src="([^"]+)" alt' + + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + url = scrapedurl + title = scrapedtitle.decode('utf-8') + thumbnail = scrapedthumbnail + fanart = '' + itemlist.append( + Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, fanart=fanart)) + + # Paginacion + title = '' + siguiente = scrapertools.find_single_match(data, + '<a rel="nofollow" class="next page-numbers" href="([^"]+)">Siguiente »<\/a><\/div>') + title = 'Pagina Siguiente >>> ' + fanart = '' + itemlist.append(Item(channel=item.channel, action="todas", title=title, url=siguiente, fanart=fanart)) + return itemlist + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + + if texto != '': + return todas(item) + else: + return [] + + +def categorias(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url, headers=headers).data + patron = "<a href='([^']+)' class='tag-link-.*? tag-link-position-.*?' title='.*?' 
style='font-size: 11px;'>([^<]+)<\/a>" + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = scrapedurl + title = scrapedtitle + itemlist.append(Item(channel=item.channel, action="todas", title=title, fulltitle=item.fulltitle, url=url)) + + return itemlist + + +def episodios(item): + censura = {'Si': 'con censura', 'No': 'sin censura'} + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url, headers=headers).data + patron = '<td>([^<]+)<\/td>.<td>([^<]+)<\/td>.<td>([^<]+)<\/td>.<td>([^<]+)<\/td>.<td><a href="([^"]+)".*?>Ver Capitulo<\/a><\/td>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedcap, scrapedaud, scrapedsub, scrapedcen, scrapedurl in matches: + url = scrapedurl + title = 'CAPITULO ' + scrapedcap + ' AUDIO: ' + scrapedaud + ' SUB:' + scrapedsub + ' ' + censura[scrapedcen] + thumbnail = '' + plot = '' + fanart = '' + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.fulltitle, url=url, + thumbnail=item.thumbnail, plot=plot)) + + return itemlist diff --git a/plugin.video.alfa/channels/xdvideos.json b/plugin.video.alfa/channels/xdvideos.json new file mode 100755 index 00000000..f85eca1d --- /dev/null +++ b/plugin.video.alfa/channels/xdvideos.json @@ -0,0 +1,19 @@ +{ + "id": "xdvideos", + "name": "XDVideos", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/vKcZxXS.png", + "version": 1, + "changes": [ + { + "date": "18/05/2017", + "description": "Primera Version" + } + ], + "categories": [ + "tvshow", + "latino" + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/xdvideos.py b/plugin.video.alfa/channels/xdvideos.py new file mode 100755 index 00000000..4766a4a8 --- /dev/null +++ b/plugin.video.alfa/channels/xdvideos.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- + +import re + +from channelselector import get_thumb +from core import httptools +from core import logger +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item + +host = "http://xdvideos.org/" + + +def mainlist(item): + logger.info() + thumb_series = get_thumb("thumb_channels_tvshow.png") + + itemlist = list() + + itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", url=host, + thumbnail=thumb_series)) + return itemlist + + +def categorias(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '<ul id="menu-menu-2" class="menu">(.+)>Problemas' + data = scrapertools.find_single_match(data, patron) + patron_cat = '<li id="menu-item-[^"]+" class=".+?"><a href="([^"]+)">([^"]+)<\/a><ulclass="sub-menu">' + matches = scrapertools.find_multiple_matches(data, patron_cat) + for url, name in matches: + if name != 'Clasicos': + title = name + itemlist.append(item.clone(title=title, url=url, action="lista", show=title)) + return itemlist + + +def lista(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + category = item.title + patron = '<li id="menu-item-[^"]+" class="menu-item menu-item-type-post_type menu-item-object-page current-menu-item page_item page-item-[^"]+ current_page_item menu-item-has-children menu-item-[^"]+"><a href="[^"]+">[^"]+<\/a>(.+?)<\/a><\/li><\/ul><\/li>' + content = scrapertools.find_single_match(data, patron) + patron_lista = '<a 
href="([^"]+)">([^"]+)<\/a>' + match_series = scrapertools.find_multiple_matches(content, patron_lista) + for url, title in match_series: + if "(" in title: + show_dual = title.split("(") + show = show_dual[1] + if ")" in show: + show = show.rstrip(")") + else: + show = title + itemlist.append(item.clone(title=title, url=url, action="episodios", show=show, plot=show)) + tmdb.set_infoLabels(itemlist) + return itemlist + + +def episodios(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + scrapedshow, scrapedthumbnail = scrapertools.find_single_match(data, + '<h1 class="entry-title">([^"]+)<\/h1>.+?<img .+? src="([^"]+)"') + data = scrapertools.find_single_match(data, '<div class="entry-content">(.+?)<div id="wpdevar') + patron_caps = '<a href="([^"]+)">([^"]+)<\/a>' + matches = scrapertools.find_multiple_matches(data, patron_caps) + i = 0 + for url, name in matches: + i = i + 1 + if i < 10: + title = "1x0" + str(i) + " " + name + else: + title = "1x" + str(i) + " " + name + itemlist.append( + item.clone(title=title, url=url, action="findvideos", show=scrapedshow, thumbnail=scrapedthumbnail)) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + + data = httptools.downloadpage(item.url).data + itemlist.extend(servertools.find_video_items(data=data)) + scrapedthumbnail = scrapertools.find_single_match(data, 'src="([^"]+)"') + for videoitem in itemlist: + videoitem.channel = item.channel + videoitem.thumbnail = scrapedthumbnail + + return itemlist diff --git a/plugin.video.alfa/channels/xhamster.json b/plugin.video.alfa/channels/xhamster.json new file mode 100755 index 00000000..1958c90f --- /dev/null +++ b/plugin.video.alfa/channels/xhamster.json @@ -0,0 +1,37 @@ +{ + "id": "xhamster", + "name": "xhamster", + "active": true, + "adult": true, + "language": "es", + "banner": "xhamster.png", + "thumbnail": "xhamster.png", + "version": 1, + "changes": [ + { + "date": "03/06/2017", + "description": "añadido soporte multiples calidades" + }, + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "05/08/2016", + "description": "Eliminado de sección películas." 
+    }
+  ],
+  "categories": [
+    "adult"
+  ],
+  "settings": [
+    {
+      "id": "include_in_global_search",
+      "type": "bool",
+      "label": "Incluir en busqueda global",
+      "default": true,
+      "enabled": true,
+      "visible": true
+    }
+  ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/xhamster.py b/plugin.video.alfa/channels/xhamster.py
new file mode 100755
index 00000000..5ff96349
--- /dev/null
+++ b/plugin.video.alfa/channels/xhamster.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+
+import re
+
+from core import logger
+from core import scrapertools
+from core.item import Item
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+    itemlist.append(Item(channel=item.channel, action="videos", title="Últimos vídeos", url="http://es.xhamster.com/",
+                         viewmode="movie"))
+    itemlist.append(Item(channel=item.channel, action="categorias", title="Categorías"))
+    itemlist.append(Item(channel=item.channel, action="votados", title="Más votados"))
+    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
+                         url="http://xhamster.com/search.php?q=%s&qcat=video"))
+    return itemlist
+
+
+# THIS REALLY JUST FILLS THE SEARCH TERM INTO THE SEARCH URL
+
+def search(item, texto):
+    logger.info()
+    tecleado = texto.replace(" ", "+")
+    item.url = item.url % tecleado
+    item.extra = "buscar"
+    try:
+        return videos(item)
+    # The exception is caught so that one failing channel does not break the global search
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+# SECTION IN CHARGE OF SCRAPING THE LISTINGS
+
+def videos(item):
+    logger.info()
+    data = scrapertools.cache_page(item.url)
+    itemlist = []
+
+    data = scrapertools.get_match(data, '<div class="boxC videoList clearfix">(.*?)<div id="footer">')
+
+    # Pattern #1
+    patron = '<div class="video"><a href="([^"]+)" class="hRotator">' + "<img src='([^']+)' class='thumb'" + ' alt="([^"]+)"'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
+        itemlist.append(
+            Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+                 folder=True))
+
+    # Pattern #2
+    patron = '<a href="([^"]+)" data-click="[^"]+" class="hRotator"><img src=\'([^\']+)\' class=\'thumb\' alt="([^"]+)"/>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
+        itemlist.append(
+            Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+                 folder=True))
+
+    # Pager
+    patron = "<a href='([^']+)' class='last colR'><div class='icon iconPagerNextHover'></div>Próximo</a>"
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    if len(matches) > 0:
+        itemlist.append(
+            Item(channel=item.channel, action="videos", title="Página Siguiente", url=matches[0], thumbnail="",
+                 folder=True, viewmode="movie"))
+
+    return itemlist
+
+
+# SECTION THAT DUMPS THE LIST OF CATEGORIES WITH THE LINK FOR EACH PAGE
+
+def categorias(item):
+    logger.info()
+    itemlist = []
+
+    itemlist.append(
+        Item(channel=item.channel, action="lista", title="Heterosexual", url="http://es.xhamster.com/channels.php"))
+    itemlist.append(
+        Item(channel=item.channel, action="lista", title="Transexuales", url="http://es.xhamster.com/channels.php"))
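+    # All three entries share the same channels.php URL on purpose: lista()
+    # below picks out the right block for each one by matching the section
+    # whose <div class="title"> text equals item.title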
+    itemlist.append(Item(channel=item.channel, action="lista", title="Gays", url="http://es.xhamster.com/channels.php"))
+    return itemlist
+
+
+def votados(item):
+    logger.info()
+    itemlist = []
+
+    itemlist.append(Item(channel=item.channel, action="videos", title="Día",
+                         url="http://es.xhamster.com/rankings/daily-top-videos.html", viewmode="movie"))
+    itemlist.append(Item(channel=item.channel, action="videos", title="Semana",
+                         url="http://es.xhamster.com/rankings/weekly-top-videos.html", viewmode="movie"))
+    itemlist.append(Item(channel=item.channel, action="videos", title="Mes",
+                         url="http://es.xhamster.com/rankings/monthly-top-videos.html", viewmode="movie"))
+    itemlist.append(Item(channel=item.channel, action="videos", title="De siempre",
+                         url="http://es.xhamster.com/rankings/alltime-top-videos.html", viewmode="movie"))
+    return itemlist
+
+
+def lista(item):
+    logger.info()
+    itemlist = []
+    data = scrapertools.downloadpageGzip(item.url)
+    # data = data.replace("\n","")
+    # data = data.replace("\t","")
+
+    if item.title == "Gays":
+        data = scrapertools.get_match(data,
+                                      '<div class="title">' + item.title + '</div>.*?<div class="list">(.*?)<div id="footer">')
+    else:
+        data = scrapertools.get_match(data,
+                                      '<div class="title">' + item.title + '</div>.*?<div class="list">(.*?)<div class="catName">')
+    # Strip the inner <div> blocks first, then pull the category links from what is left
+    patron = '(<div.*?</div>)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for match in matches:
+        data = data.replace(match, "")
+    patron = 'href="([^"]+)">(.*?)</a>'
+    data = ' '.join(data.split())
+    logger.info(data)
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedtitle in matches:
+        itemlist.append(Item(channel=item.channel, action="videos", title=scrapedtitle, url=scrapedurl, folder=True,
+                             viewmode="movie"))
+
+    sorted_itemlist = sorted(itemlist, key=lambda i: i.title)
+    return sorted_itemlist
+
+
+# EXTRACTS THE LINKS ACCORDING TO THE VIDEO PATTERNS AND PAIRS EACH ONE WITH ITS SERVER
+def play(item):
+    logger.info()
+    itemlist = []
+
+    data = scrapertools.cachePage(item.url)
+    logger.debug(data)
+
+    # One direct [label, url] pair per available resolution
+    patron = '"([0-9]+p)":"([^"]+)"'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for res, url in matches:
+        url = url.replace("\\", "")
+        logger.debug("url=" + url)
+        itemlist.append(["%s %s [directo]" % (res, scrapertools.get_filename_from_url(url)[-4:]), url])
+
+    return itemlist
diff --git a/plugin.video.alfa/channels/yaske.json b/plugin.video.alfa/channels/yaske.json
new file mode 100755
index 00000000..4f460979
--- /dev/null
+++ b/plugin.video.alfa/channels/yaske.json
@@ -0,0 +1,67 @@
+{
+  "id": "yaske",
+  "name": "Yaske",
+  "active": true,
+  "adult": false,
+  "language": "es",
+  "banner": "yaske.png",
+  "fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/yaske.png",
+  "thumbnail": "yaske.png",
+  "version": 1,
+  "changes": [
+    {
+      "date": "27/06/17",
+      "description": "Desactivar por falta de contenidos"
+    },
+    {
+      "date": "04/06/17",
+      "description": "Desactivar por falta de contenidos"
+    },
+    {
+      "date": "15/03/2017",
+      "description": "limpieza código"
+    },
+    {
+      "date": "01/02/17",
+      "description": "Añadir imagenes, sinopsis, etc..."
+    },
+    {
+      "date": "18/01/17",
+      "description": "Uso de httptools"
+    },
+    {
+      "date": "12/12/16",
+      "description": "Cambios en la web"
+    },
+    {
+      "date": "01/07/16",
+      "description": "Eliminado código innecesario."
+ }, + { + "date": "29/04/16", + "description": "Adaptar a Novedades Peliculas e Infantiles" + } + ], + "categories": [ + "latino", + "movie" + ], + "settings": [ + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": false, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/yaske.py b/plugin.video.alfa/channels/yaske.py new file mode 100755 index 00000000..99e3d9aa --- /dev/null +++ b/plugin.video.alfa/channels/yaske.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- + +import re + +from core import channeltools +from core import config +from core import httptools +from core import logger +from core import scrapertoolsV2 +from core import servertools +from core import tmdb +from core.item import Item + +HOST = 'http://www.yaske.ro' +parameters = channeltools.get_channel_parameters('yaske') +fanart_host = parameters['fanart'] +thumbnail_host = parameters['thumbnail'] +color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'] + + +def mainlist(item): + logger.info() + itemlist = [] + item.url = HOST + item.text_color = color2 + item.fanart = fanart_host + thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" + + itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies', + url=HOST + "/ultimas-y-actualizadas", + thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot")) + itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True, + url=HOST + "/genre/premieres", thumbnail=thumbnail % 'estrenos')) + itemlist.append(item.clone(title="", folder=False)) + + itemlist.append(Item(channel=item.channel, title="Filtrar por:", fanart=fanart_host, folder=False, + text_color=color3, text_bold=True, thumbnail=thumbnail_host)) + itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", text_color=color1, text_italic=True, + extra="genre", thumbnail=thumbnail % 'generos', viewmode="thumbnails")) + itemlist.append(item.clone(title=" Idioma", action="menu_buscar_contenido", text_color=color1, text_italic=True, + extra="audio", thumbnail=thumbnail % 'idiomas')) + itemlist.append(item.clone(title=" Calidad", action="menu_buscar_contenido", text_color=color1, text_italic=True, + extra="quality", thumbnail=thumbnail % 'calidad')) + itemlist.append(item.clone(title=" Año", action="menu_buscar_contenido", text_color=color1, text_italic=True, + extra="year", thumbnail=thumbnail % 'year')) + + itemlist.append(item.clone(title="", folder=False)) + itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar')) + + return itemlist + + +def search(item, texto): + logger.info() + itemlist = [] + + try: + # http://www.yaske.ro/search/?q=los+pitufos + item.url = HOST + "/search/?q=" + texto.replace(' ', '+') + item.extra = "" + itemlist.extend(peliculas(item)) + if itemlist[-1].title == ">> Página siguiente": + item_pag = itemlist[-1] + itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle) + itemlist.append(item_pag) + else: + itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle) + + return itemlist + + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def 
newest(categoria): + logger.info() + item = Item() + try: + if categoria == 'peliculas': + item.url = HOST + "/ultimas-y-actualizadas" + elif categoria == 'infantiles': + item.url = HOST + "/search/?q=&genre%5B%5D=animation" + else: + return [] + + itemlist = peliculas(item) + if itemlist[-1].title == ">> Página siguiente": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + url_next_page = "" + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<article class.*?' + patron += '<a href="([^"]+)">.*?' + patron += '<img src="([^"]+)".*?' + patron += '<aside class="item-control down">(.*?)</aside>.*?' + patron += '<small class="pull-right text-muted">([^<]+)</small>.*?' + patron += '<h2 class.*?>([^<]+)</h2>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + # Paginacion + if item.next_page != 'b': + if len(matches) > 30: + url_next_page = item.url + matches = matches[:30] + next_page = 'b' + else: + matches = matches[30:] + next_page = 'a' + patron_next_page = 'Anteriores</a> <a href="([^"]+)" class="btn btn-default ".*?Siguiente' + matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data) + if len(matches_next_page) > 0: + url_next_page = matches_next_page[0] + + for scrapedurl, scrapedthumbnail, idiomas, year, scrapedtitle in matches: + patronidiomas = "<img src='([^']+)'" + matchesidiomas = re.compile(patronidiomas, re.DOTALL).findall(idiomas) + + idiomas_disponibles = [] + for idioma in matchesidiomas: + if idioma.endswith("la_la.png"): + idiomas_disponibles.append("LAT") + elif idioma.endswith("en_en.png"): + idiomas_disponibles.append("VO") + elif idioma.endswith("en_es.png"): + idiomas_disponibles.append("VOSE") + elif idioma.endswith("es_es.png"): + idiomas_disponibles.append("ESP") + + if idiomas_disponibles: + idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]" + + contentTitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle.strip()) + title = "%s %s" % (contentTitle, idiomas_disponibles) + + itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, + thumbnail=scrapedthumbnail, contentTitle=contentTitle, + infoLabels={"year": year}, text_color=color1)) + + # Obtenemos los datos basicos de todas las peliculas mediante multihilos + tmdb.set_infoLabels(itemlist) + + # Si es necesario añadir paginacion + if url_next_page: + itemlist.append( + Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host, + url=url_next_page, next_page=next_page, folder=True, text_color=color3, text_bold=True)) + + return itemlist + + +def menu_buscar_contenido(item): + logger.info(item) + + data = httptools.downloadpage(item.url).data + patron = '<select name="' + item.extra + '(.*?)</select>' + data = scrapertoolsV2.get_match(data, patron) + + # Extrae las entradas + patron = "<option value='([^']+)'>([^<]+)</option>" + matches = re.compile(patron, re.DOTALL).findall(data) + + itemlist = [] + for scrapedvalue, scrapedtitle in matches: + thumbnail = "" + + if item.extra == 'genre': + if scrapedtitle.strip() in ['Documental', 'Short', 'News']: + continue + + url = HOST + "/search/?q=&genre%5B%5D=" + scrapedvalue + filename = scrapedtitle.lower().replace(' ', '%20') + if filename == "ciencia%20ficción": + 
filename = "ciencia%20ficcion" + thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png" \ + % filename + + elif item.extra == 'year': + url = HOST + "/search/?q=&year=" + scrapedvalue + thumbnail = item.thumbnail + else: + # http://www.yaske.ro/search/?q=&quality%5B%5D=c9 + # http://www.yaske.ro/search/?q=&audio%5B%5D=es + url = HOST + "/search/?q=&" + item.extra + "%5B%5D=" + scrapedvalue + thumbnail = item.thumbnail + + itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, text_color=color1, + thumbnail=thumbnail, contentType='movie', folder=True, viewmode="movie_with_plot")) + + if item.extra in ['genre', 'audio', 'year']: + return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year') + else: + return itemlist + + +def findvideos(item): + logger.info() + itemlist = list() + sublist = list() + + # Descarga la página + data = httptools.downloadpage(item.url).data + + if not item.plot: + item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis</dt> <dd>([^<]+)</dd>') + item.plot = scrapertoolsV2.decodeHtmlentities(item.plot) + + patron = '<option value="([^"]+)"[^>]+' + patron += '>([^<]+).*?</i>([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for url, idioma, calidad in matches: + sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(), + language=idioma.strip())) + + sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True) + + # Añadir servidores encontrados, agrupandolos por idioma + for k in ["Español", "Latino", "Subtitulado", "Ingles"]: + lista_idioma = filter(lambda i: i.language == k, sublist) + if lista_idioma: + itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False, + text_color=color2, text_bold=True, thumbnail=thumbnail_host)) + itemlist.extend(lista_idioma) + + # Insertar items "Buscar trailer" y "Añadir a la videoteca" + if itemlist and item.extra != "library": + title = "%s [Buscar trailer]" % (item.contentTitle) + itemlist.insert(0, item.clone(channel="trailertools", action="buscartrailer", + text_color=color3, title=title, viewmode="list")) + + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca", + action="add_pelicula_to_library", url=item.url, text_color="green", + contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host)) + + return itemlist diff --git a/plugin.video.alfa/channels/youtube_channel.py b/plugin.video.alfa/channels/youtube_channel.py new file mode 100755 index 00000000..ce5146a5 --- /dev/null +++ b/plugin.video.alfa/channels/youtube_channel.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- + +import urllib + +from core import jsontools +from core import logger +from core import scrapertools +from core.item import Item + +CHANNELNAME = "youtube_channel" +YOUTUBE_V3_API_KEY = "AIzaSyCjsmBT0JZy1RT-PLwB-Zkfba87sa2inyI" + + +def youtube_api_call(method, parameters): + logger.info("method=" + method + ", parameters=" + repr(parameters)) + + encoded_parameters = urllib.urlencode(parameters) + + url = "https://www.googleapis.com/youtube/v3/" + method + "?" 
+ encoded_parameters + "&key=" + YOUTUBE_V3_API_KEY
+    logger.info("url=" + url)
+
+    data = scrapertools.cache_page(url)
+    logger.info("data=" + data)
+
+    json_object = jsontools.load(data)
+
+    return json_object
+
+
+def youtube_get_user_playlists(user_id, pageToken=""):
+    # First, find out the channel_id from the user name
+    json_object = youtube_api_call("channels", {"part": "id", "forUsername": user_id})
+    channel_id = json_object["items"][0]["id"]
+
+    # Then fetch the user's list of playlists
+    json_object = youtube_api_call("playlists",
+                                   {"part": "snippet,contentDetails", "channelId": channel_id, "maxResults": 50,
+                                    "pageToken": pageToken})
+
+    return json_object
+
+
+def youtube_get_playlist_items(playlist_id, pageToken=""):
+    json_object = youtube_api_call("playlistItems", {"part": "snippet", "playlistId": playlist_id, "maxResults": 50,
+                                                     "pageToken": pageToken})
+
+    return json_object
+
+
+# Show all YouTube playlists for the selected channel
+def playlists(item, channel_id, pageToken=""):
+    logger.info()
+    itemlist = []
+
+    json_object = youtube_get_user_playlists(channel_id, pageToken)
+
+    for entry in json_object["items"]:
+        logger.info("entry=" + repr(entry))
+
+        title = entry["snippet"]["title"]
+        plot = entry["snippet"]["description"]
+        thumbnail = entry["snippet"]["thumbnails"]["high"]["url"]
+        url = entry["id"]
+
+        # Appends a new item to the xbmc item list
+        itemlist.append(Item(channel=CHANNELNAME, title=title, action="videos", url=url, thumbnail=thumbnail, plot=plot,
+                             folder=True))
+
+    try:
+        # "nextPageToken" is only present while more result pages remain,
+        # so the recursion stops naturally on the last page
+        nextPageToken = json_object["nextPageToken"]
+        itemlist.extend(playlists(item, channel_id, nextPageToken))
+    except:
+        import traceback
+        logger.error(traceback.format_exc())
+
+    return itemlist
+
+
+def latest_videos(item, channel_id):
+    # Legacy gdata v2 uploads feed with the channel's latest videos
+    item.url = "http://gdata.youtube.com/feeds/api/users/" + channel_id + "/uploads?v=2&start-index=1&max-results=30"
+    return videos(item)
+
+
+# Show all YouTube videos for the selected playlist
+def videos(item, pageToken=""):
+    logger.info()
+    itemlist = []
+
+    json_object = youtube_get_playlist_items(item.url, pageToken)
+
+    for entry in json_object["items"]:
+        logger.info("entry=" + repr(entry))
+
+        title = entry["snippet"]["title"]
+        plot = entry["snippet"]["description"]
+        thumbnail = entry["snippet"]["thumbnails"]["high"]["url"]
+        url = entry["snippet"]["resourceId"]["videoId"]
+
+        # Appends a new item to the xbmc item list
+        itemlist.append(
+            Item(channel=CHANNELNAME, title=title, action="play", server="youtube", url=url, thumbnail=thumbnail,
+                 plot=plot, folder=False))
+
+    try:
+        nextPageToken = json_object["nextPageToken"]
+        itemlist.extend(videos(item, nextPageToken))
+    except:
+        import traceback
+        logger.error(traceback.format_exc())
+
+    return itemlist
+
+
+# Automatic channel check: this function must return "True" if everything is OK in the channel.
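+# It walks the channel's playlists and returns True as soon as any playlist
+# yields at least one video item, so a single working playlist is enough.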
+def test(channel_id="TelevisionCanaria"): + # Si hay algún video en alguna de las listas de reproducción lo da por bueno + playlist_items = playlists(Item(), channel_id) + for playlist_item in playlist_items: + items_videos = videos(playlist_item) + if len(items_videos) > 0: + return True + + return False diff --git a/plugin.video.alfa/channels/zentorrents.json b/plugin.video.alfa/channels/zentorrents.json new file mode 100755 index 00000000..aefc1bad --- /dev/null +++ b/plugin.video.alfa/channels/zentorrents.json @@ -0,0 +1,43 @@ +{ + "id": "zentorrents", + "name": "Zentorrent", + "active": true, + "adult": false, + "language": "es", + "banner": "zentorrents.png", + "thumbnail": "http://s6.postimg.org/9zv90yjip/zentorrentlogo.jpg", + "version": 1, + "changes": [ + { + "date": "07/12/2016", + "description": "Correciones código. Adaptación a Infoplus" + }, + { + "date": "03/04/2017", + "description": "Migración a Httptools" + }, + { + "date": "30/04/2017", + "description": "Correción cambios web" + }, + { + "date": "28/06/2017", + "description": "Corrección código y algunas mejoras" + } + ], + "categories": [ + "torrent", + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/zentorrents.py b/plugin.video.alfa/channels/zentorrents.py new file mode 100755 index 00000000..204f335f --- /dev/null +++ b/plugin.video.alfa/channels/zentorrents.py @@ -0,0 +1,1420 @@ +# -*- coding: utf-8 -*- + +import os +import re +import unicodedata +import urllib +import urlparse + +import xbmc +import xbmcgui +from core import config +from core import httptools +from core import logger +from core import scrapertools +from core.item import Item +from core.scrapertools import decodeHtmlentities as dhe + +ACTION_SHOW_FULLSCREEN = 36 +ACTION_GESTURE_SWIPE_LEFT = 511 +ACTION_SELECT_ITEM = 7 +ACTION_PREVIOUS_MENU = 10 +ACTION_MOVE_LEFT = 1 +ACTION_MOVE_RIGHT = 2 +ACTION_MOVE_DOWN = 4 +ACTION_MOVE_UP = 3 +OPTION_PANEL = 6 +OPTIONS_OK = 5 + +host = "http://www.zentorrents.com/" + +api_key = "2e2160006592024ba87ccdf78c28f49f" +api_fankey = "dffe90fba4d02c199ae7a9e71330c987" + + +def mainlist(item): + logger.info() + + itemlist = [] + itemlist.append( + Item(channel=item.channel, title="Películas", action="peliculas", url="http://www.zentorrents.com/peliculas", + thumbnail="http://www.navymwr.org/assets/movies/images/img-popcorn.png", + fanart="http://s18.postimg.org/u9wyvm809/zen_peliculas.jpg")) + itemlist.append( + Item(channel=item.channel, title="MicroHD", action="peliculas", url="http://www.zentorrents.com/tags/microhd", + thumbnail="http://s11.postimg.org/5s67cden7/microhdzt.jpg", + fanart="http://s9.postimg.org/i5qhadsjj/zen_1080.jpg")) + itemlist.append( + Item(channel=item.channel, title="HDrip", action="peliculas", url="http://www.zentorrents.com/tags/hdrip", + thumbnail="http://s10.postimg.org/pft9z4c5l/hdripzent.jpg", + fanart="http://s15.postimg.org/5kqx9ln7v/zen_720.jpg")) + itemlist.append( + Item(channel=item.channel, title="Series", action="peliculas", url="http://www.zentorrents.com/series", + thumbnail="http://imgur.com/HbM2dt5.png", fanart="http://s10.postimg.org/t0xz1t661/zen_series.jpg")) + itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url="", + thumbnail="http://newmedia-art.pl/product_picture/full_size/bed9a8589ad98470258899475cf56cca.jpg", + 
fanart="http://s23.postimg.org/jdutugvrf/zen_buscar.jpg")) + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = "http://www.zentorrents.com//buscar?searchword=%s&ordering=&searchphrase=all&limit=\d+" % (texto) + # item.url = item.url % texto + # itemlist.extend(buscador(item, texto.replace("+", " "))) + item.extra = str(texto) + + try: + return buscador(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def buscador(item): + logger.info() + itemlist = [] + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + pepe = item.extra + pepe = pepe.replace("+", " ") + if "highlight" in data: + searchword = scrapertools.get_match(data, '<span class="highlight">([^<]+)</span>') + data = re.sub(r'<span class="highlight">[^<]+</span>', searchword, data) + + patron = '<div class="moditemfdb">' # Empezamos el patrón por aquí para que no se cuele nada raro + patron += '<a title="([^"]+)" ' # scrapedtitulo + patron += 'href="([^"]+)".*?' # scrapedurl + patron += 'src="([^"]+)".*?' # scrapedthumbnail + patron += '<p>([^<]+)</p>' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedtitulo, scrapedurl, scrapedthumbnail, scrapedplot in matches: + # evitamos falsos positivos en los enlaces, ya que el buscador de la web muestra de todo, + # tiene que ser una descarga y que el texto a buscar esté en el titulo + if "Descargas/" in scrapedplot and pepe.lower() in scrapedtitulo.lower(): + title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", + scrapedtitulo) + + scrapedtitulo = "[COLOR white]" + scrapedtitulo + "[/COLOR]" + torrent_tag = "[COLOR pink] (Torrent)[/COLOR]" + scrapedtitulo = scrapedtitulo + torrent_tag + scrapedurl = "http://zentorrents.com" + scrapedurl + + itemlist.append(Item(channel=item.channel, title=scrapedtitulo, url=scrapedurl, action="fanart", + thumbnail=scrapedthumbnail, fulltitle=scrapedtitulo, extra=title_fan, + fanart="http://s6.postimg.org/4j8vdzy6p/zenwallbasic.jpg", folder=True)) + + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |</p>|<p>|&|amp;", "", data) + + # <div class="blogitem "><a title="En Un Patio De Paris [DVD Rip]" href="/peliculas/17937-en-un-patio-de-paris-dvd-rip"><div class="thumbnail_wrapper"><img alt="En Un Patio De Paris [DVD Rip]" src="http://www.zentorrents.com/images/articles/17/17937t.jpg" onload="imgLoaded(this)" /></div></a><div class="info"><div class="title"><a title="En Un Patio De Paris [DVD Rip]" href="/peliculas/17937-en-un-patio-de-paris-dvd-rip" class="contentpagetitleblog">En Un Patio De Paris [DVD Rip]</a></div><div class="createdate">21/01/2015</div><div class="text">[DVD Rip][AC3 5.1 Español Castellano][2014] Antoine es un músico de 40 años que de pronto decide abandonar su carrera.</div></div><div class="clr"></div></div> + + patron = '<div class="blogitem[^>]+>' + patron += '<a title="([^"]+)" ' + patron += 'href="([^"]+)".*?' + patron += 'src="([^"]+)".*?' 
+ patron += '<div class="createdate">([^<]+)</div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedtitulo, scrapedurl, scrapedthumbnail, scrapedcreatedate in matches: + title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", scrapedtitulo) + scrapedtitulo = "[COLOR white]" + scrapedtitulo + "[/COLOR]" + scrapedcreatedate = "[COLOR bisque]" + scrapedcreatedate + "[/COLOR]" + torrent_tag = "[COLOR pink]Torrent:[/COLOR]" + scrapedtitulo = scrapedtitulo + "(" + torrent_tag + scrapedcreatedate + ")" + scrapedurl = "http://zentorrents.com" + scrapedurl + itemlist.append( + Item(channel=item.channel, title=scrapedtitulo, url=scrapedurl, action="fanart", thumbnail=scrapedthumbnail, + fulltitle=scrapedtitulo, extra=title_fan, fanart="http://s6.postimg.org/4j8vdzy6p/zenwallbasic.jpg", + folder=True)) + # 1080,720 y seies + + + # <div class="blogitem "><a title="En Un Patio De Paris [DVD Rip]" href="/peliculas/17937-en-un-patio-de-paris-dvd-rip"><div class="thumbnail_wrapper"><img alt="En Un Patio De Paris [DVD Rip]" src="http://www.zentorrents.com/images/articles/17/17937t.jpg" onload="imgLoaded(this)" /></div></a><div class="info"><div class="title"><a title="En Un Patio De Paris [DVD Rip]" href="/peliculas/17937-en-un-patio-de-paris-dvd-rip" class="contentpagetitleblog">En Un Patio De Paris [DVD Rip]</a></div><div class="createdate">21/01/2015</div><div class="text">[DVD Rip][AC3 5.1 Español Castellano][2014] Antoine es un músico de 40 años que de pronto decide abandonar su carrera.</div></div><div class="clr"></div></div> + + patron = '<div class="blogitem[^>]+>' + patron += '<a href="([^"]+)".*? ' + patron += 'title="([^"]+)".*? ' + patron += 'src="([^"]+)".*?' 
+ patron += '<div class="createdate">([^<]+)</div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedurl, scrapedtitulo, scrapedthumbnail, scrapedcreatedate in matches: + title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", scrapedtitulo) + scrapedtitulo = "[COLOR white]" + scrapedtitulo + "[/COLOR]" + scrapedcreatedate = "[COLOR bisque]" + scrapedcreatedate + "[/COLOR]" + torrent_tag = "[COLOR pink]Torrent:[/COLOR]" + scrapedtitulo = scrapedtitulo + "(" + torrent_tag + scrapedcreatedate + ")" + scrapedurl = "http://zentorrents.com" + scrapedurl + itemlist.append( + Item(channel=item.channel, title=scrapedtitulo, url=scrapedurl, action="fanart", thumbnail=scrapedthumbnail, + fulltitle=scrapedtitulo, extra=title_fan, fanart="http://s6.postimg.org/4j8vdzy6p/zenwallbasic.jpg", + folder=True)) + + # Extrae el paginador + patronvideos = '<a href="([^"]+)" title="Siguiente">Siguiente</a>' + matches = re.compile(patronvideos, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + if len(matches) > 0: + scrapedurl = urlparse.urljoin(item.url, matches[0]) + title = "[COLOR chocolate]siguiente>>[/COLOR]" + itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=scrapedurl, + thumbnail="http://s6.postimg.org/9iwpso8k1/ztarrow2.png", + fanart="http://s6.postimg.org/4j8vdzy6p/zenwallbasic.jpg", folder=True)) + + return itemlist + + +def fanart(item): + logger.info() + itemlist = [] + url = item.url + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + title_fan = item.extra + title = re.sub(r'Serie Completa|3D|Temporada.*?Completa', '', title_fan) + title = title.replace(' ', '%20') + title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if + unicodedata.category(c) != 'Mn')).encode("ascii", "ignore") + item.title = re.sub(r'\(.*?\)|\[.*?\]', '', item.title) + item.title = '[COLOR floralwhite]' + item.title + '[/COLOR]' + try: + sinopsis = scrapertools.get_match(data, 'onload="imgLoaded.*?</div><p>(.*?)<p class="descauto">') + sinopsis = re.sub(r"<\p><p>", "", sinopsis) + except: + sinopsis = "" + if not "series" in item.url: + + # filmafinity + title = re.sub(r"cerdas", "cuerdas", title) + url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % (title.replace(' ', '+')) + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| |", "", data) + + try: + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + except: + pass + + try: + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + year = scrapertools.get_match(data, '<dt>Año</dt>.*?>(.*?)</dd>') + except: + year = "" + if sinopsis == " ": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma 
= scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]" + print "ozuu" + print critica + + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&year=" + year + "&language=es&include_adult=false" + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = re.sub(r":.*|\(.*?\)", "", title) + url = "http://api.themoviedb.org/3/search/movie?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false" + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica + show = item.fanart + "|" + "" + "|" + sinopsis + posterdb = item.thumbnail + fanart_info = item.fanart + fanart_3 = "" + fanart_2 = item.fanart + category = item.thumbnail + id_scraper = "" + + itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", + thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show, + category=category, folder=True)) + + for id, fan in matches: + + fan = re.sub(r'\\|"', '', fan) + + try: + rating = scrapertools.find_single_match(data, '"vote_average":(.*?),') + except: + rating = "Sin puntuación" + + id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica + try: + posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"') + posterdb = "https://image.tmdb.org/t/p/original" + posterdb + except: + posterdb = item.thumbnail + + if "null" in fan: + fanart = item.fanart + else: + fanart = "https://image.tmdb.org/t/p/original" + fan + + url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=" + api_key + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + fanart_info = item.extra + fanart_3 = "" + fanart_2 = item.extra + for fanart_info, fanart_3, fanart_2 in matches: + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = 
"https://image.tmdb.org/t/p/original" + fanart_2 + if fanart == item.fanart: + fanart = fanart_info + # clearart, fanart_2 y logo + url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"hdmovielogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if '"moviedisc"' in data: + disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"') + if '"movieposter"' in data: + poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"') + if '"moviethumb"' in data: + thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"') + if '"moviebanner"' in data: + banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"') + + if len(matches) == 0: + extra = posterdb + # "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png" + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + category = posterdb + + itemlist.append( + Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", + thumbnail=posterdb, fanart=fanart, extra=extra, show=show, category=category, folder=True)) + for logo in matches: + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + if '"moviebackground"' in data: + + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + else: + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + if '"moviebackground"' in data: + + if '"hdmovieclearart"' in data: + clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"') + extra = clear + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = clear + else: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = logo + + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + if not '"hdmovieclearart"' in data and not '"moviebackground"' in data: + extra = logo + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + if '"moviedisc"' in data: + category = disc + else: + category = item.extra + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=logo, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + else: + + # filmafinity + url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % (title.replace(' ', '+')) + data = browser(url_bing) + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + try: + if "myaddrproxy.php" in data: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><div class="b_title"><h2>(<a 
href="/myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"') + subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing) + else: + subdata_bing = scrapertools.get_match(data, + 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"') + except: + pass + + try: + url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)') + + if not "http" in url_filma: + data = httptools.downloadpage("http://" + url_filma).data + else: + data = httptools.downloadpage(url_filma).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + year = scrapertools.get_match(data, '<dt>Año</dt>.*?>(.*?)</dd>') + except: + year = "" + if sinopsis == " ": + try: + sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>') + sinopsis = sinopsis.replace("<br><br />", "\n") + sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis) + except: + pass + try: + rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') + except: + rating_filma = "Sin puntuacion" + print "lobeznito" + print rating_filma + + critica = "" + patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"' + matches_reviews = scrapertools.find_multiple_matches(data, patron) + + if matches_reviews: + for review, autor, valoracion in matches_reviews: + review = dhe(scrapertools.htmlclean(review)) + review += "\n" + autor + "[CR]" + review = re.sub(r'Puntuac.*?\)', '', review) + if "positiva" in valoracion: + critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review + elif "neutral" in valoracion: + critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review + else: + critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review + else: + critica = "[COLOR floralwhite][B]Esta serie no tiene críticas[/B][/COLOR]" + + ###Busqueda en tmdb + + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es&include_adult=false&first_air_date_year=" + year + data_tmdb = httptools.downloadpage(url_tmdb).data + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + ###Busqueda en bing el id de imdb de la serie + if len(matches) == 0: + url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=" + api_key + "&query=" + title + "&language=es" + data_tmdb = httptools.downloadpage(url_tmdb).data + data_tmdb = re.sub(r"\n|\r|\t|\s{2}| ", "", data_tmdb) + patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),' + matches = re.compile(patron, re.DOTALL).findall(data_tmdb) + if len(matches) == 0: + urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % ( + title.replace(' ', '+'), year) + data = browser(urlbing_imdb) + data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) + try: + subdata_imdb = scrapertools.find_single_match(data, + '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series') + except: + pass + + try: + imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') + except: + imdb_id = "" + ###Busca id de tvdb y tmdb mediante imdb id + + urlremotetbdb = "https://api.themoviedb.org/3/find/" + imdb_id + "?api_key=" + api_key + "&external_source=imdb_id&language=es" + data_tmdb = httptools.downloadpage(urlremotetbdb).data + matches = scrapertools.find_multiple_matches(data_tmdb, + '"tv_results":.*?"id":(.*?),.*?"poster_path":(.*?),') + + if len(matches) == 0: + id_tmdb = "" + fanart_3 = "" 
+            extra = item.thumbnail + "|" + year + "|" + "no data" + "|" + "no data" + "|" + rating_filma + "|" + critica + "|" + "" + "|" + id_tmdb
+            show = item.fanart + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + item.thumbnail + "|" + id_tmdb
+            fanart_info = item.fanart
+            fanart_2 = item.fanart
+            id_scraper = " " + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + " "
+            category = ""
+            posterdb = item.thumbnail
+            itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos",
+                                 thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, category=category,
+                                 show=show, folder=True))
+
+        for id_tmdb, fan in matches:
+            ### Look up the TheTVDB id through TMDb's external_ids
+            urlid_tvdb = "https://api.themoviedb.org/3/tv/" + id_tmdb + "/external_ids?api_key=" + api_key + "&language=es"
+            data_tvdb = httptools.downloadpage(urlid_tvdb).data
+            id = scrapertools.find_single_match(data_tvdb, 'tvdb_id":(.*?),"tvrage_id"')
+            if id == "null":
+                id = ""
+            category = id
+            ### Episode/season counts and airing status
+            url_status = "http://api.themoviedb.org/3/tv/" + id_tmdb + "?api_key=" + api_key + "&append_to_response=credits&language=es"
+            data_status = httptools.downloadpage(url_status).data
+            season_episodes = scrapertools.find_single_match(data_status,
+                                                             '"(number_of_episodes":\d+,"number_of_seasons":\d+,)"')
+            season_episodes = re.sub(r'"', '', season_episodes)
+            season_episodes = re.sub(r'number_of_episodes', 'Episodios ', season_episodes)
+            season_episodes = re.sub(r'number_of_seasons', 'Temporadas', season_episodes)
+            season_episodes = re.sub(r'_', ' ', season_episodes)
+            status = scrapertools.find_single_match(data_status, '"status":"(.*?)"')
+            if status == "Ended":
+                status = "Finalizada"
+            else:
+                status = "En emisión"
+            status = status + " (" + season_episodes + ")"
+            status = re.sub(r',', '.', status)
+            #######
+            fan = re.sub(r'\\|"', '', fan)
+            try:
+                # TheTVDB rating
+                url_rating_tvdb = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id + "/es.xml"
+                data = httptools.downloadpage(url_rating_tvdb).data
+                rating = scrapertools.find_single_match(data, '<Rating>(.*?)<')
+            except:
+                rating = ""
+            if rating == "":
+                try:
+                    rating = scrapertools.get_match(data, '"vote_average":(.*?),')
+                except:
+                    rating = "Sin puntuación"
+
+            id_scraper = id_tmdb + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + rating + "|" + status  # +"|"+emision
+            posterdb = scrapertools.find_single_match(data_tmdb, '"poster_path":(.*?)","popularity"')
+
+            if "null" in posterdb:
+                posterdb = item.thumbnail
+            else:
+                posterdb = re.sub(r'\\|"', '', posterdb)
+                posterdb = "https://image.tmdb.org/t/p/original" + posterdb
+
+            if "null" in fan:
+                fanart = item.fanart
+            else:
+                fanart = "https://image.tmdb.org/t/p/original" + fan
+
+            item.extra = fanart
+
+            url = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/images?api_key=" + api_key + ""
+            data = httptools.downloadpage(url).data
+            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+
+            patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
+            matches = re.compile(patron, re.DOTALL).findall(data)
+
+            if len(matches) == 0:
+                patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
+                matches = re.compile(patron, re.DOTALL).findall(data)
+                if len(matches) == 0:
+                    fanart_info = item.extra
+                    fanart_3 = ""
+                    fanart_2 = item.extra
+            for fanart_info, fanart_3, fanart_2 in matches:
+                if fanart == item.fanart:
+                    fanart =
"https://image.tmdb.org/t/p/original" + fanart_info + fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info + fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 + fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 + url = "http://webservice.fanart.tv/v3/tv/" + id + "?api_key=" + api_fankey + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '"clearlogo":.*?"url": "([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + if '"tvbanner"' in data: + tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') + tfv = tvbanner + elif '"tvposter"' in data: + tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + tfv = tvposter + else: + tfv = posterdb + if '"tvthumb"' in data: + tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') + if '"hdtvlogo"' in data: + hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') + if '"hdclearart"' in data: + hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') + if len(matches) == 0: + if '"hdtvlogo"' in data: + if "showbackground" in data: + + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, + category=category, extra=extra, show=show, folder=True)) + + + else: + if '"hdclearart"' in data: + thumbnail = hdtvlogo + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + thumbnail = hdtvlogo + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = "" + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=posterdb, fanart=fanart, extra=extra, show=show, + category=category, folder=True)) + + for logo in matches: + if '"hdtvlogo"' in data: + thumbnail = hdtvlogo + elif not '"hdtvlogo"' in data: + if '"clearlogo"' in data: + thumbnail = logo + else: + thumbnail = item.thumbnail + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + if "showbackground" in data: + + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + else: + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, 
+ server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if "showbackground" in data: + + if '"clearart"' in data: + clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') + extra = clear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = logo + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + if not '"clearart"' in data and not '"showbackground"' in data: + if '"hdclearart"' in data: + extra = hdtvclear + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + else: + extra = thumbnail + "|" + year + show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, + server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, + show=show, category=category, folder=True)) + + title_info = "Info" + title_info = "[COLOR skyblue]" + title_info + "[/COLOR]" + if not "series" in item.url: + thumbnail = posterdb + + if "series" in item.url: + + if '"tvposter"' in data: + thumbnail = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') + else: + thumbnail = posterdb + + if "tvbanner" in data: + category = tvbanner + else: + category = show + if '"tvthumb"' in data: + plot = item.plot + "|" + tvthumb + else: + plot = item.plot + "|" + item.thumbnail + if '"tvbanner"' in data: + plot = plot + "|" + tvbanner + elif '"tvthumb"' in data: + plot = plot + "|" + tvthumb + else: + plot = plot + "|" + item.thumbnail + else: + if '"moviethumb"' in data: + plot = item.plot + "|" + thumb + else: + plot = item.plot + "|" + posterdb + + if '"moviebanner"' in data: + plot = plot + "|" + banner + else: + if '"hdmovieclearart"' in data: + plot = plot + "|" + clear + + else: + plot = plot + "|" + posterdb + id = id_scraper + + extra = extra + "|" + id + "|" + title.encode('utf8') + + itemlist.append( + Item(channel=item.channel, action="info", title=title_info, plo=plot, url=item.url, thumbnail=thumbnail, + fanart=fanart_info, extra=extra, category=category, show=show, folder=False)) + + return itemlist + + +def findvideos(item): + logger.info() + + if not "serie" in item.url: + thumbnail = item.category + else: + thumbnail = item.show.split("|")[4] + itemlist = [] + + # Descarga la página + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| |&|amp;", "", data) + + patron = '<h1>(.*?)</h1>.*?src="([^"]+)".*?<div class="zentorrents_download"><p><a href="([^"]+)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + for scrapedtitulo, scrapedthumbnail, scrapedurl in matches: + if "series" in item.url: + patron = '<h1>.*?(\d+)x(\d+).*?' 
+ matches = re.compile(patron, re.DOTALL).findall(data) + for temp, epi in matches: + plot = temp + "|" + epi + try: + # buscamos peso y formato + scrapedurl = "http://www.zentorrents.com" + scrapedurl + data_url = httptools.downloadpage(scrapedurl).data + logger.info("data=" + data) + url = scrapertools.get_match(data_url, "{ window.open\('([^']+)'") + url = urlparse.urljoin(scrapedurl, url) + + torrents_path = config.get_videolibrary_path() + '/torrents' + + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) + urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0' + urllib.urlretrieve(url, torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + + if "used CloudFlare" in pepe: + try: + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), + torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + torrent = decode(pepe) + + try: + name = torrent["info"]["name"] + sizet = torrent["info"]['length'] + sizet = convert_size(sizet) + except: + name = "no disponible" + try: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), + "'length': (\d+)}") + + size = max([int(i) for i in check_video]) + + for file in torrent["info"]["files"]: + manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) + if str(size) in manolo: + video = manolo + size = convert_size(size) + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = sizet + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", + name) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + if "magnet" in url: + size = "MAGNET" + ext_v = " " + else: + size = "en estos momentos..." 
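+                        # Fallback labels when the .torrent could not be fetched or parsed:
+                        # the entry title built below ends up reading
+                        # "( Video no disponible -- en estos momentos... )", or shows
+                        # "MAGNET" for magnet links.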
+ ext_v = "no disponible" + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + size = "" + title_tag = "[COLOR pink]Ver--[/COLOR]" + scrapedtitulo = "[COLOR bisque][B]capítulo" + " " + temp + "x" + epi + "[/B][/COLOR]" + " " + "[COLOR peachpuff]( Video [/COLOR]" + "[COLOR peachpuff]" + ext_v + " -- " + size + " )[/COLOR]" + scrapedtitulo = title_tag + scrapedtitulo + scrapedurl = urlparse.urljoin(host, scrapedurl) + itemlist.append( + Item(channel=item.channel, title=scrapedtitulo, url=scrapedurl, action="play", server="torrent", + thumbnail=thumbnail, category=item.category, fanart=item.show.split("|")[0], folder=False)) + ###thumb temporada### + url = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ + 5] + "/season/" + temp + "/images?api_key=" + api_key + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron = '{"id".*?"file_path":"(.*?)","height"' + matches = re.compile(patron, re.DOTALL).findall(data) + if len(matches) == 0: + thumbnail = item.thumbnail + for thumtemp in matches: + thumbnail = "https://image.tmdb.org/t/p/original" + thumtemp + + extra = item.extra + "|" + temp + "|" + epi + + title = "Info" + title = "[COLOR skyblue]" + title + "[/COLOR]" + itemlist.append( + Item(channel=item.channel, action="info_capitulos", title=title, url=item.url, thumbnail=thumbnail, + fanart=item.show.split("|")[1], extra=extra, show=item.show, folder=False)) + else: + try: + # buscamos peso y formato + scrapedurl = urlparse.urljoin(host, scrapedurl) + data_url = httptools.downloadpage(scrapedurl).data + logger.info("data=" + data) + url = scrapertools.get_match(data_url, "{ window.open\('([^']+)'") + url = urlparse.urljoin(scrapedurl, url) + + torrents_path = config.get_videolibrary_path() + '/torrents' + + if not os.path.exists(torrents_path): + os.mkdir(torrents_path) + urllib.URLopener.version = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0' + urllib.urlretrieve(url, torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + + if "used CloudFlare" in pepe: + try: + urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url.strip(), + torrents_path + "/temp.torrent") + pepe = open(torrents_path + "/temp.torrent", "rb").read() + except: + pepe = "" + torrent = decode(pepe) + + try: + name = torrent["info"]["name"] + sizet = torrent["info"]['length'] + sizet = convert_size(sizet) + except: + name = "no disponible" + try: + check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}") + + size = max([int(i) for i in check_video]) + + for file in torrent["info"]["files"]: + manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"]) + if str(size) in manolo: + video = manolo + size = convert_size(size) + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + size = sizet + ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "", name) + try: + os.remove(torrents_path + "/temp.torrent") + except: + pass + except: + if "magnet" in url: + size = "MAGNET" + ext_v = " " + else: + size = "en estos momentos..." 
+ ext_v = "no disponible" + if "rar" in ext_v: + ext_v = ext_v + " -- No reproducible" + size = "" + infotitle = "[COLOR pink][B]Ver--[/B][/COLOR]" + scrapedtitulo = "[COLOR bisque]" + scrapedtitulo + "[/COLOR]" + "[COLOR peachpuff]( Video [/COLOR]" + "[COLOR peachpuff]" + ext_v + " -- " + size + " )[/COLOR]" + title = infotitle + scrapedtitulo + scrapedurl = urlparse.urljoin(host, scrapedurl) + if "peliculas" in item.url: + thumbnail = item.category + else: + thumbnail = item.extra + + itemlist.append( + Item(channel=item.channel, title=title, thumbnail=thumbnail, url=scrapedurl, fanart=item.show, + action="play", folder=False)) + + return itemlist + + +def play(item): + logger.info() + data = httptools.downloadpage(item.url).data + logger.info("data=" + data) + itemlist = [] + + try: + link = scrapertools.get_match(data, "{ window.open\('([^']+)'") + link = urlparse.urljoin(item.url, link) + logger.info("link=" + link) + + itemlist.append(Item(channel=item.channel, action=play, server="torrent", url=link, folder=False)) + except: + itemlist.append(Item(channel=item.channel, title=item.plot, url=item.url, server="youtube", fanart=item.fanart, + thumbnail=item.thumbnail, action="play", folder=False)) + + return itemlist + + +def info(item): + logger.info() + itemlist = [] + url = item.url + id = item.extra + + if "serie" in item.url: + try: + rating_tmdba_tvdb = item.extra.split("|")[6] + if item.extra.split("|")[6] == "": + rating_tmdba_tvdb = "Sin puntuación" + except: + rating_tmdba_tvdb = "Sin puntuación" + else: + rating_tmdba_tvdb = item.extra.split("|")[3] + rating_filma = item.extra.split("|")[4] + print "eztoquee" + print rating_filma + print rating_tmdba_tvdb + + filma = "http://s6.postimg.org/6yhe5fgy9/filma.png" + + try: + if "serie" in item.url: + title = item.extra.split("|")[8] + + else: + title = item.extra.split("|")[6] + title = title.replace("%20", " ") + title = "[COLOR yellow][B]" + title + "[/B][/COLOR]" + except: + title = item.title + + try: + if "." in rating_tmdba_tvdb: + check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).') + else: + check_rat_tmdba = rating_tmdba_tvdb + if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8: + rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10: + rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + else: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + print "lolaymaue" + except: + rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + try: + check_rat_filma = scrapertools.get_match(rating_filma, '(\d)') + print "paco" + print check_rat_filma + if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8: + print "dios" + print check_rat_filma + rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]" + elif int(check_rat_filma) >= 8: + + print check_rat_filma + rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]" + else: + rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" + print "rojo??" 
+    except:
+        rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
+
+    try:
+        if not "serie" in item.url:
+            url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[1] + "?api_key=" + api_key + "&append_to_response=credits&language=es"
+            data_plot = httptools.downloadpage(url_plot).data
+            plot, tagline = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",.*?"tagline":(".*?")')
+            if plot == "":
+                plot = item.show.split("|")[2]
+
+            plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
+            plot = re.sub(r"\\", "", plot)
+
+        else:
+            plot = item.show.split("|")[2]
+            plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
+            plot = re.sub(r"\\|</p><p>|</p>", "", plot)
+
+            if item.extra.split("|")[7] != "":
+                tagline = item.extra.split("|")[7]
+                # tagline= re.sub(r',','.',tagline)
+            else:
+                tagline = ""
+    except:
+        title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
+        plot = "Esta pelicula no tiene informacion..."
+        plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]")
+        photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
+        foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
+        info = ""
+
+    if "serie" in item.url:
+        check2 = "serie"
+        icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
+        foto = item.show.split("|")[1]
+        if item.extra.split("|")[5] != "":
+            critica = item.extra.split("|")[5]
+        else:
+            critica = "Esta serie no tiene críticas..."
+
+        photo = item.extra.split("|")[0].replace(" ", "%20")
+        try:
+            tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
+        except:
+            tagline = ""
+
+    else:
+        critica = item.extra.split("|")[5]
+        if "%20" in critica:
+            critica = "No hay críticas"
+        icon = "http://imgur.com/SenkyxF.png"
+
+        photo = item.extra.split("|")[0].replace(" ", "%20")
+        foto = item.show.split("|")[1]
+
+        try:
+            if tagline == "\"\"":
+                tagline = " "
+        except:
+            tagline = " "
+        tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
+        check2 = "pelicula"
+    # "You might also like" recommendations
+    peliculas = []
+    if "serie" in item.url:
+
+        url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/recommendations?api_key=" + api_key + "&language=es"
+        data_tpi = httptools.downloadpage(url_tpi).data
+        tpi = scrapertools.find_multiple_matches(data_tpi,
+                                                 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),"popularity"')
+
+    else:
+        url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[1] + "/recommendations?api_key=" + api_key + "&language=es"
+        data_tpi = httptools.downloadpage(url_tpi).data
+        tpi = scrapertools.find_multiple_matches(data_tpi,
+                                                 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),"popularity"')
+
+    for idp, peli, thumb in tpi:
+
+        thumb = re.sub(r'"|}', '', thumb)
+        if "null" in thumb:
+            thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png"
+        else:
+            thumb = "https://image.tmdb.org/t/p/original" + thumb
+        peliculas.append([idp, peli, thumb])
+
+    check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow")
+    infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline,
+                  'rating': rating}
+    item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma,
+                           critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/OZ1Vg3D.png")
+    from channels import infoplus
+    infoplus.start(item_info, peliculas)
+
+
+def info_capitulos(item):
+    logger.info()
+
+    url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" +
item.extra.split("|")[ + 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es" + + if "/0" in url: + url = url.replace("/0", "/") + + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"' + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[ + 2] + "/" + item.extra.split("|")[3] + "/es.xml" + if "/0" in url: + url = url.replace("/0", "/") + data = httptools.downloadpage(url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + + patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?<Overview>(.*?)</Overview>.*?<Rating>(.*?)</Rating>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + if len(matches) == 0: + + title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" + plot = "Este capitulo no tiene informacion..." + plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" + image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" + foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" + rating = "" + + + else: + + for name_epi, info, rating in matches: + if "<filename>episodes" in data: + foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') + fanart = "http://thetvdb.com/banners/" + foto + else: + fanart = item.extra.split("|")[1] + plot = info + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/IqYaDrC.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + else: + for name_epi, info, fanart, rating in matches: + if info == "" or info == "\\": + info = "Sin informacion del capítulo aún..." 
+ plot = info + plot = re.sub(r'/n', '', plot) + plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" + title = name_epi.upper() + title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" + image = fanart + image = re.sub(r'"|}', '', image) + if "null" in image: + image = "http://imgur.com/ZiEAVOD.png" + else: + image = "https://image.tmdb.org/t/p/original" + image + foto = item.extra.split("|")[0] + if not ".png" in foto: + foto = "http://imgur.com/IqYaDrC.png" + foto = re.sub(r'\(.*?\)|" "|" "', '', foto) + foto = re.sub(r' ', '', foto) + try: + + check_rating = scrapertools.get_match(rating, '(\d+).') + + if int(check_rating) >= 5 and int(check_rating) < 8: + rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) >= 8 and int(check_rating) < 10: + rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" + elif int(check_rating) == 10: + rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" + else: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + + except: + rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" + if "10." in rating: + rating = re.sub(r'10\.\d+', '10', rating) + ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) + ventana.doModal() + + +class TextBox2(xbmcgui.WindowDialog): + """ Create a skinned textbox window """ + + def __init__(self, *args, **kwargs): + self.getTitle = kwargs.get('title') + self.getPlot = kwargs.get('plot') + self.getThumbnail = kwargs.get('thumbnail') + self.getFanart = kwargs.get('fanart') + self.getRating = kwargs.get('rating') + + self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/133aoMw.jpg') + self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) + self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) + self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) + self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) + self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) + + self.addControl(self.background) + self.background.setAnimations( + [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), + ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) + self.addControl(self.thumbnail) + self.thumbnail.setAnimations([('conditional', + 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), + ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) + self.addControl(self.plot) + self.plot.setAnimations( + [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( + 'conditional', + 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), + ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) + self.addControl(self.fanart) + self.fanart.setAnimations( + [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( + 'conditional', + 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), + ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) + self.addControl(self.title) + self.title.setText(self.getTitle) + self.title.setAnimations( + [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), + ('WindowClose', 'effect=slide start=0% end=-1500% 
time=800 condition=true',)])
+        self.addControl(self.rating)
+        self.rating.setText(self.getRating)
+        self.rating.setAnimations(
+            [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',),
+             ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)])
+        xbmc.sleep(200)
+
+        try:
+            self.plot.autoScroll(7000, 6000, 30000)
+        except:
+
+            xbmc.executebuiltin(
+                'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
+        self.plot.setText(self.getPlot)
+
+    def get(self):
+        self.show()
+
+    def onAction(self, action):
+        if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
+            self.close()
+
+
+def test():
+    return True
+
+
+def browser(url):
+    import mechanize
+
+    # Use a mechanize Browser to work around problems with Bing searches
+    br = mechanize.Browser()
+    # Browser options
+    br.set_handle_equiv(False)
+    br.set_handle_gzip(True)
+    br.set_handle_redirect(True)
+    br.set_handle_referer(False)
+    br.set_handle_robots(False)
+    # Follows refresh 0 but not hangs on refresh > 0
+    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
+    # Want debugging messages?
+    # br.set_debug_http(True)
+    # br.set_debug_redirects(True)
+    # br.set_debug_responses(True)
+
+    # User-Agent (this is cheating, ok?)
+    br.addheaders = [('User-agent',
+                      'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
+    # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
+    # Open the requested page
+    r = br.open(url)
+    response = r.read()
+    # if not ".ftrH,.ftrHd,.ftrD>" in response:
+    if "img,divreturn" in response:
+        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
+        response = r.read()
+
+    return response
+
+
+def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match):
+    i = 0
+    while i < len(text):
+        m = match(text, i)
+        s = m.group(m.lastindex)
+        i = m.end()
+        if m.lastindex == 2:
+            yield "s"
+            yield text[i:i + int(s)]
+            i = i + int(s)
+        else:
+            yield s
+
+
+def decode_item(next, token):
+    if token == "i":
+        # integer: "i" value "e"
+        data = int(next())
+        if next() != "e":
+            raise ValueError
+    elif token == "s":
+        # string: "s" value (virtual tokens)
+        data = next()
+    elif token == "l" or token == "d":
+        # container: "l" (or "d") values "e"
+        data = []
+        tok = next()
+        while tok != "e":
+            data.append(decode_item(next, tok))
+            tok = next()
+        if token == "d":
+            data = dict(zip(data[0::2], data[1::2]))
+    else:
+        raise ValueError
+    return data
+
+
+def decode(text):
+    try:
+        src = tokenize(text)
+        data = decode_item(src.next, src.next())
+        for token in src:  # look for more tokens
+            raise SyntaxError("trailing junk")
+    except (AttributeError, ValueError, StopIteration):
+        # if decoding failed before "data" was assigned, fall back to the raw token stream
+        try:
+            data = data
+        except NameError:
+            data = src
+
+    return data
+
+
+def convert_size(size):
+    import math
+    if (size == 0):
+        return '0B'
+    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
+    i = int(math.floor(math.log(size, 1024)))
+    p = math.pow(1024, i)
+    s = round(size / p, 2)
+    return '%s %s' % (s, size_name[i])
diff --git
a/plugin.video.alfa/channels/zpeliculas.json b/plugin.video.alfa/channels/zpeliculas.json new file mode 100755 index 00000000..41ad30c6 --- /dev/null +++ b/plugin.video.alfa/channels/zpeliculas.json @@ -0,0 +1,53 @@ +{ + "id": "zpeliculas", + "name": "Zpeliculas", + "active": true, + "adult": false, + "language": "es", + "banner": "zpeliculas.png", + "thumbnail": "zpeliculas.png", + "version": 1, + "changes": [ + { + "date": "15/03/2017", + "description": "limpieza código" + }, + { + "date": "01/07/16", + "description": "Eliminado código innecesario." + }, + { + "date": "29/04/16", + "description": "Adaptar a Buscador global y Novedades Peliculas e Infantiles" + } + ], + "categories": [ + "movie" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/zpeliculas.py b/plugin.video.alfa/channels/zpeliculas.py new file mode 100755 index 00000000..3d3e42c2 --- /dev/null +++ b/plugin.video.alfa/channels/zpeliculas.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- + +import re +import urllib + +from core import logger +from core import scrapertools +from core import servertools +from core.item import Item + + +def mainlist(item): + logger.info() + + itemlist = [] + # itemlist.append( Item(channel=item.channel, action="destacadas" , title="Destacadas", url="http://www.zpeliculas.com", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png")) + itemlist.append( + Item(channel=item.channel, action="peliculas", title="Últimas peliculas", url="http://www.zpeliculas.com/", + fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie")) + # itemlist.append( Item(channel=item.channel, action="sugeridas" , title="Películas sugeridas", url="http://www.zpeliculas.com", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="generos", title="Por género", url="http://www.zpeliculas.com", + fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png")) + itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabético", + fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscador", url="http://www.zpeliculas.com", + fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie")) + + return itemlist + + +def alfabetico(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="peliculas", title="A", url="http://www.zpeliculas.com/cat/a", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="B", url="http://www.zpeliculas.com/cat/b", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="C", url="http://www.zpeliculas.com/cat/c", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="D", url="http://www.zpeliculas.com/cat/d", + 
viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="E", url="http://www.zpeliculas.com/cat/e", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="F", url="http://www.zpeliculas.com/cat/f", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="G", url="http://www.zpeliculas.com/cat/g", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="H", url="http://www.zpeliculas.com/cat/h", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="I", url="http://www.zpeliculas.com/cat/i", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="J", url="http://www.zpeliculas.com/cat/j", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="K", url="http://www.zpeliculas.com/cat/k", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="L", url="http://www.zpeliculas.com/cat/l", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="M", url="http://www.zpeliculas.com/cat/m", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="N", url="http://www.zpeliculas.com/cat/n", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="O", url="http://www.zpeliculas.com/cat/o", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="P", url="http://www.zpeliculas.com/cat/p", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Q", url="http://www.zpeliculas.com/cat/q", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="R", url="http://www.zpeliculas.com/cat/r", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="S", url="http://www.zpeliculas.com/cat/s", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="T", url="http://www.zpeliculas.com/cat/t", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="U", url="http://www.zpeliculas.com/cat/u", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="V", url="http://www.zpeliculas.com/cat/v", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="W", url="http://www.zpeliculas.com/cat/w", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="X", url="http://www.zpeliculas.com/cat/x", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Y", url="http://www.zpeliculas.com/cat/y", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Z", url="http://www.zpeliculas.com/cat/z", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="0", url="http://www.zpeliculas.com/cat/0", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="1", url="http://www.zpeliculas.com/cat/1", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="2", url="http://www.zpeliculas.com/cat/2", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="3", url="http://www.zpeliculas.com/cat/3", + viewmode="movie")) 
+ itemlist.append(Item(channel=item.channel, action="peliculas", title="4", url="http://www.zpeliculas.com/cat/4", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="5", url="http://www.zpeliculas.com/cat/5", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="6", url="http://www.zpeliculas.com/cat/6", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="7", url="http://www.zpeliculas.com/cat/7", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="8", url="http://www.zpeliculas.com/cat/8", + viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="9", url="http://www.zpeliculas.com/cat/9", + viewmode="movie")) + + return itemlist + + +def generos(item): + logger.info() + + itemlist = [] + itemlist.append(Item(channel=item.channel, action="peliculas", title="Acción", + url="http://www.zpeliculas.com/peliculas/p-accion/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Animación", + url="http://www.zpeliculas.com/peliculas/p-animacion/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Aventura", + url="http://www.zpeliculas.com/peliculas/p-aventura/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Biografía", + url="http://www.zpeliculas.com/peliculas/p-biografia/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Bélico", + url="http://www.zpeliculas.com/peliculas/p-belico/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Ciencia Ficción", + url="http://www.zpeliculas.com/peliculas/p-cienciaficcion/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Comedia", + url="http://www.zpeliculas.com/peliculas/p-comedia/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Crimen", + url="http://www.zpeliculas.com/peliculas/p-crimen/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Drama", + url="http://www.zpeliculas.com/peliculas/p-drama/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Fantasía", + url="http://www.zpeliculas.com/peliculas/p-fantasia/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Histórico", + url="http://www.zpeliculas.com/peliculas/p-historico/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Intriga", + url="http://www.zpeliculas.com/peliculas/p-intriga/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Musical", + url="http://www.zpeliculas.com/peliculas/p-musical/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Romántica", + url="http://www.zpeliculas.com/peliculas/p-romantica/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Terror", + url="http://www.zpeliculas.com/peliculas/p-terror/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Thriller", + url="http://www.zpeliculas.com/peliculas/p-thriller/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Western", + 
url="http://www.zpeliculas.com/peliculas/p-western/", viewmode="movie")) + itemlist.append(Item(channel=item.channel, action="peliculas", title="Otros", + url="http://www.zpeliculas.com/peliculas/p-otros/", viewmode="movie")) + return itemlist + + +def search(item, texto): + try: + post = urllib.urlencode({"story": texto, "do": "search", "subaction": "search", "x": "0", "y": "0"}) + data = scrapertools.cache_page("http://www.zpeliculas.com", post=post) + + patron = '<div class="leftpane">(.*?)<div class="clear"' + matches = re.compile(patron, re.DOTALL).findall(data) + itemlist = [] + + for match in matches: + scrapedtitle = scrapertools.find_single_match(match, '<div class="shortname">([^<]+)</div>') + scrapedurl = scrapertools.find_single_match(match, '<a href="([^"]+)"') + scrapedthumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"') + scrapedyear = scrapertools.find_single_match(match, '<div class="year"[^>]+>([^<]+)</div>') + scrapedidioma = scrapertools.find_single_match(match, 'title="Idioma">([^<]+)</div>') + scrapedcalidad = scrapertools.find_single_match(match, + '<div class="shortname"[^<]+</div[^<]+<div[^>]+>([^<]+)</div>') + + title = scrapedtitle + ' (' + scrapedyear + ') [' + scrapedidioma + '] [' + scrapedcalidad + ']' + url = scrapedurl + thumbnail = scrapedthumbnail + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, + contentThumbnail=thumbnail, + contentType="movie", context=["buscar_trailer"])) + + return itemlist + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + +def newest(categoria): + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = "http://www.zpeliculas.com" + + elif categoria == 'infantiles': + item.url = "http://www.zpeliculas.com/peliculas/p-animacion/" + + else: + return [] + + itemlist = peliculas(item) + if itemlist[-1].extra == "next_page": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + +def peliculas(item): + logger.info() + + # Descarga la página + body = scrapertools.cachePage(item.url) + data = scrapertools.get_match(body, + '<div class="shortmovies">(.*?)<div class="navigation ignore-select" align="center">') + + ''' + <div class="leftpane"> + <div class="movieposter" title="Descargar Sólo los amantes sobreviven"> + <a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html"><img src="http://i.imgur.com/NBPgXrp.jpg" width="110" height="150" alt="Sólo los amantes sobreviven" title="Descargar Sólo los amantes sobreviven" /></a> + <div class="shortname">Sólo los amantes sobreviven</div> + <div class="BDRip">BDRip</div> + </div> + </div> + + <div class="rightpane"> + <div style="display:block;overflow:hidden;"> + <h2 class="title" title="Sólo los amantes sobreviven"><a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html">Sólo los amantes sobreviven</a></h2> + + <div style="height:105px; overflow:hidden;"> + <div class="small"> + <div class="cats" title="Genero"><a 
href="http://www.zpeliculas.com/peliculas/p-drama/">Drama</a>, <a href="http://www.zpeliculas.com/peliculas/p-fantasia/">Fantasia</a>, <a href="http://www.zpeliculas.com/peliculas/p-romantica/">Romantica</a></div> + <div class="year" title="Año">2013</div> + <div class="ESP" title="Idioma">ESP</div> + <div class="FA" title="Sólo los amantes sobreviven FA Official Website"><a href="http://www.filmaffinity.com/es/film851633.html" target="_blank" title="Sólo los amantes sobreviven en filmaffinity">Sólo los amantes sobreviven en FA</a></div> + </div> + </div> + <div class="clear" style="height:2px;"></div> + <div style="float:right"> + ''' + patron = '<div class="leftpane">(.*?)<div style="float\:right">' + matches = re.compile(patron, re.DOTALL).findall(data) + + itemlist = [] + + for match in matches: + scrapedurl = scrapertools.find_single_match(match, '<a href="([^"]+)"') + scrapedthumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"') + scrapedtitle = scrapertools.find_single_match(match, '<div class="shortname">([^<]+)') + scrapedcalidad = scrapertools.find_single_match(match, + '<div class="shortname">[^<]+</div[^<]+<div class="[^"]+">([^<]+)') + scrapedyear = scrapertools.find_single_match(match, '<div class="year[^>]+>([^<]+)') + scrapedidioma = scrapertools.find_single_match(match, + '<div class="year[^>]+>[^<]+</div[^<]+<div class[^>]+>([^<]+)') + + contentTitle = scrapertools.htmlclean(scrapedtitle) + # logger.info("title="+scrapedtitle) + title = contentTitle + ' (' + scrapedyear + ') [' + scrapedidioma + '] [' + scrapedcalidad + ']' + # title = scrapertools.htmlclean(title) + url = scrapedurl + thumbnail = scrapedthumbnail + plot = "" + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + hasContentDetails=True, contentTitle=contentTitle, contentThumbnail=thumbnail, fanart=thumbnail, + contentType="movie", context=["buscar_trailer"])) + + next_page = scrapertools.find_single_match(body, '<a href="([^"]+)">Siguiente') + if next_page != "": + itemlist.append( + Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page, thumbnail="", + plot="", show="", viewmode="movie", fanart=thumbnail, extra="next_page")) + + return itemlist + + +def destacadas(item): + logger.info() + + # Descarga la página + data = scrapertools.cachePage(item.url) + data = scrapertools.get_match(data, '<div id="sliderwrapper">(.*?)<div class="genreblock">') + ''' + <div class="imageview view-first"> + <a href="/templates/mytopV2/blockpro/noimage-full.png" onclick="return hs.expand(this)"><img src="http://i.imgur.com/H4d96Wn.jpg" alt="Ocho apellidos vascos"></a> + <div class="mask"> + <h2><a href="/peliculas/p-comedia/1403-ocho-apellidos-vascos.html" title="Ocho apellidos vascos">Ocho apellidos vascos</a></h2> + </div> + ''' + patron = '<div class="imageview view-first">.*?<a href=.*?>.*?src="(.*?)" alt="(.*?)"></a>.*?<h2><a href="(.*?)".*?</div>' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + itemlist = [] + + for scrapedthumbnail, scrapedtitle, scrapedurl in matches: + logger.info("title=" + scrapedtitle) + title = scrapedtitle + title = scrapertools.htmlclean(title) + url = "http://www.zpeliculas.com" + scrapedurl + thumbnail = scrapedthumbnail + plot = "" + plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8") + logger.debug("title=[" + 
title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail, + contentType="movie", context=["buscar_trailer"])) + + return itemlist + + +def sugeridas(item): + logger.info() + + # Descarga la página + data = scrapertools.cachePage(item.url) + data = scrapertools.get_match(data, '<ul class="links">(.*?)</ul>') + ''' + <li><a href="/peliculas/p-accion/425-instinto-asesino.html" title="Descargar Instinto asesino (The Crew)"><span class="movie-name">Instinto asesino (The Crew)</span><img src="http://i.imgur.com/1xXLz.jpg" width="102" height="138" alt="Instinto asesino (The Crew)" title="Descargar Instinto asesino (The Crew)" /></a></li> + ''' + patron = '<li>.*?<a href="(.*?)".*?"movie-name">(.*?)</span><img src="(.*?)"' + + matches = re.compile(patron, re.DOTALL).findall(data) + scrapertools.printMatches(matches) + + itemlist = [] + + for scrapedurl, scrapedtitle, scrapedthumbnail in matches: + logger.info("title=" + scrapedtitle) + title = scrapedtitle + title = scrapertools.htmlclean(title) + url = "http://www.zpeliculas.com" + scrapedurl + thumbnail = scrapedthumbnail + plot = "" + plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8") + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") + + itemlist.append( + Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, + show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail, + contentType="movie", context=["buscar_trailer"])) + + return itemlist + + +def findvideos(item): + logger.info("item=" + item.tostring()) + + # Descarga la página para obtener el argumento + data = scrapertools.cachePage(item.url) + item.plot = scrapertools.find_single_match(data, '<div class="contenttext">([^<]+)<').strip() + item.contentPlot = item.plot + logger.info("plot=" + item.plot) + + return servertools.find_video_items(item=item, data=data) diff --git a/plugin.video.alfa/channelselector.py b/plugin.video.alfa/channelselector.py new file mode 100755 index 00000000..956effdd --- /dev/null +++ b/plugin.video.alfa/channelselector.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- + +import glob +import os + +from core import channeltools +from core import config +from core import logger +from core.item import Item + + +def getmainlist(): + logger.info() + itemlist = list() + + # Añade los canales que forman el menú principal + itemlist.append(Item(title=config.get_localized_string(30130), channel="news", action="mainlist", + thumbnail=config.get_thumb("thumb_news.png"), + category=config.get_localized_string(30119), viewmode="thumbnails", + context=[{"title": "Configurar Novedades", "channel": "news", "action": "menu_opciones", + "goto": True}])) + + itemlist.append(Item(title=config.get_localized_string(30118), channel="channelselector", action="getchanneltypes", + thumbnail=config.get_thumb("thumb_channels.png"), + category=config.get_localized_string(30119), viewmode="thumbnails")) + + itemlist.append(Item(title=config.get_localized_string(30103), channel="search", action="mainlist", + thumbnail=config.get_thumb("thumb_search.png"), + category=config.get_localized_string(30119), viewmode="list", + context=[{"title": "Configurar Buscador", "channel": "search", "action": "opciones", + "goto": 
True}])) + + itemlist.append(Item(title=config.get_localized_string(30102), channel="favorites", action="mainlist", + thumbnail=config.get_thumb("thumb_favorites.png"), + category=config.get_localized_string(30102), viewmode="thumbnails")) + + if config.get_videolibrary_support(): + itemlist.append(Item(title=config.get_localized_string(30131), channel="videolibrary", action="mainlist", + thumbnail=config.get_thumb("thumb_videolibrary.png"), + category=config.get_localized_string(30119), viewmode="thumbnails", + context=[{"title": "Configurar Videoteca", "channel": "videolibrary", + "action": "channel_config"}])) + + itemlist.append(Item(title=config.get_localized_string(30101), channel="downloads", action="mainlist", + thumbnail=config.get_thumb("thumb_downloads.png"), viewmode="list", + context=[{"title": "Configurar Descargas", "channel": "setting", "config": "downloads", + "action": "channel_config"}])) + + thumb_configuracion = "thumb_setting_%s.png" % 0 # config.get_setting("plugin_updates_available") + + itemlist.append(Item(title=config.get_localized_string(30100), channel="setting", action="mainlist", + thumbnail=config.get_thumb(thumb_configuracion), + category=config.get_localized_string(30100), viewmode="list")) + # TODO REVISAR LA OPCION AYUDA + # itemlist.append(Item(title=config.get_localized_string(30104), channel="help", action="mainlist", + # thumbnail=config.get_thumb("thumb_help.png"), + # category=config.get_localized_string(30104), viewmode="list")) + return itemlist + + +def getchanneltypes(): + logger.info() + + # Lista de categorias + channel_types = ["movie", "tvshow", "anime", "documentary", "vos", "torrent", "latino"] + dict_types_lang = {'movie': config.get_localized_string(30122), 'tvshow': config.get_localized_string(30123), + 'anime': config.get_localized_string(30124), 'documentary': config.get_localized_string(30125), + 'vos': config.get_localized_string(30136), 'adult': config.get_localized_string(30126), + 'latino': config.get_localized_string(30127)} + + if config.get_setting("adult_mode") != 0: + channel_types.append("adult") + + channel_language = config.get_setting("channel_language") + logger.info("channel_language=" + channel_language) + + # Ahora construye el itemlist ordenadamente + itemlist = list() + title = config.get_localized_string(30121) + itemlist.append(Item(title=title, channel="channelselector", action="filterchannels", + category=title, channel_type="all", + thumbnail=config.get_thumb("thumb_channels_all.png"), + viewmode="thumbnails")) + + for channel_type in channel_types: + logger.info("channel_type=" + channel_type) + title = dict_types_lang.get(channel_type, channel_type) + itemlist.append(Item(title=title, channel="channelselector", action="filterchannels", category=title, + channel_type=channel_type, viewmode="thumbnails", + thumbnail=config.get_thumb("thumb_channels_" + channel_type + ".png"))) + + return itemlist + + +def filterchannels(category, preferred_thumb=""): + logger.info() + + channelslist = [] + + # Si category = "allchannelstatus" es que estamos activando/desactivando canales + appenddisabledchannels = False + if category == "allchannelstatus": + category = "all" + appenddisabledchannels = True + + # Lee la lista de canales + channel_path = os.path.join(config.get_runtime_path(), "channels", '*.json') + logger.info("channel_path=" + channel_path) + + channel_files = glob.glob(channel_path) + logger.info("channel_files encontrados " + str(len(channel_files))) + + channel_language = 
config.get_setting("channel_language") + logger.info("channel_language=" + channel_language) + if channel_language == "": + channel_language = "all" + logger.info("channel_language=" + channel_language) + + for channel_path in channel_files: + logger.info("channel=" + channel_path) + + channel = os.path.basename(channel_path).replace(".json", "") + + try: + channel_parameters = channeltools.get_channel_parameters(channel) + + # si el canal no es compatible, no se muestra + if not channel_parameters["compatible"]: + continue + + # Si no es un canal lo saltamos + if not channel_parameters["channel"]: + continue + logger.info("channel_parameters=" + repr(channel_parameters)) + + # preferred_thumb TODO REVISAR + # Si prefiere el banner y el canal lo tiene, cambia ahora de idea + if preferred_thumb == "banner" and "banner" in channel_parameters: + channel_parameters["thumbnail"] = channel_parameters["banner"] + + # si el canal está desactivado no se muestra el canal en la lista + if not channel_parameters["active"]: + continue + + # Se salta el canal si no está activo y no estamos activando/desactivando los canales + channel_status = config.get_setting("enabled", channel_parameters["channel"]) + + if channel_status is None: + # si channel_status no existe es que NO HAY valor en _data.json. + # como hemos llegado hasta aquí (el canal está activo en channel.json), se devuelve True + channel_status = True + + if channel_status != True: + # si obtenemos el listado de canales desde "activar/desactivar canales", y el canal está desactivado + # lo mostramos, si estamos listando todos los canales desde el listado general y está desactivado, + # no se muestra + if appenddisabledchannels != True: + continue + + # Se salta el canal para adultos si el modo adultos está desactivado + if channel_parameters["adult"] == True and config.get_setting("adult_mode") == 0: + continue + + # Se salta el canal si está en un idioma filtrado + if channel_language != "all" \ + and channel_parameters["language"] != config.get_setting("channel_language"): + continue + + # Se salta el canal si está en una categoria filtrado + if category != "all" and category not in channel_parameters["categories"]: + continue + + # Si tiene configuración añadimos un item en el contexto + context = [] + if channel_parameters["has_settings"]: + context.append({"title": "Configurar canal", "channel": "setting", "action": "channel_config", + "config": channel_parameters["channel"]}) + + # Si ha llegado hasta aquí, lo añade + channelslist.append(Item(title=channel_parameters["title"], channel=channel_parameters["channel"], + action="mainlist", thumbnail=channel_parameters["thumbnail"], + fanart=channel_parameters["fanart"], category=channel_parameters["title"], + language=channel_parameters["language"], viewmode="list", + version=channel_parameters["version"], context=context)) + + except: + logger.error("Se ha producido un error al leer los datos del canal " + channel) + import traceback + logger.error(traceback.format_exc()) + + channelslist.sort(key=lambda item: item.title.lower().strip()) + + if category == "all": + + channel_parameters = channeltools.get_channel_parameters('url') + # Si prefiere el banner y el canal lo tiene, cambia ahora de idea + if preferred_thumb == "banner" and "banner" in channel_parameters: + channel_parameters["thumbnail"] = channel_parameters["banner"] + + channelslist.insert(0, Item(title="Tengo una URL", action="mainlist", channel="url", + thumbnail=channel_parameters["thumbnail"], type="generic", 
viewmode="list")) + + return channelslist diff --git a/plugin.video.alfa/core/__init__.py b/plugin.video.alfa/core/__init__.py new file mode 100755 index 00000000..b48ad52a --- /dev/null +++ b/plugin.video.alfa/core/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- + +import os +import sys + +# Appends the main plugin dir to the PYTHONPATH if an internal package cannot be imported. +# Examples: In Plex Media Server all modules are under "Code.*" package, and in Enigma2 under "Plugins.Extensions.*" +try: + # from core import logger + import core +except: + sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) diff --git a/plugin.video.alfa/core/api.py b/plugin.video.alfa/core/api.py new file mode 100755 index 00000000..cf932990 --- /dev/null +++ b/plugin.video.alfa/core/api.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Client for api.tvalacarta.info +# ------------------------------------------------------------ + +import urllib + +import config +import jsontools +import logger +import scrapertools + +MAIN_URL = "" +API_KEY = "nzgJy84P9w54H2w" +DEFAULT_HEADERS = [["User-Agent", config.PLUGIN_NAME + " " + config.get_platform()]] + + +# --------------------------------------------------------------------------------------------------------- +# Common function for API calls +# --------------------------------------------------------------------------------------------------------- + +# Make a remote call using post, ensuring api key is here +def remote_call(url, parameters={}, require_session=True): + logger.info("url=" + url + ", parameters=" + repr(parameters)) + + if not url.startswith("http"): + url = MAIN_URL + "/" + url + + if not "api_key" in parameters: + parameters["api_key"] = API_KEY + + # Add session token if not here + # if not "s" in parameters and require_session: + # parameters["s"] = get_session_token() + + headers = DEFAULT_HEADERS + post = urllib.urlencode(parameters) + + response_body = scrapertools.downloadpage(url, post, headers) + + return jsontools.load(response_body) + + +# --------------------------------------------------------------------------------------------------------- +# Plugin service calls +# --------------------------------------------------------------------------------------------------------- + +def plugins_get_all_packages(): + logger.info() + + parameters = {"plugin": config.PLUGIN_NAME, "platform": config.get_platform()} + return remote_call("plugins/get_all_packages.php", parameters) + + +def plugins_get_latest_packages(): + logger.info() + + parameters = {"plugin": config.PLUGIN_NAME, "platform": config.get_platform()} + return remote_call("plugins/get_latest_packages.php", parameters) diff --git a/plugin.video.alfa/core/channeltools.py b/plugin.video.alfa/core/channeltools.py new file mode 100755 index 00000000..e59dd7b8 --- /dev/null +++ b/plugin.video.alfa/core/channeltools.py @@ -0,0 +1,318 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# channeltools - Herramientas para trabajar con canales +# ------------------------------------------------------------ + +import os + +import config +import jsontools +import logger + +DEFAULT_UPDATE_URL = "/channels/" +dict_channels_parameters = dict() + + +def is_adult(channel_name): + logger.info("channel_name=" + channel_name) + channel_parameters = get_channel_parameters(channel_name) + return channel_parameters["adult"] + + +def get_channel_parameters(channel_name): 
+ global dict_channels_parameters + + if channel_name not in dict_channels_parameters: + try: + channel_parameters = get_channel_json(channel_name) + # logger.debug(channel_parameters) + if channel_parameters: + # cambios de nombres y valores por defecto + channel_parameters["title"] = channel_parameters.pop("name") + channel_parameters["channel"] = channel_parameters.pop("id") + + # si no existe el key se declaran valor por defecto para que no de fallos en las funciones que lo llaman + channel_parameters["update_url"] = channel_parameters.get("update_url", DEFAULT_UPDATE_URL) + channel_parameters["language"] = channel_parameters.get("language", "all") + channel_parameters["adult"] = channel_parameters.get("adult", False) + channel_parameters["active"] = channel_parameters.get("active", False) + channel_parameters["include_in_global_search"] = channel_parameters.get("include_in_global_search", + False) + channel_parameters["categories"] = channel_parameters.get("categories", list()) + + channel_parameters["thumbnail"] = channel_parameters.get("thumbnail", "") + channel_parameters["banner"] = channel_parameters.get("banner", "") + channel_parameters["fanart"] = channel_parameters.get("fanart", "") + + # Imagenes: se admiten url y archivos locales dentro de "resources/images" + if channel_parameters.get("thumbnail") and "://" not in channel_parameters["thumbnail"]: + channel_parameters["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "media", + "channels", "thumb", channel_parameters["thumbnail"]) + if channel_parameters.get("banner") and "://" not in channel_parameters["banner"]: + channel_parameters["banner"] = os.path.join(config.get_runtime_path(), "resources", "media", + "channels", "banner", channel_parameters["banner"]) + if channel_parameters.get("fanart") and "://" not in channel_parameters["fanart"]: + channel_parameters["fanart"] = os.path.join(config.get_runtime_path(), "resources", "media", + "channels", "fanart", channel_parameters["fanart"]) + + # Obtenemos si el canal tiene opciones de configuración + channel_parameters["has_settings"] = False + if 'settings' in channel_parameters: + # if not isinstance(channel_parameters['settings'], list): + # channel_parameters['settings'] = [channel_parameters['settings']] + + # if "include_in_global_search" in channel_parameters['settings']: + # channel_parameters["include_in_global_search"] = channel_parameters['settings'] + # ["include_in_global_search"].get('default', False) + # + # found = False + # for el in channel_parameters['settings']: + # for key in el.items(): + # if 'include_in' not in key: + # channel_parameters["has_settings"] = True + # found = True + # break + # if found: + # break + + for s in channel_parameters['settings']: + if 'id' in s: + if s['id'] == "include_in_global_search": + channel_parameters["include_in_global_search"] = True + elif not s['id'].startswith("include_in_") and \ + (s.get('enabled', False) or s.get('visible', False)): + channel_parameters["has_settings"] = True + + del channel_parameters['settings'] + + # Compatibilidad + if 'compatible' in channel_parameters: + # compatible python + python_compatible = True + if 'python' in channel_parameters["compatible"]: + import sys + python_condition = channel_parameters["compatible"]['python'] + if sys.version_info < tuple(map(int, (python_condition.split(".")))): + python_compatible = False + + # compatible addon_version + addon_version_compatible = True + if 'addon_version' in channel_parameters["compatible"]: + import versiontools + 
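+                        # Normalize the required "x.y.z" version: strip the dots and
+                        # right-pad with zeros until it has as many digits as the current
+                        # plugin version, so the two can be compared as plain integers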
addon_version_condition = channel_parameters["compatible"]['addon_version'] + addon_version = int(addon_version_condition.replace(".", "").ljust(len(str( + versiontools.get_current_plugin_version())), '0')) + if versiontools.get_current_plugin_version() < addon_version: + addon_version_compatible = False + + channel_parameters["compatible"] = python_compatible and addon_version_compatible + else: + channel_parameters["compatible"] = True + + dict_channels_parameters[channel_name] = channel_parameters + + else: + # para evitar casos donde canales no están definidos como configuración + # lanzamos la excepcion y asi tenemos los valores básicos + raise Exception + + except Exception, ex: + logger.error(channel_name + ".json error \n%s" % ex) + channel_parameters = dict() + channel_parameters["channel"] = "" + channel_parameters["adult"] = False + channel_parameters['active'] = False + channel_parameters["compatible"] = True + channel_parameters["language"] = "" + channel_parameters["update_url"] = DEFAULT_UPDATE_URL + return channel_parameters + + return dict_channels_parameters[channel_name] + + +def get_channel_json(channel_name): + # logger.info("channel_name=" + channel_name) + import filetools + try: + channel_path = filetools.join(config.get_runtime_path(), "channels", channel_name + ".json") + # logger.info("channel_data=" + channel_path) + channel_json = jsontools.load(filetools.read(channel_path)) + # logger.info("channel_json= %s" % channel_json) + + except Exception, ex: + template = "An exception of type %s occured. Arguments:\n%r" + message = template % (type(ex).__name__, ex.args) + logger.error(" %s" % message) + channel_json = None + + return channel_json + + +def get_channel_controls_settings(channel_name): + # logger.info("channel_name=" + channel_name) + dict_settings = {} + + list_controls = get_channel_json(channel_name).get('settings', list()) + + for c in list_controls: + if 'id' not in c or 'type' not in c or 'default' not in c: + # Si algun control de la lista no tiene id, type o default lo ignoramos + continue + + # new dict with key(id) and value(default) from settings + dict_settings[c['id']] = c['default'] + + return list_controls, dict_settings + + +def get_channel_setting(name, channel, default=None): + """ + Retorna el valor de configuracion del parametro solicitado. + + Devuelve el valor del parametro 'name' en la configuracion propia del canal 'channel'. + + Busca en la ruta \addon_data\plugin.video.alfa\settings_channels el archivo channel_data.json y lee + el valor del parametro 'name'. Si el archivo channel_data.json no existe busca en la carpeta channels el archivo + channel.json y crea un archivo channel_data.json antes de retornar el valor solicitado. Si el parametro 'name' + tampoco existe en el el archivo channel.json se devuelve el parametro default. 
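+
+    Usage sketch (channel and parameter names are hypothetical):
+        quality = get_channel_setting("quality", "somechannel", default="HD")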
+ + + @param name: nombre del parametro + @type name: str + @param channel: nombre del canal + @type channel: str + @param default: valor devuelto en caso de que no exista el parametro name + @type default: cualquiera + + @return: El valor del parametro 'name' + @rtype: El tipo del valor del parametro + + """ + file_settings = os.path.join(config.get_data_path(), "settings_channels", channel + "_data.json") + dict_settings = {} + dict_file = {} + if os.path.exists(file_settings): + # Obtenemos configuracion guardada de ../settings/channel_data.json + try: + dict_file = jsontools.load(open(file_settings, "rb").read()) + if isinstance(dict_file, dict) and 'settings' in dict_file: + dict_settings = dict_file['settings'] + except EnvironmentError: + logger.error("ERROR al leer el archivo: %s" % file_settings) + + if not dict_settings or name not in dict_settings: + # Obtenemos controles del archivo ../channels/channel.json + try: + list_controls, default_settings = get_channel_controls_settings(channel) + except: + default_settings = {} + + if name in default_settings: # Si el parametro existe en el channel.json creamos el channel_data.json + default_settings.update(dict_settings) + dict_settings = default_settings + dict_file['settings'] = dict_settings + # Creamos el archivo ../settings/channel_data.json + json_data = jsontools.dump(dict_file) + try: + open(file_settings, "wb").write(json_data) + except EnvironmentError: + logger.error("ERROR al salvar el archivo: %s" % file_settings) + + # Devolvemos el valor del parametro local 'name' si existe, si no se devuelve default + return dict_settings.get(name, default) + + +def set_channel_setting(name, value, channel): + """ + Fija el valor de configuracion del parametro indicado. + + Establece 'value' como el valor del parametro 'name' en la configuracion propia del canal 'channel'. + Devuelve el valor cambiado o None si la asignacion no se ha podido completar. + + Si se especifica el nombre del canal busca en la ruta \addon_data\plugin.video.alfa\settings_channels el + archivo channel_data.json y establece el parametro 'name' al valor indicado por 'value'. + Si el parametro 'name' no existe lo añade, con su valor, al archivo correspondiente. 
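+
+    Usage sketch (values are hypothetical):
+        set_channel_setting("quality", "HD", "somechannel")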
+ + @param name: nombre del parametro + @type name: str + @param value: valor del parametro + @type value: str + @param channel: nombre del canal + @type channel: str + + @return: 'value' en caso de que se haya podido fijar el valor y None en caso contrario + @rtype: str, None + + """ + # Creamos la carpeta si no existe + if not os.path.exists(os.path.join(config.get_data_path(), "settings_channels")): + os.mkdir(os.path.join(config.get_data_path(), "settings_channels")) + + file_settings = os.path.join(config.get_data_path(), "settings_channels", channel + "_data.json") + dict_settings = {} + + dict_file = None + + if os.path.exists(file_settings): + # Obtenemos configuracion guardada de ../settings/channel_data.json + try: + dict_file = jsontools.load(open(file_settings, "r").read()) + dict_settings = dict_file.get('settings', {}) + except EnvironmentError: + logger.error("ERROR al leer el archivo: %s" % file_settings) + + dict_settings[name] = value + + # comprobamos si existe dict_file y es un diccionario, sino lo creamos + if dict_file is None or not dict_file: + dict_file = {} + + dict_file['settings'] = dict_settings + + # Creamos el archivo ../settings/channel_data.json + try: + json_data = jsontools.dump(dict_file) + open(file_settings, "w").write(json_data) + except EnvironmentError: + logger.error("ERROR al salvar el archivo: %s" % file_settings) + return None + + return value + + +def get_channel_module(channel_name, package="channels"): + # Sustituye al que hay en servertools.py ... + # ...pero añade la posibilidad de incluir un paquete diferente de "channels" + if "." not in channel_name: + channel_module = __import__('%s.%s' % (package, channel_name), None, None, ['%s.%s' % (package, channel_name)]) + else: + channel_module = __import__(channel_name, None, None, [channel_name]) + return channel_module + + +def get_channel_remote_url(channel_name): + channel_parameters = get_channel_parameters(channel_name) + remote_channel_url = channel_parameters["update_url"] + channel_name + ".py" + remote_version_url = channel_parameters["update_url"] + channel_name + ".json" + + logger.info("remote_channel_url=" + remote_channel_url) + logger.info("remote_version_url=" + remote_version_url) + + return remote_channel_url, remote_version_url + + +def get_channel_local_path(channel_name): + if channel_name != "channelselector": + local_channel_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".py") + local_version_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".json") + local_compiled_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".pyo") + else: + local_channel_path = os.path.join(config.get_runtime_path(), channel_name + ".py") + local_version_path = os.path.join(config.get_runtime_path(), channel_name + ".json") + local_compiled_path = os.path.join(config.get_runtime_path(), channel_name + ".pyo") + + logger.info("local_channel_path=" + local_channel_path) + logger.info("local_version_path=" + local_version_path) + logger.info("local_compiled_path=" + local_compiled_path) + + return local_channel_path, local_version_path, local_compiled_path diff --git a/plugin.video.alfa/core/cloudflare.py b/plugin.video.alfa/core/cloudflare.py new file mode 100755 index 00000000..5e6b0532 --- /dev/null +++ b/plugin.video.alfa/core/cloudflare.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Cloudflare decoder +# 
-------------------------------------------------------------------------------- + +import re +import time +import urllib +import urlparse + +from core import logger + + +class Cloudflare: + def __init__(self, response): + self.timeout = 5 + self.domain = urlparse.urlparse(response["url"])[1] + self.protocol = urlparse.urlparse(response["url"])[0] + self.js_data = {} + self.header_data = {} + + if not "var s,t,o,p,b,r,e,a,k,i,n,g,f" in response["data"] or "chk_jschl" in response["url"]: + return + + try: + self.js_data["auth_url"] = \ + re.compile('<form id="challenge-form" action="([^"]+)" method="get">').findall(response["data"])[0] + self.js_data["params"] = {} + self.js_data["params"]["jschl_vc"] = \ + re.compile('<input type="hidden" name="jschl_vc" value="([^"]+)"/>').findall(response["data"])[0] + self.js_data["params"]["pass"] = \ + re.compile('<input type="hidden" name="pass" value="([^"]+)"/>').findall(response["data"])[0] + var, self.js_data["value"] = \ + re.compile('var s,t,o,p,b,r,e,a,k,i,n,g,f[^:]+"([^"]+)":([^\n]+)};', re.DOTALL).findall(response["data"])[0] + self.js_data["op"] = re.compile(var + "([\+|\-|\*|\/])=([^;]+)", re.MULTILINE).findall(response["data"]) + self.js_data["wait"] = int(re.compile("\}, ([\d]+)\);", re.MULTILINE).findall(response["data"])[0]) / 1000 + except: + logger.debug("Metodo #1 (javascript): NO disponible") + self.js_data = {} + + if "refresh" in response["headers"]: + try: + self.header_data["wait"] = int(response["headers"]["refresh"].split(";")[0]) + self.header_data["auth_url"] = response["headers"]["refresh"].split("=")[1].split("?")[0] + self.header_data["params"] = {} + self.header_data["params"]["pass"] = response["headers"]["refresh"].split("=")[2] + except: + logger.debug("Metodo #2 (headers): NO disponible") + self.header_data = {} + + @property + def wait_time(self): + if self.js_data.get("wait", 0): + return self.js_data["wait"] + else: + return self.header_data.get("wait", 0) + + @property + def is_cloudflare(self): + return self.header_data.get("wait", 0) > 0 or self.js_data.get("wait", 0) > 0 + + def get_url(self): + # Metodo #1 (javascript) + if self.js_data.get("wait", 0): + jschl_answer = self.decode(self.js_data["value"]) + + for op, v in self.js_data["op"]: + jschl_answer = eval(str(jschl_answer) + op + str(self.decode(v))) + + self.js_data["params"]["jschl_answer"] = jschl_answer + len(self.domain) + + response = "%s://%s%s?%s" % ( + self.protocol, self.domain, self.js_data["auth_url"], urllib.urlencode(self.js_data["params"])) + + time.sleep(self.js_data["wait"]) + + return response + + # Metodo #2 (headers) + if self.header_data.get("wait", 0): + response = "%s://%s%s?%s" % ( + self.protocol, self.domain, self.header_data["auth_url"], urllib.urlencode(self.header_data["params"])) + + time.sleep(self.header_data["wait"]) + + return response + + def decode(self, data): + t = time.time() + timeout = False + + while not timeout: + data = re.sub("\[\]", "''", data) + data = re.sub("!\+''", "+1", data) + data = re.sub("!''", "0", data) + data = re.sub("!0", "1", data) + + if "(" in data: + x, y = data.rfind("("), data.find(")", data.rfind("(")) + 1 + part = data[x + 1:y - 1] + else: + x = 0 + y = len(data) + part = data + + val = "" + + if not part.startswith("+"): part = "+" + part + + for i, ch in enumerate(part): + if ch == "+": + if not part[i + 1] == "'": + if val == "": val = 0 + if type(val) == str: + val = val + self.get_number(part, i + 1) + else: + val = val + int(self.get_number(part, i + 1)) + else: + val = str(val) 
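+                        # Next token is quoted: treat the running value as a string and
+                        # append the following digits as characters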
+ val = val + self.get_number(part, i + 1) or "0" + + if type(val) == str: val = "'%s'" % val + data = data[0:x] + str(val) + data[y:] + + timeout = time.time() - t > self.timeout + + if not "+" in data and not "(" in data and not ")" in data: + return int(self.get_number(data)) + + def get_number(self, str, start=0): + ret = "" + for chr in str[start:]: + try: + int(chr) + except: + if ret: break + else: + ret += chr + return ret diff --git a/plugin.video.alfa/core/config.py b/plugin.video.alfa/core/config.py new file mode 100755 index 00000000..c9e3df39 --- /dev/null +++ b/plugin.video.alfa/core/config.py @@ -0,0 +1,473 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Parámetros de configuración (kodi) +# ------------------------------------------------------------ + +import os +import re + +import xbmc +import xbmcaddon +import xbmcgui + +PLUGIN_NAME = "alfa" + +__settings__ = xbmcaddon.Addon(id="plugin.video." + PLUGIN_NAME) +__language__ = __settings__.getLocalizedString + + +def get_platform(full_version=False): + """ + Devuelve la información la version de xbmc o kodi sobre el que se ejecuta el plugin + + @param full_version: indica si queremos toda la informacion o no + @type full_version: bool + @rtype: str o dict + @return: Si el paramentro full_version es True se retorna un diccionario con las siguientes claves: + 'num_version': (float) numero de version en formato XX.X + 'name_version': (str) nombre clave de cada version + 'video_db': (str) nombre del archivo que contiene la base de datos de videos + 'plaform': (str) esta compuesto por "kodi-" o "xbmc-" mas el nombre de la version segun corresponda. + Si el parametro full_version es False (por defecto) se retorna el valor de la clave 'plaform' del diccionario anterior. 
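+
+    Example: on Kodi 17 ("Krypton") the function returns "kodi-krypton"; with
+    full_version=True it returns {'num_version': 17.0, 'name_version': 'krypton',
+    'video_db': 'MyVideos107.db', 'platform': 'kodi-krypton'}.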
+ """ + + ret = {} + codename = {"10": "dharma", "11": "eden", "12": "frodo", + "13": "gotham", "14": "helix", "15": "isengard", + "16": "jarvis", "17": "krypton", "18": "leia"} + code_db = {'10': 'MyVideos37.db', '11': 'MyVideos60.db', '12': 'MyVideos75.db', + '13': 'MyVideos78.db', '14': 'MyVideos90.db', '15': 'MyVideos93.db', + '16': 'MyVideos99.db', '17': 'MyVideos107.db', '18': 'MyVideos108.db'} + + num_version = xbmc.getInfoLabel('System.BuildVersion') + num_version = re.match("\d+\.\d+", num_version).group(0) + ret['name_version'] = codename.get(num_version.split('.')[0], num_version) + ret['video_db'] = code_db.get(num_version.split('.')[0], "") + ret['num_version'] = float(num_version) + if ret['num_version'] < 14: + ret['platform'] = "xbmc-" + ret['name_version'] + else: + ret['platform'] = "kodi-" + ret['name_version'] + + if full_version: + return ret + else: + return ret['platform'] + + +def is_xbmc(): + return True + + +def get_videolibrary_support(): + return True + + +def get_system_platform(): + """ fonction: pour recuperer la platform que xbmc tourne """ + platform = "unknown" + if xbmc.getCondVisibility("system.platform.linux"): + platform = "linux" + elif xbmc.getCondVisibility("system.platform.windows"): + platform = "windows" + elif xbmc.getCondVisibility("system.platform.osx"): + platform = "osx" + return platform + + +def get_all_settings_addon(): + # Lee el archivo settings.xml y retorna un diccionario con {id: value} + import scrapertools + + infile = open(os.path.join(get_data_path(), "settings.xml"), "r") + data = infile.read() + infile.close() + + ret = {} + matches = scrapertools.find_multiple_matches(data, '<setting id="([^"]*)" value="([^"]*)') + + for _id, value in matches: + ret[_id] = get_setting(_id) + + return ret + + +def open_settings(): + settings_pre = get_all_settings_addon() + __settings__.openSettings() + settings_post = get_all_settings_addon() + + # cb_validate_config (util para validar cambios realizados en el cuadro de dialogo) + if settings_post.get('adult_aux_intro_password', None): + # Hemos accedido a la seccion de Canales para adultos + from platformcode import platformtools + if 'adult_password' not in settings_pre: + adult_password = set_setting('adult_password', '0000') + else: + adult_password = settings_pre['adult_password'] + + if settings_post['adult_aux_intro_password'] == adult_password: + # La contraseña de acceso es correcta + + # Cambio de contraseña + if settings_post['adult_aux_new_password1']: + if settings_post['adult_aux_new_password1'] == settings_post['adult_aux_new_password2']: + adult_password = set_setting('adult_password', settings_post['adult_aux_new_password1']) + else: + platformtools.dialog_ok("Canales para adultos", + "Los campos 'Nueva contraseña' y 'Confirmar nueva contraseña' no coinciden.", + "Entre de nuevo en 'Preferencias' para cambiar la contraseña") + + # Fijar adult_pin + adult_pin = "" + if settings_post["adult_request_password"] == True: + adult_pin = adult_password + set_setting("adult_pin", adult_pin) + + else: + platformtools.dialog_ok("Canales para adultos", "La contraseña no es correcta.", + "Los cambios realizados en esta sección no se guardaran.") + + # Deshacer cambios + set_setting("adult_mode", settings_pre.get("adult_mode", 0)) + set_setting("adult_request_password", settings_pre.get("adult_request_password", True)) + + # Borramos settings auxiliares + set_setting('adult_aux_intro_password', '') + set_setting('adult_aux_new_password1', '') + set_setting('adult_aux_new_password2', '') + + 
# si se ha cambiado la ruta de la videoteca llamamos a comprobar directorios para que lo cree y pregunte + # automaticamente si configurar la videoteca + if settings_pre.get("videolibrarypath", None) != settings_post.get("videolibrarypath", None) or \ + settings_pre.get("folder_movies", None) != settings_post.get("folder_movies", None) or \ + settings_pre.get("folder_tvshows", None) != settings_post.get("folder_tvshows", None): + verify_directories_created() + + else: + # si se ha puesto que se quiere autoconfigurar y se había creado el directorio de la videoteca + if not settings_pre.get("videolibrary_kodi", None) and settings_post.get("videolibrary_kodi", None) \ + and settings_post.get("videolibrary_kodi_flag", None) == 1: + + from platformcode import xbmc_videolibrary + xbmc_videolibrary.ask_set_content(2, silent=True) + + +def get_setting(name, channel="", server="", default=None): + """ + Retorna el valor de configuracion del parametro solicitado. + + Devuelve el valor del parametro 'name' en la configuracion global, en la configuracion propia del canal 'channel' + o en la del servidor 'server'. + + Los parametros channel y server no deben usarse simultaneamente. Si se especifica el nombre del canal se devolvera + el resultado de llamar a channeltools.get_channel_setting(name, channel, default). Si se especifica el nombre del + servidor se devolvera el resultado de llamar a servertools.get_channel_setting(name, server, default). Si no se + especifica ninguno de los anteriores se devolvera el valor del parametro en la configuracion global si existe o + el valor default en caso contrario. + + @param name: nombre del parametro + @type name: str + @param channel: nombre del canal + @type channel: str + @param server: nombre del servidor + @type server: str + @param default: valor devuelto en caso de que no exista el parametro name + @type default: cualquiera + + @return: El valor del parametro 'name' + @rtype: El tipo del valor del parametro + + """ + + # Specific channel setting + if channel: + # logger.info("config.get_setting reading channel setting '"+name+"' from channel json") + from core import channeltools + value = channeltools.get_channel_setting(name, channel, default) + # logger.info("config.get_setting -> '"+repr(value)+"'") + return value + + # Specific server setting + elif server: + # logger.info("config.get_setting reading server setting '"+name+"' from server json") + from core import servertools + value = servertools.get_server_setting(name, server, default) + # logger.info("config.get_setting -> '"+repr(value)+"'") + return value + + # Global setting + else: + # logger.info("config.get_setting reading main setting '"+name+"'") + value = __settings__.getSetting(name) + if not value: + return default + # Translate Path if start with "special://" + if value.startswith("special://") and "videolibrarypath" not in name: + value = xbmc.translatePath(value) + + # hack para devolver el tipo correspondiente + settings_types = get_settings_types() + + if settings_types.get(name) in ['enum', 'number']: + try: + value = int(value) + except Exception, ex: + from core import logger + logger.error("Error al convertir '%s' de tipo 'enum','number' \n%s" % (name, ex)) + + elif settings_types.get(name) == 'bool': + value = value == 'true' + + elif name not in settings_types: + try: + if value in ['true', 'false']: + if value == 'true': + aux_val = True + else: + aux_val = False + value = bool(aux_val) + else: + t = eval(value) + value = t[0](t[1]) + except Exception, ex: + from core 
import logger + logger.error("Error al convertir '%s' se pasa como tipo 'None'\n%s" % (name, ex)) + value = None + + return value + + +def set_setting(name, value, channel="", server=""): + """ + Fija el valor de configuracion del parametro indicado. + + Establece 'value' como el valor del parametro 'name' en la configuracion global o en la configuracion propia del + canal 'channel'. + Devuelve el valor cambiado o None si la asignacion no se ha podido completar. + + Si se especifica el nombre del canal busca en la ruta \addon_data\plugin.video.alfa\settings_channels el + archivo channel_data.json y establece el parametro 'name' al valor indicado por 'value'. Si el archivo + channel_data.json no existe busca en la carpeta channels el archivo channel.json y crea un archivo channel_data.json + antes de modificar el parametro 'name'. + Si el parametro 'name' no existe lo añade, con su valor, al archivo correspondiente. + + + Parametros: + name -- nombre del parametro + value -- valor del parametro + channel [opcional] -- nombre del canal + + Retorna: + 'value' en caso de que se haya podido fijar el valor y None en caso contrario + + """ + if channel: + from core import channeltools + return channeltools.set_channel_setting(name, value, channel) + elif server: + from core import servertools + return servertools.set_server_setting(name, value, server) + else: + try: + settings_types = get_settings_types() + + if settings_types.get(name) == 'bool': + if value: + new_value = "true" + else: + new_value = "false" + + elif settings_types.get(name): + new_value = str(value) + + else: + if isinstance(value, basestring): + new_value = "(%s, %s)" % (type(value).__name__, repr(value)) + + else: + new_value = "(%s, %s)" % (type(value).__name__, value) + + __settings__.setSetting(name, new_value) + + except Exception, ex: + from core import logger + logger.error("Error al convertir '%s' no se guarda el valor \n%s" % (name, ex)) + return None + + return value + + +def get_settings_types(): + """ + Devuelve un diccionario con los parametros (key) de la configuracion global y sus tipos (value) + + :return: dict + """ + win10000 = xbmcgui.Window(10000) + settings_types = win10000.getProperty(PLUGIN_NAME + "_settings_types") + + if not settings_types: + infile = open(os.path.join(get_runtime_path(), "resources", "settings.xml")) + data = infile.read() + infile.close() + + matches = re.findall('<setting id="([^"]*)" type="([^"]*)', data) + settings_types = "{%s}" % ",".join("'%s': '%s'" % tup for tup in matches) + + win10000.setProperty(PLUGIN_NAME + "_settings_types", settings_types) + + return eval(settings_types) + + +def get_localized_string(code): + dev = __language__(code) + + try: + dev = dev.encode("utf-8") + except: + pass + + return dev + + +def get_videolibrary_config_path(): + value = get_setting("videolibrarypath") + if value == "": + verify_directories_created() + value = get_setting("videolibrarypath") + return value + + +def get_videolibrary_path(): + return xbmc.translatePath(get_videolibrary_config_path()) + + +def get_temp_file(filename): + return xbmc.translatePath(os.path.join("special://temp/", filename)) + + +def get_runtime_path(): + return xbmc.translatePath(__settings__.getAddonInfo('Path')) + + +def get_data_path(): + dev = xbmc.translatePath(__settings__.getAddonInfo('Profile')) + + # Crea el directorio si no existe + if not os.path.exists(dev): + os.makedirs(dev) + + return dev + + +def get_cookie_data(): + import os + ficherocookies = os.path.join(get_data_path(), 'cookies.dat') + + 
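+    # Read the cookie file back and return its raw contents as a string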
cookiedatafile = open(ficherocookies, 'r') + cookiedata = cookiedatafile.read() + cookiedatafile.close() + + return cookiedata + + +# Test if all the required directories are created +def verify_directories_created(): + from core import logger + from core import filetools + from platformcode import xbmc_videolibrary + + config_paths = [["videolibrarypath", "videolibrary"], + ["downloadpath", "downloads"], + ["downloadlistpath", "downloads/list"], + ["settings_path", "settings_channels"]] + + for path, default in config_paths: + saved_path = get_setting(path) + + # videoteca + if path == "videolibrarypath": + if not saved_path: + saved_path = xbmc_videolibrary.search_library_path() + if saved_path: + set_setting(path, saved_path) + + if not saved_path: + saved_path = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/" + default + set_setting(path, saved_path) + + saved_path = xbmc.translatePath(saved_path) + if not filetools.exists(saved_path): + logger.debug("Creating %s: %s" % (path, saved_path)) + filetools.mkdir(saved_path) + + config_paths = [["folder_movies", "CINE"], + ["folder_tvshows", "SERIES"]] + + flag_call = True + for path, default in config_paths: + saved_path = get_setting(path) + + if not saved_path: + saved_path = default + set_setting(path, saved_path) + + content_path = filetools.join(get_videolibrary_path(), saved_path) + if not filetools.exists(content_path): + logger.debug("Creating %s: %s" % (path, content_path)) + + # si se crea el directorio + if filetools.mkdir(content_path): + if flag_call: + # le pasamos el valor para que sepamos que se ha pasado por creación de directorio + xbmc_videolibrary.ask_set_content(1) + flag_call = False + + try: + from core import scrapertools + # Buscamos el archivo addon.xml del skin activo + skindir = filetools.join(xbmc.translatePath("special://home"), 'addons', xbmc.getSkinDir(), + 'addon.xml') + # Extraemos el nombre de la carpeta de resolución por defecto + folder = "" + data = filetools.read(skindir) + res = scrapertools.find_multiple_matches(data, '(<res .*?>)') + for r in res: + if 'default="true"' in r: + folder = scrapertools.find_single_match(r, 'folder="([^"]+)"') + break + + # Comprobamos si existe en el addon y sino es así, la creamos + default = filetools.join(get_runtime_path(), 'resources', 'skins', 'Default') + if folder and not filetools.exists(filetools.join(default, folder)): + filetools.mkdir(filetools.join(default, folder)) + + # Copiamos el archivo a dicha carpeta desde la de 720p si éste no existe o si el tamaño es diferente + if folder and folder != '720p': + for root, folders, files in filetools.walk(filetools.join(default, '720p')): + for f in files: + if not filetools.exists(filetools.join(default, folder, f)) or \ + (filetools.getsize(filetools.join(default, folder, f)) != + filetools.getsize(filetools.join(default, '720p', f))): + filetools.copy(filetools.join(default, '720p', f), + filetools.join(default, folder, f), + True) + except: + import traceback + logger.error("Al comprobar o crear la carpeta de resolución") + logger.error(traceback.format_exc()) + + +def get_thumb(thumb_name): + path = os.path.join(get_runtime_path(), "resources", "media", "general") + + # if config.get_setting("icons"): # TODO obtener de la configuración el pack de thumbs seleccionado + # preferred_thumb = config.get_setting("icons") + # else: + # preferred_thumb = os.sep + "default" + + preferred_thumb = os.sep + "default" + web_path = path + preferred_thumb + os.sep + + return os.path.join(web_path, 
thumb_name) diff --git a/plugin.video.alfa/core/downloader.py b/plugin.video.alfa/core/downloader.py new file mode 100755 index 00000000..616840bc --- /dev/null +++ b/plugin.video.alfa/core/downloader.py @@ -0,0 +1,546 @@ +# -*- coding: utf-8 -*- + +""" +Clase Downloader +Downloader(url, path [, filename, headers, resume]) + + url : string - url para descargar + path : string - Directorio donde se guarda la descarga + filename : [opt] string - Nombre de archivo para guardar + headers : [opt] dict - Headers para usar en la descarga + resume : [opt] bool - continuar una descarga previa en caso de existir, por defecto True + + +metodos: + start_dialog() Inicia la descarga mostrando el progreso + start() Inicia la descarga en segundo plano + stop(erase = False) Detiene la descarga, con erase = True elimina los datos descargados + +""" +import mimetypes +import os +import re +import sys +import threading +import time +import urllib +import urllib2 +import urlparse +from threading import Thread, Lock + +from core import filetools +from core import logger + + +class Downloader: + @property + def state(self): + return self._state + + @property + def connections(self): + return len([c for c in self._download_info["parts"] if + c["status"] in [self.states.downloading, self.states.connecting]]), self._max_connections + + @property + def downloaded(self): + return self.__change_units__(sum([c["current"] - c["start"] for c in self._download_info["parts"]])) + + @property + def average_speed(self): + return self.__change_units__(self._average_speed) + + @property + def speed(self): + return self.__change_units__(self._speed) + + @property + def remaining_time(self): + if self.speed[0] and self._file_size: + t = (self.size[0] - self.downloaded[0]) / self.speed[0] + else: + t = 0 + + return time.strftime("%H:%M:%S", time.gmtime(t)) + + @property + def download_url(self): + return self.url + + @property + def size(self): + return self.__change_units__(self._file_size) + + @property + def progress(self): + if self._file_size: + return float(self.downloaded[0]) * 100 / float(self._file_size) + elif self._state == self.states.completed: + return 100 + else: + return 0 + + @property + def filename(self): + return self._filename + + @property + def fullpath(self): + return os.path.abspath(filetools.join(self._path, self._filename)) + + # Funciones + def start_dialog(self, title="Descargando..."): + from platformcode import platformtools + progreso = platformtools.dialog_progress(title, "Iniciando descarga...") + self.start() + while self.state == self.states.downloading and not progreso.iscanceled(): + time.sleep(0.1) + line1 = "%s" % (self.filename) + line2 = "%.2f%% - %.2f %s de %.2f %s a %.2f %s/s (%d/%d)" % ( + self.progress, self.downloaded[1], self.downloaded[2], self.size[1], self.size[2], + self.speed[1], self.speed[2], self.connections[0], self.connections[1]) + line3 = "Tiempo restante: %s" % (self.remaining_time) + + progreso.update(int(self.progress), line1, line2, line3) + if self.state == self.states.downloading: + self.stop() + progreso.close() + + def start(self): + if self._state == self.states.error: return + conns = [] + for x in range(self._max_connections): + try: + conns.append(self.__open_connection__("0", "")) + except: + self._max_connections = x + self._threads = [ + Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) for x + in range(self._max_connections)] + break + del conns + self._start_time = time.time() - 1 + self._state = 
self.states.downloading + self._speed_thread.start() + self._save_thread.start() + + for t in self._threads: t.start() + + def stop(self, erase=False): + if self._state == self.states.downloading: + # Detenemos la descarga + self._state = self.states.stopped + for t in self._threads: + if t.isAlive(): t.join() + + if self._save_thread.isAlive(): self._save_thread.join() + + if self._seekable: + # Guardamos la info al final del archivo + self.file.seek(0, 2) + offset = self.file.tell() + self.file.write(str(self._download_info)) + self.file.write("%0.16d" % offset) + + self.file.close() + + if erase: os.remove(filetools.join(self._path, self._filename)) + + def __speed_metter__(self): + self._speed = 0 + self._average_speed = 0 + + downloaded = self._start_downloaded + downloaded2 = self._start_downloaded + t = time.time() + t2 = time.time() + time.sleep(1) + + while self.state == self.states.downloading: + self._average_speed = (self.downloaded[0] - self._start_downloaded) / (time.time() - self._start_time) + self._speed = (self.downloaded[0] - self._start_downloaded) / (time.time() - self._start_time) + # self._speed = (self.downloaded[0] - downloaded) / (time.time() -t) + + if time.time() - t > 5: + t = t2 + downloaded = downloaded2 + t2 = time.time() + downloaded2 = self.downloaded[0] + + time.sleep(0.5) + + # Funciones internas + def __init__(self, url, path, filename=None, headers=[], resume=True, max_connections=10, block_size=2 ** 17, + part_size=2 ** 24, max_buffer=10): + # Parametros + self._resume = resume + self._path = path + self._filename = filename + self._max_connections = max_connections + self._block_size = block_size + self._part_size = part_size + self._max_buffer = max_buffer + + try: + import xbmc + self.tmp_path = xbmc.translatePath("special://temp/") + except: + self.tmp_path = os.getenv("TEMP") or os.getenv("TMP") or os.getenv("TMPDIR") + + self.states = type('states', (), + {"stopped": 0, "connecting": 1, "downloading": 2, "completed": 3, "error": 4, "saving": 5}) + + self._state = self.states.stopped + self._download_lock = Lock() + self._headers = { + "User-Agent": "Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"} + self._speed = 0 + self._buffer = {} + self._seekable = True + + self._threads = [Thread(target=self.__start_part__, name="Downloader %s/%s" % (x + 1, self._max_connections)) + for x in range(self._max_connections)] + self._speed_thread = Thread(target=self.__speed_metter__, name="Speed Meter") + self._save_thread = Thread(target=self.__save_file__, name="File Writer") + + # Actualizamos los headers + self._headers.update(dict(headers)) + + # Separamos los headers de la url + self.__url_to_headers__(url) + + # Obtenemos la info del servidor + self.__get_download_headers__() + + self._file_size = int(self.response_headers.get("content-length", "0")) + + if not self.response_headers.get("accept-ranges") == "bytes" or self._file_size == 0: + self._max_connections = 1 + self._part_size = 0 + self._resume = False + + # Obtenemos el nombre del archivo + self.__get_download_filename__() + + # Abrimos en modo "a+" para que cree el archivo si no existe, luego en modo "r+b" para poder hacer seek() + self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+") + self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b") + + if self._file_size >= 2 ** 31 or not self._file_size: + try: + self.file.seek(2 ** 31) + except OverflowError: + self._seekable = False + logger.info("No se 
puede hacer seek() ni tell() en ficheros mayores de 2GB") + + self.__get_download_info__() + + logger.info("Descarga inicializada: Partes: %s | Ruta: %s | Archivo: %s | Tamaño: %s" % ( + len(self._download_info["parts"]), self._path, self._filename, self._download_info["size"])) + + def __url_to_headers__(self, url): + # Separamos la url de los headers adicionales + self.url = url.split("|")[0] + + # headers adicionales + if "|" in url: + self._headers.update(dict([[header.split("=")[0], urllib.unquote_plus(header.split("=")[1])] for header in + url.split("|")[1].split("&")])) + + def __get_download_headers__(self): + if self.url.startswith("https"): + try: + conn = urllib2.urlopen(urllib2.Request(self.url.replace("https", "http"), headers=self._headers)) + conn.fp._sock.close() + self.url = self.url.replace("https", "http") + except: + pass + + for x in range(3): + try: + if not sys.hexversion > 0x0204FFFF: + conn = urllib2.urlopen(urllib2.Request(self.url, headers=self._headers)) + conn.fp._sock.close() + else: + conn = urllib2.urlopen(urllib2.Request(self.url, headers=self._headers), timeout=5) + + except: + self.response_headers = dict() + self._state = self.states.error + else: + self.response_headers = conn.headers.dict + self._state = self.states.stopped + break + + def __get_download_filename__(self): + # Obtenemos nombre de archivo y extension + if "filename" in self.response_headers.get("content-disposition", + "") and "attachment" in self.response_headers.get( + "content-disposition", ""): + cd_filename, cd_ext = os.path.splitext(urllib.unquote_plus( + re.compile("attachment; filename ?= ?[\"|']?([^\"']+)[\"|']?").match( + self.response_headers.get("content-disposition")).group(1))) + elif "filename" in self.response_headers.get("content-disposition", "") and "inline" in self.response_headers.get( + "content-disposition", ""): + cd_filename, cd_ext = os.path.splitext(urllib.unquote_plus( + re.compile("inline; filename ?= ?[\"|']?([^\"']+)[\"|']?").match( + self.response_headers.get("content-disposition")).group(1))) + else: + cd_filename, cd_ext = "", "" + + url_filename, url_ext = os.path.splitext( + urllib.unquote_plus(filetools.basename(urlparse.urlparse(self.url)[2]))) + if self.response_headers.get("content-type", "application/octet-stream") != "application/octet-stream": + mime_ext = mimetypes.guess_extension(self.response_headers.get("content-type")) + else: + mime_ext = "" + + self.remote_filename = ""  # valor por defecto para que el atributo siempre exista + + # Seleccionamos el nombre mas adecuado + if cd_filename: + self.remote_filename = cd_filename + if not self._filename: + self._filename = cd_filename + + elif url_filename: + self.remote_filename = url_filename + if not self._filename: + self._filename = url_filename + + # Seleccionamos la extension mas adecuada + if cd_ext: + if not cd_ext in self._filename: self._filename += cd_ext + if self.remote_filename: self.remote_filename += cd_ext + elif mime_ext: + if not mime_ext in self._filename: self._filename += mime_ext + if self.remote_filename: self.remote_filename += mime_ext + elif url_ext: + if not url_ext in self._filename: self._filename += url_ext + if self.remote_filename: self.remote_filename += url_ext + + def __change_units__(self, value): + import math + units = ["B", "KB", "MB", "GB"] + if value <= 0: + return 0, 0, units[0] + else: + return value, value / 1024.0 ** int(math.log(value, 1024)), units[int(math.log(value, 1024))] + + def __get_download_info__(self): + # Continuamos con una descarga que contiene la info al final del archivo + self._download_info = {} + + try: + if not 
self._resume: + raise Exception() + self.file.seek(-16, 2) + offset = int(self.file.read()) + self.file.seek(offset) + data = self.file.read()[:-16] + self._download_info = eval(data) + if not self._download_info["size"] == self._file_size: + raise Exception() + self.file.seek(offset) + self.file.truncate() + + if not self._seekable: + for part in self._download_info["parts"]: + if part["start"] >= 2 ** 31 and part["status"] == self.states.completed: + part["status"] = self.states.stopped + part["current"] = part["start"] + + self._start_downloaded = sum([c["current"] - c["start"] for c in self._download_info["parts"]]) + self.pending_parts = set( + [x for x, a in enumerate(self._download_info["parts"]) if not a["status"] == self.states.completed]) + self.completed_parts = set( + [x for x, a in enumerate(self._download_info["parts"]) if a["status"] == self.states.completed]) + self.save_parts = set() + self.download_parts = set() + + # La info no existe o no es correcta, comenzamos de 0 + except: + self._download_info["parts"] = [] + if self._file_size and self._part_size: + for x in range(0, self._file_size, self._part_size): + end = x + self._part_size - 1 + if end >= self._file_size: end = self._file_size - 1 + self._download_info["parts"].append( + {"start": x, "end": end, "current": x, "status": self.states.stopped}) + else: + self._download_info["parts"].append( + {"start": 0, "end": self._file_size - 1, "current": 0, "status": self.states.stopped}) + + self._download_info["size"] = self._file_size + self._start_downloaded = 0 + self.pending_parts = set([x for x in range(len(self._download_info["parts"]))]) + self.completed_parts = set() + self.save_parts = set() + self.download_parts = set() + + self.file.seek(0) + self.file.truncate() + + def __open_connection__(self, start, end): + headers = self._headers.copy() + if not end: end = "" + headers.update({"Range": "bytes=%s-%s" % (start, end)}) + if not sys.hexversion > 0x0204FFFF: + conn = urllib2.urlopen(urllib2.Request(self.url, headers=headers)) + else: + conn = urllib2.urlopen(urllib2.Request(self.url, headers=headers), timeout=5) + return conn + + def __check_consecutive__(self, id): + return id == 0 or (len(self.completed_parts) >= id and sorted(self.completed_parts)[id - 1] == id - 1) + + def __save_file__(self): + logger.info("Thread iniciado: %s" % threading.current_thread().name) + + while self._state == self.states.downloading: + if not self.pending_parts and not self.download_parts and not self.save_parts: # Descarga finalizada + self._state = self.states.completed + self.file.close() + continue + + elif not self.save_parts: + continue + + save_id = min(self.save_parts) + + if not self._seekable and self._download_info["parts"][save_id][ + "start"] >= 2 ** 31 and not self.__check_consecutive__(save_id): + continue + + if self._seekable or self._download_info["parts"][save_id]["start"] < 2 ** 31: + self.file.seek(self._download_info["parts"][save_id]["start"]) + + try: + # file = open(os.path.join(self.tmp_path, self._filename + ".part%s" % save_id), "rb") + # self.file.write(file.read()) + # file.close() + # os.remove(os.path.join(self.tmp_path, self._filename + ".part%s" % save_id)) + for a in self._buffer.pop(save_id): + self.file.write(a) + self.save_parts.remove(save_id) + self.completed_parts.add(save_id) + self._download_info["parts"][save_id]["status"] = self.states.completed + except: + import traceback + logger.error(traceback.format_exc()) + self._state = self.states.error + + if self.save_parts: + for s in 
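__url_to_headers__ above follows the usual Kodi convention of carrying extra headers inside the url itself, after a "|" separator, as "&"-joined, url-encoded pairs. For example (hypothetical values):

url = "http://example.com/v.mp4|Referer=http%3A%2F%2Fexample.com%2F&User-Agent=Mozilla%2F5.0"
# after __url_to_headers__(url):
#   self.url      -> "http://example.com/v.mp4"
#   self._headers gains {"Referer": "http://example.com/", "User-Agent": "Mozilla/5.0"}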
self.save_parts: + self._download_info["parts"][s]["status"] = self.states.stopped + self._download_info["parts"][s]["current"] = self._download_info["parts"][s]["start"] + + logger.info("Thread detenido: %s" % threading.current_thread().name) + + def __get_part_id__(self): + self._download_lock.acquire() + if len(self.pending_parts): + id = min(self.pending_parts) + self.pending_parts.remove(id) + self.download_parts.add(id) + self._download_lock.release() + return id + else: + self._download_lock.release() + return None + + def __set_part_connecting__(self, id): + logger.info("ID: %s Estableciendo conexión" % id) + self._download_info["parts"][id]["status"] = self.states.connecting + + def __set_part__error__(self, id): + logger.info("ID: %s Error al descargar" % id) + self._download_info["parts"][id]["status"] = self.states.error + self.pending_parts.add(id) + self.download_parts.remove(id) + + def __set_part__downloading__(self, id): + logger.info("ID: %s Descargando datos..." % id) + self._download_info["parts"][id]["status"] = self.states.downloading + + def __set_part_completed__(self, id): + logger.info("ID: %s ¡Descarga finalizada!" % id) + self._download_info["parts"][id]["status"] = self.states.saving + self.download_parts.remove(id) + self.save_parts.add(id) + while self._state == self.states.downloading and len(self._buffer) > self._max_connections + self._max_buffer: + time.sleep(0.1) + + def __set_part_stopped__(self, id): + if self._download_info["parts"][id]["status"] == self.states.downloading: + self._download_info["parts"][id]["status"] = self.states.stopped + self.download_parts.remove(id) + self.pending_parts.add(id) + + def __open_part_file__(self, id): + file = open(os.path.join(self.tmp_path, self._filename + ".part%s" % id), "a+") + file = open(os.path.join(self.tmp_path, self._filename + ".part%s" % id), "r+b") + file.seek(self._download_info["parts"][id]["current"] - self._download_info["parts"][id]["start"]) + return file + + def __start_part__(self): + logger.info("Thread Iniciado: %s" % threading.current_thread().name) + while self._state == self.states.downloading: + id = self.__get_part_id__() + if id is None: break + + self.__set_part_connecting__(id) + + try: + connection = self.__open_connection__(self._download_info["parts"][id]["current"], + self._download_info["parts"][id]["end"]) + except: + self.__set_part__error__(id) + time.sleep(5) + continue + + self.__set_part__downloading__(id) + # file = self.__open_part_file__(id) + + if not id in self._buffer: + self._buffer[id] = [] + speed = [] + + while self._state == self.states.downloading: + try: + start = time.time() + buffer = connection.read(self._block_size) + speed.append(len(buffer) / ((time.time() - start) or 0.001)) + except: + logger.info("ID: %s Error al descargar los datos" % id) + self._download_info["parts"][id]["status"] = self.states.error + self.pending_parts.add(id) + self.download_parts.remove(id) + break + else: + if len(buffer) and self._download_info["parts"][id]["current"] < self._download_info["parts"][id][ + "end"]: + # file.write(buffer) + self._buffer[id].append(buffer) + self._download_info["parts"][id]["current"] += len(buffer) + if len(speed) > 10: + velocidad_minima = sum(speed) / len(speed) / 3 + velocidad = speed[-1] + vm = self.__change_units__(velocidad_minima) + v = self.__change_units__(velocidad) + + if velocidad_minima > speed[-1] and velocidad_minima > speed[-2] and \ + self._download_info["parts"][id]["current"] < \ + self._download_info["parts"][id]["end"]: + 
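Each part cycles through stopped, connecting, downloading, saving and completed, with __save_file__ flushing buffered parts in id order. For non-seekable files, __check_consecutive__ gates the flush so a part past the 2 GB mark is only written once every earlier part is done. A worked example of that check:

# with completed_parts == {0, 1}:
#   __check_consecutive__(2)  ->  True   (parts 0 and 1 are both complete)
#   __check_consecutive__(3)  ->  False  (part 2 is still missing)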
connection.fp._sock.close() + logger.info( + "ID: %s ¡Reiniciando conexión! | Velocidad minima: %.2f %s/s | Velocidad: %.2f %s/s" % \ + (id, vm[1], vm[2], v[1], v[2])) + # file.close() + break + else: + self.__set_part_completed__(id) + connection.fp._sock.close() + # file.close() + break + + self.__set_part_stopped__(id) + logger.info("Thread detenido: %s" % threading.current_thread().name) diff --git a/plugin.video.alfa/core/downloadtools.py b/plugin.video.alfa/core/downloadtools.py new file mode 100755 index 00000000..98b48fcf --- /dev/null +++ b/plugin.video.alfa/core/downloadtools.py @@ -0,0 +1,1194 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Download Tools - Original based from code of VideoMonkey XBMC Plugin +# --------------------------------------------------------------------------------- + +import os.path +import re +import socket +import sys +import time +import urllib +import urllib2 + +import config +import logger + +entitydefs = { + 'AElig': u'\u00C6', # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1' + 'Aacute': u'\u00C1', # latin capital letter A with acute, U+00C1 ISOlat1' + 'Acirc': u'\u00C2', # latin capital letter A with circumflex, U+00C2 ISOlat1' + 'Agrave': u'\u00C0', # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1' + 'Alpha': u'\u0391', # greek capital letter alpha, U+0391' + 'Aring': u'\u00C5', # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1' + 'Atilde': u'\u00C3', # latin capital letter A with tilde, U+00C3 ISOlat1' + 'Auml': u'\u00C4', # latin capital letter A with diaeresis, U+00C4 ISOlat1' + 'Beta': u'\u0392', # greek capital letter beta, U+0392' + 'Ccedil': u'\u00C7', # latin capital letter C with cedilla, U+00C7 ISOlat1' + 'Chi': u'\u03A7', # greek capital letter chi, U+03A7' + 'Dagger': u'\u2021', # double dagger, U+2021 ISOpub' + 'Delta': u'\u0394', # greek capital letter delta, U+0394 ISOgrk3' + 'ETH': u'\u00D0', # latin capital letter ETH, U+00D0 ISOlat1' + 'Eacute': u'\u00C9', # latin capital letter E with acute, U+00C9 ISOlat1' + 'Ecirc': u'\u00CA', # latin capital letter E with circumflex, U+00CA ISOlat1' + 'Egrave': u'\u00C8', # latin capital letter E with grave, U+00C8 ISOlat1' + 'Epsilon': u'\u0395', # grek capital letter epsilon, U+0395' + 'Eta': u'\u0397', # greek capital letter eta, U+0397' + 'Euml': u'\u00CB', # latin capital letter E with diaeresis, U+00CB ISOlat1' + 'Gamma': u'\u0393', # greek capital letter gamma, U+0393 ISOgrk3' + 'Iacute': u'\u00CD', # latin capital letter I with acute, U+00CD ISOlat1' + 'Icirc': u'\u00CE', # latin capital letter I with circumflex, U+00CE ISOlat1' + 'Igrave': u'\u00CC', # latin capital letter I with grave, U+00CC ISOlat1' + 'Iota': u'\u0399', # greek capital letter iota, U+0399' + 'Iuml': u'\u00CF', # latin capital letter I with diaeresis, U+00CF ISOlat1' + 'Kappa': u'\u039A', # greek capital letter kappa, U+039A' + 'Lambda': u'\u039B', # greek capital letter lambda, U+039B ISOgrk3' + 'Mu': u'\u039C', # greek capital letter mu, U+039C' + 'Ntilde': u'\u00D1', # latin capital letter N with tilde, U+00D1 ISOlat1' + 'Nu': u'\u039D', # greek capital letter nu, U+039D' + 'OElig': u'\u0152', # latin capital ligature OE, U+0152 ISOlat2' + 'Oacute': u'\u00D3', # latin capital letter O with acute, U+00D3 ISOlat1' + 'Ocirc': u'\u00D4', # latin capital letter O with circumflex, U+00D4 ISOlat1' + 'Ograve': u'\u00D2', # latin capital letter O with grave, 
U+00D2 ISOlat1' + 'Omega': u'\u03A9', # greek capital letter omega, U+03A9 ISOgrk3' + 'Omicron': u'\u039F', # greek capital letter omicron, U+039F' + 'Oslash': u'\u00D8', # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1' + 'Otilde': u'\u00D5', # latin capital letter O with tilde, U+00D5 ISOlat1' + 'Ouml': u'\u00D6', # latin capital letter O with diaeresis, U+00D6 ISOlat1' + 'Phi': u'\u03A6', # greek capital letter phi, U+03A6 ISOgrk3' + 'Pi': u'\u03A0', # greek capital letter pi, U+03A0 ISOgrk3' + 'Prime': u'\u2033', # double prime = seconds = inches, U+2033 ISOtech' + 'Psi': u'\u03A8', # greek capital letter psi, U+03A8 ISOgrk3' + 'Rho': u'\u03A1', # greek capital letter rho, U+03A1' + 'Scaron': u'\u0160', # latin capital letter S with caron, U+0160 ISOlat2' + 'Sigma': u'\u03A3', # greek capital letter sigma, U+03A3 ISOgrk3' + 'THORN': u'\u00DE', # latin capital letter THORN, U+00DE ISOlat1' + 'Tau': u'\u03A4', # greek capital letter tau, U+03A4' + 'Theta': u'\u0398', # greek capital letter theta, U+0398 ISOgrk3' + 'Uacute': u'\u00DA', # latin capital letter U with acute, U+00DA ISOlat1' + 'Ucirc': u'\u00DB', # latin capital letter U with circumflex, U+00DB ISOlat1' + 'Ugrave': u'\u00D9', # latin capital letter U with grave, U+00D9 ISOlat1' + 'Upsilon': u'\u03A5', # greek capital letter upsilon, U+03A5 ISOgrk3' + 'Uuml': u'\u00DC', # latin capital letter U with diaeresis, U+00DC ISOlat1' + 'Xi': u'\u039E', # greek capital letter xi, U+039E ISOgrk3' + 'Yacute': u'\u00DD', # latin capital letter Y with acute, U+00DD ISOlat1' + 'Yuml': u'\u0178', # latin capital letter Y with diaeresis, U+0178 ISOlat2' + 'Zeta': u'\u0396', # greek capital letter zeta, U+0396' + 'aacute': u'\u00E1', # latin small letter a with acute, U+00E1 ISOlat1' + 'acirc': u'\u00E2', # latin small letter a with circumflex, U+00E2 ISOlat1' + 'acute': u'\u00B4', # acute accent = spacing acute, U+00B4 ISOdia' + 'aelig': u'\u00E6', # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1' + 'agrave': u'\u00E0', # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1' + 'alefsym': u'\u2135', # alef symbol = first transfinite cardinal, U+2135 NEW' + 'alpha': u'\u03B1', # greek small letter alpha, U+03B1 ISOgrk3' + 'amp': u'\u0026', # ampersand, U+0026 ISOnum' + 'and': u'\u2227', # logical and = wedge, U+2227 ISOtech' + 'ang': u'\u2220', # angle, U+2220 ISOamso' + 'aring': u'\u00E5', # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1' + 'asymp': u'\u2248', # almost equal to = asymptotic to, U+2248 ISOamsr' + 'atilde': u'\u00E3', # latin small letter a with tilde, U+00E3 ISOlat1' + 'auml': u'\u00E4', # latin small letter a with diaeresis, U+00E4 ISOlat1' + 'bdquo': u'\u201E', # double low-9 quotation mark, U+201E NEW' + 'beta': u'\u03B2', # greek small letter beta, U+03B2 ISOgrk3' + 'brvbar': u'\u00A6', # broken bar = broken vertical bar, U+00A6 ISOnum' + 'bull': u'\u2022', # bullet = black small circle, U+2022 ISOpub' + 'cap': u'\u2229', # intersection = cap, U+2229 ISOtech' + 'ccedil': u'\u00E7', # latin small letter c with cedilla, U+00E7 ISOlat1' + 'cedil': u'\u00B8', # cedilla = spacing cedilla, U+00B8 ISOdia' + 'cent': u'\u00A2', # cent sign, U+00A2 ISOnum' + 'chi': u'\u03C7', # greek small letter chi, U+03C7 ISOgrk3' + 'circ': u'\u02C6', # modifier letter circumflex accent, U+02C6 ISOpub' + 'clubs': u'\u2663', # black club suit = shamrock, U+2663 ISOpub' + 'cong': u'\u2245', # approximately equal to, U+2245 ISOtech' + 'copy': 
u'\u00A9', # copyright sign, U+00A9 ISOnum' + 'crarr': u'\u21B5', # downwards arrow with corner leftwards = carriage return, U+21B5 NEW' + 'cup': u'\u222A', # union = cup, U+222A ISOtech' + 'curren': u'\u00A4', # currency sign, U+00A4 ISOnum' + 'dArr': u'\u21D3', # downwards double arrow, U+21D3 ISOamsa' + 'dagger': u'\u2020', # dagger, U+2020 ISOpub' + 'darr': u'\u2193', # downwards arrow, U+2193 ISOnum' + 'deg': u'\u00B0', # degree sign, U+00B0 ISOnum' + 'delta': u'\u03B4', # greek small letter delta, U+03B4 ISOgrk3' + 'diams': u'\u2666', # black diamond suit, U+2666 ISOpub' + 'divide': u'\u00F7', # division sign, U+00F7 ISOnum' + 'eacute': u'\u00E9', # latin small letter e with acute, U+00E9 ISOlat1' + 'ecirc': u'\u00EA', # latin small letter e with circumflex, U+00EA ISOlat1' + 'egrave': u'\u00E8', # latin small letter e with grave, U+00E8 ISOlat1' + 'empty': u'\u2205', # empty set = null set = diameter, U+2205 ISOamso' + 'emsp': u'\u2003', # em space, U+2003 ISOpub' + 'ensp': u'\u2002', # en space, U+2002 ISOpub' + 'epsilon': u'\u03B5', # greek small letter epsilon, U+03B5 ISOgrk3' + 'equiv': u'\u2261', # identical to, U+2261 ISOtech' + 'eta': u'\u03B7', # greek small letter eta, U+03B7 ISOgrk3' + 'eth': u'\u00F0', # latin small letter eth, U+00F0 ISOlat1' + 'euml': u'\u00EB', # latin small letter e with diaeresis, U+00EB ISOlat1' + 'euro': u'\u20AC', # euro sign, U+20AC NEW' + 'exist': u'\u2203', # there exists, U+2203 ISOtech' + 'fnof': u'\u0192', # latin small f with hook = function = florin, U+0192 ISOtech' + 'forall': u'\u2200', # for all, U+2200 ISOtech' + 'frac12': u'\u00BD', # vulgar fraction one half = fraction one half, U+00BD ISOnum' + 'frac14': u'\u00BC', # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum' + 'frac34': u'\u00BE', # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum' + 'frasl': u'\u2044', # fraction slash, U+2044 NEW' + 'gamma': u'\u03B3', # greek small letter gamma, U+03B3 ISOgrk3' + 'ge': u'\u2265', # greater-than or equal to, U+2265 ISOtech' + 'gt': u'\u003E', # greater-than sign, U+003E ISOnum' + 'hArr': u'\u21D4', # left right double arrow, U+21D4 ISOamsa' + 'harr': u'\u2194', # left right arrow, U+2194 ISOamsa' + 'hearts': u'\u2665', # black heart suit = valentine, U+2665 ISOpub' + 'hellip': u'\u2026', # horizontal ellipsis = three dot leader, U+2026 ISOpub' + 'iacute': u'\u00ED', # latin small letter i with acute, U+00ED ISOlat1' + 'icirc': u'\u00EE', # latin small letter i with circumflex, U+00EE ISOlat1' + 'iexcl': u'\u00A1', # inverted exclamation mark, U+00A1 ISOnum' + 'igrave': u'\u00EC', # latin small letter i with grave, U+00EC ISOlat1' + 'image': u'\u2111', # blackletter capital I = imaginary part, U+2111 ISOamso' + 'infin': u'\u221E', # infinity, U+221E ISOtech' + 'int': u'\u222B', # integral, U+222B ISOtech' + 'iota': u'\u03B9', # greek small letter iota, U+03B9 ISOgrk3' + 'iquest': u'\u00BF', # inverted question mark = turned question mark, U+00BF ISOnum' + 'isin': u'\u2208', # element of, U+2208 ISOtech' + 'iuml': u'\u00EF', # latin small letter i with diaeresis, U+00EF ISOlat1' + 'kappa': u'\u03BA', # greek small letter kappa, U+03BA ISOgrk3' + 'lArr': u'\u21D0', # leftwards double arrow, U+21D0 ISOtech' + 'lambda': u'\u03BB', # greek small letter lambda, U+03BB ISOgrk3' + 'lang': u'\u2329', # left-pointing angle bracket = bra, U+2329 ISOtech' + 'laquo': u'\u00AB', # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum' + 'larr': u'\u2190', # leftwards arrow, U+2190 
ISOnum' + 'lceil': u'\u2308', # left ceiling = apl upstile, U+2308 ISOamsc' + 'ldquo': u'\u201C', # left double quotation mark, U+201C ISOnum' + 'le': u'\u2264', # less-than or equal to, U+2264 ISOtech' + 'lfloor': u'\u230A', # left floor = apl downstile, U+230A ISOamsc' + 'lowast': u'\u2217', # asterisk operator, U+2217 ISOtech' + 'loz': u'\u25CA', # lozenge, U+25CA ISOpub' + 'lrm': u'\u200E', # left-to-right mark, U+200E NEW RFC 2070' + 'lsaquo': u'\u2039', # single left-pointing angle quotation mark, U+2039 ISO proposed' + 'lsquo': u'\u2018', # left single quotation mark, U+2018 ISOnum' + 'lt': u'\u003C', # less-than sign, U+003C ISOnum' + 'macr': u'\u00AF', # macron = spacing macron = overline = APL overbar, U+00AF ISOdia' + 'mdash': u'\u2014', # em dash, U+2014 ISOpub' + 'micro': u'\u00B5', # micro sign, U+00B5 ISOnum' + 'middot': u'\u00B7', # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum' + 'minus': u'\u2212', # minus sign, U+2212 ISOtech' + 'mu': u'\u03BC', # greek small letter mu, U+03BC ISOgrk3' + 'nabla': u'\u2207', # nabla = backward difference, U+2207 ISOtech' + 'nbsp': u'\u00A0', # no-break space = non-breaking space, U+00A0 ISOnum' + 'ndash': u'\u2013', # en dash, U+2013 ISOpub' + 'ne': u'\u2260', # not equal to, U+2260 ISOtech' + 'ni': u'\u220B', # contains as member, U+220B ISOtech' + 'not': u'\u00AC', # not sign, U+00AC ISOnum' + 'notin': u'\u2209', # not an element of, U+2209 ISOtech' + 'nsub': u'\u2284', # not a subset of, U+2284 ISOamsn' + 'ntilde': u'\u00F1', # latin small letter n with tilde, U+00F1 ISOlat1' + 'nu': u'\u03BD', # greek small letter nu, U+03BD ISOgrk3' + 'oacute': u'\u00F3', # latin small letter o with acute, U+00F3 ISOlat1' + 'ocirc': u'\u00F4', # latin small letter o with circumflex, U+00F4 ISOlat1' + 'oelig': u'\u0153', # latin small ligature oe, U+0153 ISOlat2' + 'ograve': u'\u00F2', # latin small letter o with grave, U+00F2 ISOlat1' + 'oline': u'\u203E', # overline = spacing overscore, U+203E NEW' + 'omega': u'\u03C9', # greek small letter omega, U+03C9 ISOgrk3' + 'omicron': u'\u03BF', # greek small letter omicron, U+03BF NEW' + 'oplus': u'\u2295', # circled plus = direct sum, U+2295 ISOamsb' + 'or': u'\u2228', # logical or = vee, U+2228 ISOtech' + 'ordf': u'\u00AA', # feminine ordinal indicator, U+00AA ISOnum' + 'ordm': u'\u00BA', # masculine ordinal indicator, U+00BA ISOnum' + 'oslash': u'\u00F8', # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1' + 'otilde': u'\u00F5', # latin small letter o with tilde, U+00F5 ISOlat1' + 'otimes': u'\u2297', # circled times = vector product, U+2297 ISOamsb' + 'ouml': u'\u00F6', # latin small letter o with diaeresis, U+00F6 ISOlat1' + 'para': u'\u00B6', # pilcrow sign = paragraph sign, U+00B6 ISOnum' + 'part': u'\u2202', # partial differential, U+2202 ISOtech' + 'permil': u'\u2030', # per mille sign, U+2030 ISOtech' + 'perp': u'\u22A5', # up tack = orthogonal to = perpendicular, U+22A5 ISOtech' + 'phi': u'\u03C6', # greek small letter phi, U+03C6 ISOgrk3' + 'pi': u'\u03C0', # greek small letter pi, U+03C0 ISOgrk3' + 'piv': u'\u03D6', # greek pi symbol, U+03D6 ISOgrk3' + 'plusmn': u'\u00B1', # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum' + 'pound': u'\u00A3', # pound sign, U+00A3 ISOnum' + 'prime': u'\u2032', # prime = minutes = feet, U+2032 ISOtech' + 'prod': u'\u220F', # n-ary product = product sign, U+220F ISOamsb' + 'prop': u'\u221D', # proportional to, U+221D ISOtech' + 'psi': u'\u03C8', # greek small letter psi, U+03C8 ISOgrk3' + 'quot': u'\u0022', # 
quotation mark = APL quote, U+0022 ISOnum' + 'rArr': u'\u21D2', # rightwards double arrow, U+21D2 ISOtech' + 'radic': u'\u221A', # square root = radical sign, U+221A ISOtech' + 'rang': u'\u232A', # right-pointing angle bracket = ket, U+232A ISOtech' + 'raquo': u'\u00BB', # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum' + 'rarr': u'\u2192', # rightwards arrow, U+2192 ISOnum' + 'rceil': u'\u2309', # right ceiling, U+2309 ISOamsc' + 'rdquo': u'\u201D', # right double quotation mark, U+201D ISOnum' + 'real': u'\u211C', # blackletter capital R = real part symbol, U+211C ISOamso' + 'reg': u'\u00AE', # registered sign = registered trade mark sign, U+00AE ISOnum' + 'rfloor': u'\u230B', # right floor, U+230B ISOamsc' + 'rho': u'\u03C1', # greek small letter rho, U+03C1 ISOgrk3' + 'rlm': u'\u200F', # right-to-left mark, U+200F NEW RFC 2070' + 'rsaquo': u'\u203A', # single right-pointing angle quotation mark, U+203A ISO proposed' + 'rsquo': u'\u2019', # right single quotation mark, U+2019 ISOnum' + 'sbquo': u'\u201A', # single low-9 quotation mark, U+201A NEW' + 'scaron': u'\u0161', # latin small letter s with caron, U+0161 ISOlat2' + 'sdot': u'\u22C5', # dot operator, U+22C5 ISOamsb' + 'sect': u'\u00A7', # section sign, U+00A7 ISOnum' + 'shy': u'\u00AD', # soft hyphen = discretionary hyphen, U+00AD ISOnum' + 'sigma': u'\u03C3', # greek small letter sigma, U+03C3 ISOgrk3' + 'sigmaf': u'\u03C2', # greek small letter final sigma, U+03C2 ISOgrk3' + 'sim': u'\u223C', # tilde operator = varies with = similar to, U+223C ISOtech' + 'spades': u'\u2660', # black spade suit, U+2660 ISOpub' + 'sub': u'\u2282', # subset of, U+2282 ISOtech' + 'sube': u'\u2286', # subset of or equal to, U+2286 ISOtech' + 'sum': u'\u2211', # n-ary sumation, U+2211 ISOamsb' + 'sup': u'\u2283', # superset of, U+2283 ISOtech' + 'sup1': u'\u00B9', # superscript one = superscript digit one, U+00B9 ISOnum' + 'sup2': u'\u00B2', # superscript two = superscript digit two = squared, U+00B2 ISOnum' + 'sup3': u'\u00B3', # superscript three = superscript digit three = cubed, U+00B3 ISOnum' + 'supe': u'\u2287', # superset of or equal to, U+2287 ISOtech' + 'szlig': u'\u00DF', # latin small letter sharp s = ess-zed, U+00DF ISOlat1' + 'tau': u'\u03C4', # greek small letter tau, U+03C4 ISOgrk3' + 'there4': u'\u2234', # therefore, U+2234 ISOtech' + 'theta': u'\u03B8', # greek small letter theta, U+03B8 ISOgrk3' + 'thetasym': u'\u03D1', # greek small letter theta symbol, U+03D1 NEW' + 'thinsp': u'\u2009', # thin space, U+2009 ISOpub' + 'thorn': u'\u00FE', # latin small letter thorn with, U+00FE ISOlat1' + 'tilde': u'\u02DC', # small tilde, U+02DC ISOdia' + 'times': u'\u00D7', # multiplication sign, U+00D7 ISOnum' + 'trade': u'\u2122', # trade mark sign, U+2122 ISOnum' + 'uArr': u'\u21D1', # upwards double arrow, U+21D1 ISOamsa' + 'uacute': u'\u00FA', # latin small letter u with acute, U+00FA ISOlat1' + 'uarr': u'\u2191', # upwards arrow, U+2191 ISOnum' + 'ucirc': u'\u00FB', # latin small letter u with circumflex, U+00FB ISOlat1' + 'ugrave': u'\u00F9', # latin small letter u with grave, U+00F9 ISOlat1' + 'uml': u'\u00A8', # diaeresis = spacing diaeresis, U+00A8 ISOdia' + 'upsih': u'\u03D2', # greek upsilon with hook symbol, U+03D2 NEW' + 'upsilon': u'\u03C5', # greek small letter upsilon, U+03C5 ISOgrk3' + 'uuml': u'\u00FC', # latin small letter u with diaeresis, U+00FC ISOlat1' + 'weierp': u'\u2118', # script capital P = power set = Weierstrass p, U+2118 ISOamso' + 'xi': u'\u03BE', # greek small letter xi, U+03BE 
ISOgrk3' + 'yacute': u'\u00FD', # latin small letter y with acute, U+00FD ISOlat1' + 'yen': u'\u00A5', # yen sign = yuan sign, U+00A5 ISOnum' + 'yuml': u'\u00FF', # latin small letter y with diaeresis, U+00FF ISOlat1' + 'zeta': u'\u03B6', # greek small letter zeta, U+03B6 ISOgrk3' + 'zwj': u'\u200D', # zero width joiner, U+200D NEW RFC 2070' + 'zwnj': u'\u200C' # zero width non-joiner, U+200C NEW RFC 2070' +} + +entitydefs2 = { + '$': '%24', + '&': '%26', + '+': '%2B', + ',': '%2C', + '/': '%2F', + ':': '%3A', + ';': '%3B', + '=': '%3D', + '?': '%3F', + '@': '%40', + ' ': '%20', + '"': '%22', + '<': '%3C', + '>': '%3E', + '#': '%23', + '%': '%25', + '{': '%7B', + '}': '%7D', + '|': '%7C', + '\\': '%5C', + '^': '%5E', + '~': '%7E', + '[': '%5B', + ']': '%5D', + '`': '%60' +} + +entitydefs3 = { + u'ÂÁÀÄÃÅ': u'A', + u'âáàäãå': u'a', + u'ÔÓÒÖÕ': u'O', + u'ôóòöõðø': u'o', + u'ÛÚÙÜ': u'U', + u'ûúùüµ': u'u', + u'ÊÉÈË': u'E', + u'êéèë': u'e', + u'ÎÍÌÏ': u'I', + u'îìíï': u'i', + u'ñ': u'n', + u'ß': u'B', + u'÷': u'%', + u'ç': u'c', + u'æ': u'ae' +} + + +def limpia_nombre_caracteres_especiales(s): + if not s: + return '' + badchars = '\\/:*?\"<>|' + for c in badchars: + s = s.replace(c, '') + return s + + +def limpia_nombre_sin_acentos(s): + if not s: + return '' + for key, value in entitydefs3.iteritems(): + for c in key: + s = s.replace(c, value) + return s + + +def limpia_nombre_excepto_1(s): + if not s: + return '' + + # Titulo de entrada + ''' + try: + logger.info("s1="+urllib.quote_plus(s)) + except: + logger.info("s1=no printable") + ''' + + # Convierte a unicode + try: + s = unicode(s, "utf-8") + except UnicodeError: + # logger.info("no es utf-8") + try: + s = unicode(s, "iso-8859-1") + except UnicodeError: + # logger.info("no es iso-8859-1") + pass + ''' + try: + logger.info("s2="+urllib.quote_plus(s)) + except: + logger.info("s2=no printable") + ''' + + # Elimina acentos + s = limpia_nombre_sin_acentos(s) + ''' + try: + logger.info("s3="+urllib.quote_plus(s)) + except: + logger.info("s3=no printable") + ''' + + # Elimina caracteres prohibidos + validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$%&'()-@[]^_`{}~." + stripped = ''.join(c for c in s if c in validchars) + ''' + try: + logger.info("s4="+urllib.quote_plus(stripped)) + except: + logger.info("s4=no printable") + ''' + + # Convierte a iso + s = stripped.encode("iso-8859-1") + ''' + try: + logger.info("s5="+urllib.quote_plus(s)) + except: + logger.info("s5=no printable") + ''' + + return s + + +def limpia_nombre_excepto_2(s): + if not s: + return '' + validchars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890." + stripped = ''.join(c for c in s if c in validchars) + return stripped + + +def getfilefromtitle(url, title): + # Imprime en el log lo que va a descartar + logger.info("title=" + title) + logger.info("url=" + url) + plataforma = config.get_system_platform() + logger.info("plataforma=" + plataforma) + + # nombrefichero = xbmc.makeLegalFilename(title + url[-4:]) + import scrapertools + + nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:] + logger.info("nombrefichero=%s" % nombrefichero) + if "videobb" in url or "videozer" in url or "putlocker" in url: + nombrefichero = title + ".flv" + if "videobam" in url: + nombrefichero = title + "." 
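The limpia_nombre_* helpers above sanitize titles into safe filenames: one strips characters that are illegal on most filesystems, another folds accented characters through the entitydefs3 table. A couple of illustrative calls:

limpia_nombre_caracteres_especiales('Cap: 1x01 "HD"?')  # -> 'Cap 1x01 HD'
limpia_nombre_sin_acentos(u"Cañón")                     # -> u"Canon"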
+ url.rsplit(".", 1)[1][0:3] + + logger.info("nombrefichero=%s" % nombrefichero) + + nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero) + + logger.info("nombrefichero=%s" % nombrefichero) + + fullpath = os.path.join(config.get_setting("downloadpath"), nombrefichero) + logger.info("fullpath=%s" % fullpath) + + if config.is_xbmc() and fullpath.startswith("special://"): + import xbmc + fullpath = xbmc.translatePath(fullpath) + + return fullpath + + +def downloadtitle(url, title): + fullpath = getfilefromtitle(url, title) + return downloadfile(url, fullpath) + + +def downloadbest(video_urls, title, continuar=False): + logger.info() + + # Le da la vuelta, para poner el de más calidad primero ( list() es para que haga una copia ) + invertida = list(video_urls) + invertida.reverse() + + for elemento in invertida: + # videotitle = elemento[0] + url = elemento[1] + logger.info("Descargando opción " + title + " " + url.encode('ascii', 'ignore')) + + # Calcula el fichero donde debe grabar + try: + fullpath = getfilefromtitle(url, title.strip()) + # Si falla, es porque la URL no vale para nada + except: + import traceback + logger.error(traceback.format_exc()) + continue + + # Descarga + try: + ret = downloadfile(url, fullpath, continuar=continuar) + # Llegados a este punto, normalmente es un timeout + except urllib2.URLError, e: + import traceback + logger.error(traceback.format_exc()) + ret = -2 + + # El usuario ha cancelado la descarga + if ret == -1: + return -1 + else: + # El fichero ni siquiera existe + if not os.path.exists(fullpath): + logger.info("-> No ha descargado nada, probando con la siguiente opción si existe") + # El fichero existe + else: + tamanyo = os.path.getsize(fullpath) + + # Tiene tamaño 0 + if tamanyo == 0: + logger.info("-> Descargado un fichero con tamaño 0, probando con la siguiente opción si existe") + os.remove(fullpath) + else: + logger.info("-> Descargado un fichero con tamaño %d, lo da por bueno" % tamanyo) + return 0 + + return -2 + + +def downloadfile(url, nombrefichero, headers=None, silent=False, continuar=False, resumir=True): + logger.info("url=" + url) + logger.info("nombrefichero=" + nombrefichero) + + if headers is None: + headers = [] + + progreso = None + + if config.is_xbmc() and nombrefichero.startswith("special://"): + import xbmc + nombrefichero = xbmc.translatePath(nombrefichero) + + try: + # Si no es XBMC, siempre a "Silent" + from platformcode import platformtools + + # antes + # f=open(nombrefichero,"wb") + try: + import xbmc + nombrefichero = xbmc.makeLegalFilename(nombrefichero) + except: + pass + logger.info("nombrefichero=" + nombrefichero) + + # El fichero existe y se quiere continuar + if os.path.exists(nombrefichero) and continuar: + # try: + # import xbmcvfs + # f = xbmcvfs.File(nombrefichero) + # existSize = f.size(nombrefichero) + # except: + f = open(nombrefichero, 'r+b') + if resumir: + exist_size = os.path.getsize(nombrefichero) + logger.info("el fichero existe, size=%d" % exist_size) + grabado = exist_size + f.seek(exist_size) + else: + exist_size = 0 + grabado = 0 + + # el fichero ya existe y no se quiere continuar, se aborta + elif os.path.exists(nombrefichero) and not continuar: + logger.info("el fichero existe, no se descarga de nuevo") + return -3 + + # el fichero no existe + else: + exist_size = 0 + logger.info("el fichero no existe") + + # try: + # import xbmcvfs + # f = xbmcvfs.File(nombrefichero,"w") + # except: + f = open(nombrefichero, 'wb') + grabado = 0 + + # Crea el diálogo de progreso + if not silent: + 
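downloadbest above relies on the convention that video_urls is ordered worst-to-best quality, which is why it reverses a copy (list(video_urls)) before iterating: the best mirror is tried first, and a lower quality is only attempted when a download yields nothing or a zero-byte file. Hypothetical input:

video_urls = [("360p", url_360), ("720p", url_720)]  # placeholder tuples
downloadbest(video_urls, "Show 1x01")  # tries url_720 first, falls back to url_360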
progreso = platformtools.dialog_progress("plugin", "Descargando...", url, nombrefichero) + + # Si la plataforma no devuelve un cuadro de diálogo válido, asume modo silencio + if progreso is None: + silent = True + + if "|" in url: + additional_headers = url.split("|")[1] + if "&" in additional_headers: + additional_headers = additional_headers.split("&") + else: + additional_headers = [additional_headers] + + for additional_header in additional_headers: + logger.info("additional_header: " + additional_header) + name = re.findall("(.*?)=.*?", additional_header)[0] + value = urllib.unquote_plus(re.findall(".*?=(.*?)$", additional_header)[0]) + headers.append([name, value]) + + url = url.split("|")[0] + logger.info("url=" + url) + + # Timeout del socket a 60 segundos + socket.setdefaulttimeout(60) + + h = urllib2.HTTPHandler(debuglevel=0) + request = urllib2.Request(url) + for header in headers: + logger.info("Header=" + header[0] + ": " + header[1]) + request.add_header(header[0], header[1]) + + if exist_size > 0: + request.add_header('Range', 'bytes=%d-' % (exist_size,)) + + opener = urllib2.build_opener(h) + urllib2.install_opener(opener) + try: + connexion = opener.open(request) + except urllib2.HTTPError, e: + logger.error("error %d (%s) al abrir la url %s" % + (e.code, e.msg, url)) + # print e.code + # print e.msg + # print e.hdrs + # print e.fp + f.close() + if not silent: + progreso.close() + # El error 416 es que el rango pedido es mayor que el fichero => es que ya está completo + if e.code == 416: + return 0 + else: + return -2 + + try: + totalfichero = int(connexion.headers["Content-Length"]) + except ValueError: + totalfichero = 1 + + if exist_size > 0: + totalfichero = totalfichero + exist_size + + logger.info("Content-Length=%s" % totalfichero) + + blocksize = 100 * 1024 + + bloqueleido = connexion.read(blocksize) + logger.info("Iniciando descarga del fichero, bloqueleido=%s" % len(bloqueleido)) + + maxreintentos = 10 + + while len(bloqueleido) > 0: + try: + # Escribe el bloque leido + f.write(bloqueleido) + grabado += len(bloqueleido) + percent = int(float(grabado) * 100 / float(totalfichero)) + totalmb = float(float(totalfichero) / (1024 * 1024)) + descargadosmb = float(float(grabado) / (1024 * 1024)) + + # Lee el siguiente bloque, reintentando para no parar todo al primer timeout + reintentos = 0 + while reintentos <= maxreintentos: + try: + before = time.time() + bloqueleido = connexion.read(blocksize) + after = time.time() + if (after - before) > 0: + velocidad = len(bloqueleido) / (after - before) + falta = totalfichero - grabado + if velocidad > 0: + tiempofalta = falta / velocidad + else: + tiempofalta = 0 + # logger.info(sec_to_hms(tiempofalta)) + if not silent: + # progreso.update( percent , "Descargando %.2fMB de %.2fMB (%d%%)" % ( descargadosmb , + # totalmb , percent),"Falta %s - Velocidad %.2f Kb/s" % ( sec_to_hms(tiempofalta) , + # velocidad/1024 ), os.path.basename(nombrefichero) ) + progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s falta " % + (descargadosmb, totalmb, percent, velocidad / 1024, + sec_to_hms(tiempofalta))) + break + except: + reintentos += 1 + logger.info("ERROR en la descarga del bloque, reintento %d" % reintentos) + import traceback + logger.error(traceback.print_exc()) + + # El usuario cancelo la descarga + try: + if progreso.iscanceled(): + logger.info("Descarga del fichero cancelada") + f.close() + progreso.close() + return -1 + except: + pass + + # Ha habido un error en la descarga + if reintentos > maxreintentos: + 
logger.info("ERROR en la descarga del fichero") + f.close() + if not silent: + progreso.close() + + return -2 + + except: + import traceback + logger.error(traceback.print_exc()) + + f.close() + if not silent: + progreso.close() + + # platformtools.dialog_ok('Error al descargar' , 'Se ha producido un error' , 'al descargar el archivo') + + return -2 + + except: + if url.startswith("rtmp"): + error = downloadfileRTMP(url, nombrefichero, silent) + if error and not silent: + from platformcode import platformtools + platformtools.dialog_ok("No puedes descargar ese vídeo", "Las descargas en RTMP aún no", "están soportadas") + else: + import traceback + from pprint import pprint + exc_type, exc_value, exc_tb = sys.exc_info() + lines = traceback.format_exception(exc_type, exc_value, exc_tb) + for line in lines: + line_splits = line.split("\n") + for line_split in line_splits: + logger.error(line_split) + + try: + f.close() + except: + pass + + if not silent: + try: + progreso.close() + except: + pass + + logger.info("Fin descarga del fichero") + + +def downloadfileRTMP(url, nombrefichero, silent): + ''' No usa librtmp ya que no siempre está disponible. + Lanza un subproceso con rtmpdump. En Windows es necesario instalarlo. + No usa threads así que no muestra ninguna barra de progreso ni tampoco + se marca el final real de la descarga en el log info. + ''' + Programfiles = os.getenv('Programfiles') + if Programfiles: # Windows + rtmpdump_cmd = Programfiles + "/rtmpdump/rtmpdump.exe" + nombrefichero = '"' + nombrefichero + '"' # Windows necesita las comillas en el nombre + else: + rtmpdump_cmd = "/usr/bin/rtmpdump" + + if not os.path.isfile(rtmpdump_cmd) and not silent: + from platformcode import platformtools + advertencia = platformtools.dialog_ok("Falta " + rtmpdump_cmd, "Comprueba que rtmpdump está instalado") + return True + + valid_rtmpdump_options = ["help", "url", "rtmp", "host", "port", "socks", "protocol", "playpath", "playlist", + "swfUrl", "tcUrl", "pageUrl", "app", "swfhash", "swfsize", "swfVfy", "swfAge", "auth", + "conn", "flashVer", "live", "subscribe", "realtime", "flv", "resume", "timeout", "start", + "stop", "token", "jtv", "hashes", "buffer", "skip", "quiet", "verbose", + "debug"] # for rtmpdump 2.4 + + url_args = url.split(' ') + rtmp_url = url_args[0] + rtmp_args = url_args[1:] + + rtmpdump_args = ["--rtmp", rtmp_url] + for arg in rtmp_args: + n = arg.find('=') + if n < 0: + if arg not in valid_rtmpdump_options: + continue + rtmpdump_args += ["--" + arg] + else: + if arg[:n] not in valid_rtmpdump_options: + continue + rtmpdump_args += ["--" + arg[:n], arg[n + 1:]] + + try: + rtmpdump_args = [rtmpdump_cmd] + rtmpdump_args + ["-o", nombrefichero] + from os import spawnv, P_NOWAIT + logger.info("Iniciando descarga del fichero: %s" % " ".join(rtmpdump_args)) + rtmpdump_exit = spawnv(P_NOWAIT, rtmpdump_cmd, rtmpdump_args) + if not silent: + from platformcode import platformtools + advertencia = platformtools.dialog_ok("La opción de descarga RTMP es experimental", + "y el vídeo se descargará en segundo plano.", + "No se mostrará ninguna barra de progreso.") + except: + return True + + return + + +def downloadfileGzipped(url, pathfichero): + logger.info("url=" + url) + nombrefichero = pathfichero + logger.info("nombrefichero=" + nombrefichero) + + import xbmc + nombrefichero = xbmc.makeLegalFilename(nombrefichero) + logger.info("nombrefichero=" + nombrefichero) + patron = "(http://[^/]+)/.+" + matches = re.compile(patron, re.DOTALL).findall(url) + + if len(matches): + 
logger.info("URL principal :" + matches[0]) + url1 = matches[0] + else: + url1 = url + + txheaders = { + 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; ' + 'Media Center PC 5.0; .NET CLR 3.0.04506)', + 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', + 'Accept-Language': 'es-es,es;q=0.8,en-us;q=0.5,en;q=0.3', + 'Accept-Encoding': 'gzip,deflate', + 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', + 'Keep-Alive': '115', + 'Connection': 'keep-alive', + 'Referer': url1, + } + + txdata = "" + + # Crea el diálogo de progreso + from platformcode import platformtools + progreso = platformtools.dialog_progress("addon", "Descargando...", url.split("|")[0], nombrefichero) + + # Timeout del socket a 60 segundos + socket.setdefaulttimeout(10) + + h = urllib2.HTTPHandler(debuglevel=0) + request = urllib2.Request(url, txdata, txheaders) + # if existSize > 0: + # request.add_header('Range', 'bytes=%d-' % (existSize, )) + + opener = urllib2.build_opener(h) + urllib2.install_opener(opener) + try: + connexion = opener.open(request) + except urllib2.HTTPError, e: + logger.error("error %d (%s) al abrir la url %s" % + (e.code, e.msg, url)) + # print e.code + # print e.msg + # print e.hdrs + # print e.fp + progreso.close() + # El error 416 es que el rango pedido es mayor que el fichero => es que ya está completo + if e.code == 416: + return 0 + else: + return -2 + + nombre_fichero_base = os.path.basename(nombrefichero) + if len(nombre_fichero_base) == 0: + logger.info("Buscando nombre en el Headers de respuesta") + nombre_base = connexion.headers["Content-Disposition"] + logger.info(nombre_base) + patron = 'filename="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(nombre_base) + if len(matches) > 0: + titulo = matches[0] + titulo = GetTitleFromFile(titulo) + nombrefichero = os.path.join(pathfichero, titulo) + else: + logger.info("Nombre del fichero no encontrado, Colocando nombre temporal :sin_nombre.txt") + titulo = "sin_nombre.txt" + nombrefichero = os.path.join(pathfichero, titulo) + totalfichero = int(connexion.headers["Content-Length"]) + + # despues + f = open(nombrefichero, 'w') + + logger.info("fichero nuevo abierto") + + grabado = 0 + logger.info("Content-Length=%s" % totalfichero) + + blocksize = 100 * 1024 + + bloqueleido = connexion.read(blocksize) + + try: + import StringIO + compressedstream = StringIO.StringIO(bloqueleido) + import gzip + gzipper = gzip.GzipFile(fileobj=compressedstream) + bloquedata = gzipper.read() + gzipper.close() + logger.info("Iniciando descarga del fichero, bloqueleido=%s" % len(bloqueleido)) + except: + logger.error("ERROR : El archivo a descargar no esta comprimido con Gzip") + f.close() + progreso.close() + return -2 + + maxreintentos = 10 + + while len(bloqueleido) > 0: + try: + # Escribe el bloque leido + f.write(bloquedata) + grabado += len(bloqueleido) + percent = int(float(grabado) * 100 / float(totalfichero)) + totalmb = float(float(totalfichero) / (1024 * 1024)) + descargadosmb = float(float(grabado) / (1024 * 1024)) + + # Lee el siguiente bloque, reintentando para no parar todo al primer timeout + reintentos = 0 + while reintentos <= maxreintentos: + try: + before = time.time() + bloqueleido = connexion.read(blocksize) + + import gzip + import StringIO + compressedstream = StringIO.StringIO(bloqueleido) + gzipper = gzip.GzipFile(fileobj=compressedstream) + bloquedata = gzipper.read() + gzipper.close() + after = time.time() + if (after - before) > 0: + velocidad = 
len(bloqueleido) / (after - before) + falta = totalfichero - grabado + if velocidad > 0: + tiempofalta = falta / velocidad + else: + tiempofalta = 0 + logger.info(sec_to_hms(tiempofalta)) + progreso.update(percent, "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s falta " % + (descargadosmb, totalmb, percent, velocidad / 1024, sec_to_hms(tiempofalta))) + break + except: + reintentos += 1 + logger.info("ERROR en la descarga del bloque, reintento %d" % reintentos) + for line in sys.exc_info(): + logger.error("%s" % line) + + # El usuario cancelo la descarga + if progreso.iscanceled(): + logger.info("Descarga del fichero cancelada") + f.close() + progreso.close() + return -1 + + # Ha habido un error en la descarga + if reintentos > maxreintentos: + logger.info("ERROR en la descarga del fichero") + f.close() + progreso.close() + + return -2 + + except: + logger.info("ERROR en la descarga del fichero") + for line in sys.exc_info(): + logger.error("%s" % line) + f.close() + progreso.close() + + return -2 + f.close() + + # print data + progreso.close() + logger.info("Fin descarga del fichero") + return nombrefichero + + +def GetTitleFromFile(title): + # Imprime en el log lo que va a descartar + logger.info("titulo=" + title) + plataforma = config.get_system_platform() + logger.info("plataforma=" + plataforma) + + # nombrefichero = xbmc.makeLegalFilename(title + url[-4:]) + nombrefichero = title + return nombrefichero + + +def sec_to_hms(seconds): + m, s = divmod(int(seconds), 60) + h, m = divmod(m, 60) + return "%02d:%02d:%02d" % (h, m, s) + + +def downloadIfNotModifiedSince(url, timestamp): + logger.info("(" + url + "," + time.ctime(timestamp) + ")") + + # Convierte la fecha a GMT + fecha_formateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp)) + logger.info("fechaFormateada=%s" % fecha_formateada) + + # Comprueba si ha cambiado + inicio = time.clock() + req = urllib2.Request(url) + req.add_header('If-Modified-Since', fecha_formateada) + req.add_header('User-Agent', + 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12') + + updated = False + + try: + response = urllib2.urlopen(req) + data = response.read() + # info = response.info() + # logger.info( info.headers ) + + # Si llega hasta aquí, es que ha cambiado + updated = True + response.close() + + except urllib2.URLError, e: + # Si devuelve 304 es que no ha cambiado + if hasattr(e, 'code'): + logger.info("Codigo de respuesta HTTP : %d" % e.code) + if e.code == 304: + logger.info("No ha cambiado") + updated = False + # Agarra los errores con codigo de respuesta del servidor externo solicitado + else: + for line in sys.exc_info(): + logger.error("%s" % line) + data = "" + + fin = time.clock() + logger.info("Descargado en %d segundos " % (fin - inicio + 1)) + + return updated, data + + +def download_all_episodes(item, channel, first_episode="", preferred_server="vidspot", filter_language=""): + logger.info("show=" + item.show) + show_title = item.show + + # Obtiene el listado desde el que se llamó + action = item.extra + + # Esta marca es porque el item tiene algo más aparte en el atributo "extra" + if "###" in item.extra: + action = item.extra.split("###")[0] + item.extra = item.extra.split("###")[1] + + episode_itemlist = getattr(channel, action)(item) + + # Ordena los episodios para que funcione el filtro de first_episode + episode_itemlist = sorted(episode_itemlist, key=lambda it: it.title) + + from core import servertools + from core import scrapertools + + best_server = 
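downloadIfNotModifiedSince above wraps a conditional GET: timestamp is formatted as an HTTP date for the If-Modified-Since header, a 200 returns the fresh body, and a 304 means unchanged. Typical polling use (hypothetical url and persistence helper):

updated, data = downloadIfNotModifiedSince("http://example.com/channels.xml", last_sync)
if updated:
    write_cache(data)  # hypothetical helper
# on HTTP 304, updated is False and data is left empty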
preferred_server + # worst_server = "moevideos" + + # Para cada episodio + if first_episode == "": + empezar = True + else: + empezar = False + + for episode_item in episode_itemlist: + try: + logger.info("episode=" + episode_item.title) + episode_title = scrapertools.get_match(episode_item.title, "(\d+x\d+)") + logger.info("episode=" + episode_title) + except: + import traceback + logger.error(traceback.format_exc()) + continue + + if first_episode != "" and episode_title == first_episode: + empezar = True + + if episodio_ya_descargado(show_title, episode_title): + continue + + if not empezar: + continue + + # Extrae los mirrors + try: + mirrors_itemlist = channel.findvideos(episode_item) + except: + mirrors_itemlist = servertools.find_video_items(episode_item) + print mirrors_itemlist + + descargado = False + + new_mirror_itemlist_1 = [] + new_mirror_itemlist_2 = [] + new_mirror_itemlist_3 = [] + new_mirror_itemlist_4 = [] + new_mirror_itemlist_5 = [] + new_mirror_itemlist_6 = [] + + for mirror_item in mirrors_itemlist: + + # Si está en español va al principio, si no va al final + if "(Español)" in mirror_item.title: + if best_server in mirror_item.title.lower(): + new_mirror_itemlist_1.append(mirror_item) + else: + new_mirror_itemlist_2.append(mirror_item) + elif "(Latino)" in mirror_item.title: + if best_server in mirror_item.title.lower(): + new_mirror_itemlist_3.append(mirror_item) + else: + new_mirror_itemlist_4.append(mirror_item) + elif "(VOS)" in mirror_item.title: + if best_server in mirror_item.title.lower(): + new_mirror_itemlist_3.append(mirror_item) + else: + new_mirror_itemlist_4.append(mirror_item) + else: + if best_server in mirror_item.title.lower(): + new_mirror_itemlist_5.append(mirror_item) + else: + new_mirror_itemlist_6.append(mirror_item) + + mirrors_itemlist = (new_mirror_itemlist_1 + new_mirror_itemlist_2 + new_mirror_itemlist_3 + + new_mirror_itemlist_4 + new_mirror_itemlist_5 + new_mirror_itemlist_6) + + for mirror_item in mirrors_itemlist: + logger.info("mirror=" + mirror_item.title) + + if "(Español)" in mirror_item.title: + idioma = "(Español)" + codigo_idioma = "es" + elif "(Latino)" in mirror_item.title: + idioma = "(Latino)" + codigo_idioma = "lat" + elif "(VOS)" in mirror_item.title: + idioma = "(VOS)" + codigo_idioma = "vos" + elif "(VO)" in mirror_item.title: + idioma = "(VO)" + codigo_idioma = "vo" + else: + idioma = "(Desconocido)" + codigo_idioma = "desconocido" + + logger.info("filter_language=#" + filter_language + "#, codigo_idioma=#" + codigo_idioma + "#") + if filter_language == "" or (filter_language != "" and filter_language == codigo_idioma): + logger.info("downloading mirror") + else: + logger.info("language " + codigo_idioma + " filtered, skipping") + continue + + if hasattr(channel, 'play'): + video_items = channel.play(mirror_item) + else: + video_items = [mirror_item] + + if len(video_items) > 0: + video_item = video_items[0] + + # Comprueba que está disponible + video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(video_item.server, + video_item.url, + video_password="", + muestra_dialogo=False) + + # Lo añade a la lista de descargas + if puedes: + logger.info("downloading mirror started...") + # El vídeo de más calidad es el último + # mediaurl = video_urls[len(video_urls) - 1][1] + devuelve = downloadbest(video_urls, show_title + " " + episode_title + " " + idioma + + " [" + video_item.server + "]", continuar=False) + + if devuelve == 0: + logger.info("download ok") + descargado = True + break + elif devuelve == 
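The six new_mirror_itemlist_* buckets above encode the mirror priority: Spanish audio on the preferred server, Spanish elsewhere, Latino/VOS on the preferred server, Latino/VOS elsewhere, then everything else (note that Latino and VOS share buckets 3 and 4). An equivalent formulation as a sort key, a sketch rather than the code's own mechanism:

def mirror_priority(title, best_server):
    if "(Español)" in title:
        group = 0
    elif "(Latino)" in title or "(VOS)" in title:
        group = 1
    else:
        group = 2
    # within each group, the preferred server comes first
    return group * 2 + (0 if best_server in title.lower() else 1)

mirrors_itemlist.sort(key=lambda it: mirror_priority(it.title, best_server))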
-1: + try: + from platformcode import platformtools + platformtools.dialog_ok("plugin", "Descarga abortada") + except: + pass + return + else: + logger.info("download error, try another mirror") + continue + + else: + logger.info("downloading mirror not available... trying next") + + if not descargado: + logger.info("EPISODIO NO DESCARGADO " + episode_title) + + +def episodio_ya_descargado(show_title, episode_title): + import scrapertools + ficheros = os.listdir(".") + + for fichero in ficheros: + # logger.info("fichero="+fichero) + if fichero.lower().startswith(show_title.lower()) and \ + scrapertools.find_single_match(fichero, "(\d+x\d+)") == episode_title: + logger.info("encontrado!") + return True + + return False diff --git a/plugin.video.alfa/core/entities.py b/plugin.video.alfa/core/entities.py new file mode 100755 index 00000000..523fb7fd --- /dev/null +++ b/plugin.video.alfa/core/entities.py @@ -0,0 +1,2239 @@ +# -*- coding: utf-8 -*- +### https://hg.python.org/cpython/file/3.6/Lib/html/entities.py + +"""HTML character entity references.""" + +# maps the HTML5 named character references to the equivalent Unicode character(s) +html5 = { + 'Aacute': '\xc1', + 'aacute': '\xe1', + 'Aacute;': '\xc1', + 'aacute;': '\xe1', + 'Abreve;': '\u0102', + 'abreve;': '\u0103', + 'ac;': '\u223e', + 'acd;': '\u223f', + 'acE;': '\u223e\u0333', + 'Acirc': '\xc2', + 'acirc': '\xe2', + 'Acirc;': '\xc2', + 'acirc;': '\xe2', + 'acute': '\xb4', + 'acute;': '\xb4', + 'Acy;': '\u0410', + 'acy;': '\u0430', + 'AElig': '\xc6', + 'aelig': '\xe6', + 'AElig;': '\xc6', + 'aelig;': '\xe6', + 'af;': '\u2061', + 'Afr;': '\U0001d504', + 'afr;': '\U0001d51e', + 'Agrave': '\xc0', + 'agrave': '\xe0', + 'Agrave;': '\xc0', + 'agrave;': '\xe0', + 'alefsym;': '\u2135', + 'aleph;': '\u2135', + 'Alpha;': '\u0391', + 'alpha;': '\u03b1', + 'Amacr;': '\u0100', + 'amacr;': '\u0101', + 'amalg;': '\u2a3f', + 'AMP': '&', + 'amp': '&', + 'AMP;': '&', + 'amp;': '&', + 'And;': '\u2a53', + 'and;': '\u2227', + 'andand;': '\u2a55', + 'andd;': '\u2a5c', + 'andslope;': '\u2a58', + 'andv;': '\u2a5a', + 'ang;': '\u2220', + 'ange;': '\u29a4', + 'angle;': '\u2220', + 'angmsd;': '\u2221', + 'angmsdaa;': '\u29a8', + 'angmsdab;': '\u29a9', + 'angmsdac;': '\u29aa', + 'angmsdad;': '\u29ab', + 'angmsdae;': '\u29ac', + 'angmsdaf;': '\u29ad', + 'angmsdag;': '\u29ae', + 'angmsdah;': '\u29af', + 'angrt;': '\u221f', + 'angrtvb;': '\u22be', + 'angrtvbd;': '\u299d', + 'angsph;': '\u2222', + 'angst;': '\xc5', + 'angzarr;': '\u237c', + 'Aogon;': '\u0104', + 'aogon;': '\u0105', + 'Aopf;': '\U0001d538', + 'aopf;': '\U0001d552', + 'ap;': '\u2248', + 'apacir;': '\u2a6f', + 'apE;': '\u2a70', + 'ape;': '\u224a', + 'apid;': '\u224b', + 'apos;': "'", + 'ApplyFunction;': '\u2061', + 'approx;': '\u2248', + 'approxeq;': '\u224a', + 'Aring': '\xc5', + 'aring': '\xe5', + 'Aring;': '\xc5', + 'aring;': '\xe5', + 'Ascr;': '\U0001d49c', + 'ascr;': '\U0001d4b6', + 'Assign;': '\u2254', + 'ast;': '*', + 'asymp;': '\u2248', + 'asympeq;': '\u224d', + 'Atilde': '\xc3', + 'atilde': '\xe3', + 'Atilde;': '\xc3', + 'atilde;': '\xe3', + 'Auml': '\xc4', + 'auml': '\xe4', + 'Auml;': '\xc4', + 'auml;': '\xe4', + 'awconint;': '\u2233', + 'awint;': '\u2a11', + 'backcong;': '\u224c', + 'backepsilon;': '\u03f6', + 'backprime;': '\u2035', + 'backsim;': '\u223d', + 'backsimeq;': '\u22cd', + 'Backslash;': '\u2216', + 'Barv;': '\u2ae7', + 'barvee;': '\u22bd', + 'Barwed;': '\u2306', + 'barwed;': '\u2305', + 'barwedge;': '\u2305', + 'bbrk;': '\u23b5', + 'bbrktbrk;': '\u23b6', + 'bcong;': '\u224c', + 
'Bcy;': '\u0411', + 'bcy;': '\u0431', + 'bdquo;': '\u201e', + 'becaus;': '\u2235', + 'Because;': '\u2235', + 'because;': '\u2235', + 'bemptyv;': '\u29b0', + 'bepsi;': '\u03f6', + 'bernou;': '\u212c', + 'Bernoullis;': '\u212c', + 'Beta;': '\u0392', + 'beta;': '\u03b2', + 'beth;': '\u2136', + 'between;': '\u226c', + 'Bfr;': '\U0001d505', + 'bfr;': '\U0001d51f', + 'bigcap;': '\u22c2', + 'bigcirc;': '\u25ef', + 'bigcup;': '\u22c3', + 'bigodot;': '\u2a00', + 'bigoplus;': '\u2a01', + 'bigotimes;': '\u2a02', + 'bigsqcup;': '\u2a06', + 'bigstar;': '\u2605', + 'bigtriangledown;': '\u25bd', + 'bigtriangleup;': '\u25b3', + 'biguplus;': '\u2a04', + 'bigvee;': '\u22c1', + 'bigwedge;': '\u22c0', + 'bkarow;': '\u290d', + 'blacklozenge;': '\u29eb', + 'blacksquare;': '\u25aa', + 'blacktriangle;': '\u25b4', + 'blacktriangledown;': '\u25be', + 'blacktriangleleft;': '\u25c2', + 'blacktriangleright;': '\u25b8', + 'blank;': '\u2423', + 'blk12;': '\u2592', + 'blk14;': '\u2591', + 'blk34;': '\u2593', + 'block;': '\u2588', + 'bne;': '=\u20e5', + 'bnequiv;': '\u2261\u20e5', + 'bNot;': '\u2aed', + 'bnot;': '\u2310', + 'Bopf;': '\U0001d539', + 'bopf;': '\U0001d553', + 'bot;': '\u22a5', + 'bottom;': '\u22a5', + 'bowtie;': '\u22c8', + 'boxbox;': '\u29c9', + 'boxDL;': '\u2557', + 'boxDl;': '\u2556', + 'boxdL;': '\u2555', + 'boxdl;': '\u2510', + 'boxDR;': '\u2554', + 'boxDr;': '\u2553', + 'boxdR;': '\u2552', + 'boxdr;': '\u250c', + 'boxH;': '\u2550', + 'boxh;': '\u2500', + 'boxHD;': '\u2566', + 'boxHd;': '\u2564', + 'boxhD;': '\u2565', + 'boxhd;': '\u252c', + 'boxHU;': '\u2569', + 'boxHu;': '\u2567', + 'boxhU;': '\u2568', + 'boxhu;': '\u2534', + 'boxminus;': '\u229f', + 'boxplus;': '\u229e', + 'boxtimes;': '\u22a0', + 'boxUL;': '\u255d', + 'boxUl;': '\u255c', + 'boxuL;': '\u255b', + 'boxul;': '\u2518', + 'boxUR;': '\u255a', + 'boxUr;': '\u2559', + 'boxuR;': '\u2558', + 'boxur;': '\u2514', + 'boxV;': '\u2551', + 'boxv;': '\u2502', + 'boxVH;': '\u256c', + 'boxVh;': '\u256b', + 'boxvH;': '\u256a', + 'boxvh;': '\u253c', + 'boxVL;': '\u2563', + 'boxVl;': '\u2562', + 'boxvL;': '\u2561', + 'boxvl;': '\u2524', + 'boxVR;': '\u2560', + 'boxVr;': '\u255f', + 'boxvR;': '\u255e', + 'boxvr;': '\u251c', + 'bprime;': '\u2035', + 'Breve;': '\u02d8', + 'breve;': '\u02d8', + 'brvbar': '\xa6', + 'brvbar;': '\xa6', + 'Bscr;': '\u212c', + 'bscr;': '\U0001d4b7', + 'bsemi;': '\u204f', + 'bsim;': '\u223d', + 'bsime;': '\u22cd', + 'bsol;': '\\', + 'bsolb;': '\u29c5', + 'bsolhsub;': '\u27c8', + 'bull;': '\u2022', + 'bullet;': '\u2022', + 'bump;': '\u224e', + 'bumpE;': '\u2aae', + 'bumpe;': '\u224f', + 'Bumpeq;': '\u224e', + 'bumpeq;': '\u224f', + 'Cacute;': '\u0106', + 'cacute;': '\u0107', + 'Cap;': '\u22d2', + 'cap;': '\u2229', + 'capand;': '\u2a44', + 'capbrcup;': '\u2a49', + 'capcap;': '\u2a4b', + 'capcup;': '\u2a47', + 'capdot;': '\u2a40', + 'CapitalDifferentialD;': '\u2145', + 'caps;': '\u2229\ufe00', + 'caret;': '\u2041', + 'caron;': '\u02c7', + 'Cayleys;': '\u212d', + 'ccaps;': '\u2a4d', + 'Ccaron;': '\u010c', + 'ccaron;': '\u010d', + 'Ccedil': '\xc7', + 'ccedil': '\xe7', + 'Ccedil;': '\xc7', + 'ccedil;': '\xe7', + 'Ccirc;': '\u0108', + 'ccirc;': '\u0109', + 'Cconint;': '\u2230', + 'ccups;': '\u2a4c', + 'ccupssm;': '\u2a50', + 'Cdot;': '\u010a', + 'cdot;': '\u010b', + 'cedil': '\xb8', + 'cedil;': '\xb8', + 'Cedilla;': '\xb8', + 'cemptyv;': '\u29b2', + 'cent': '\xa2', + 'cent;': '\xa2', + 'CenterDot;': '\xb7', + 'centerdot;': '\xb7', + 'Cfr;': '\u212d', + 'cfr;': '\U0001d520', + 'CHcy;': '\u0427', + 'chcy;': '\u0447', + 'check;': '\u2713', + 
'checkmark;': '\u2713', + 'Chi;': '\u03a7', + 'chi;': '\u03c7', + 'cir;': '\u25cb', + 'circ;': '\u02c6', + 'circeq;': '\u2257', + 'circlearrowleft;': '\u21ba', + 'circlearrowright;': '\u21bb', + 'circledast;': '\u229b', + 'circledcirc;': '\u229a', + 'circleddash;': '\u229d', + 'CircleDot;': '\u2299', + 'circledR;': '\xae', + 'circledS;': '\u24c8', + 'CircleMinus;': '\u2296', + 'CirclePlus;': '\u2295', + 'CircleTimes;': '\u2297', + 'cirE;': '\u29c3', + 'cire;': '\u2257', + 'cirfnint;': '\u2a10', + 'cirmid;': '\u2aef', + 'cirscir;': '\u29c2', + 'ClockwiseContourIntegral;': '\u2232', + 'CloseCurlyDoubleQuote;': '\u201d', + 'CloseCurlyQuote;': '\u2019', + 'clubs;': '\u2663', + 'clubsuit;': '\u2663', + 'Colon;': '\u2237', + 'colon;': ':', + 'Colone;': '\u2a74', + 'colone;': '\u2254', + 'coloneq;': '\u2254', + 'comma;': ',', + 'commat;': '@', + 'comp;': '\u2201', + 'compfn;': '\u2218', + 'complement;': '\u2201', + 'complexes;': '\u2102', + 'cong;': '\u2245', + 'congdot;': '\u2a6d', + 'Congruent;': '\u2261', + 'Conint;': '\u222f', + 'conint;': '\u222e', + 'ContourIntegral;': '\u222e', + 'Copf;': '\u2102', + 'copf;': '\U0001d554', + 'coprod;': '\u2210', + 'Coproduct;': '\u2210', + 'COPY': '\xa9', + 'copy': '\xa9', + 'COPY;': '\xa9', + 'copy;': '\xa9', + 'copysr;': '\u2117', + 'CounterClockwiseContourIntegral;': '\u2233', + 'crarr;': '\u21b5', + 'Cross;': '\u2a2f', + 'cross;': '\u2717', + 'Cscr;': '\U0001d49e', + 'cscr;': '\U0001d4b8', + 'csub;': '\u2acf', + 'csube;': '\u2ad1', + 'csup;': '\u2ad0', + 'csupe;': '\u2ad2', + 'ctdot;': '\u22ef', + 'cudarrl;': '\u2938', + 'cudarrr;': '\u2935', + 'cuepr;': '\u22de', + 'cuesc;': '\u22df', + 'cularr;': '\u21b6', + 'cularrp;': '\u293d', + 'Cup;': '\u22d3', + 'cup;': '\u222a', + 'cupbrcap;': '\u2a48', + 'CupCap;': '\u224d', + 'cupcap;': '\u2a46', + 'cupcup;': '\u2a4a', + 'cupdot;': '\u228d', + 'cupor;': '\u2a45', + 'cups;': '\u222a\ufe00', + 'curarr;': '\u21b7', + 'curarrm;': '\u293c', + 'curlyeqprec;': '\u22de', + 'curlyeqsucc;': '\u22df', + 'curlyvee;': '\u22ce', + 'curlywedge;': '\u22cf', + 'curren': '\xa4', + 'curren;': '\xa4', + 'curvearrowleft;': '\u21b6', + 'curvearrowright;': '\u21b7', + 'cuvee;': '\u22ce', + 'cuwed;': '\u22cf', + 'cwconint;': '\u2232', + 'cwint;': '\u2231', + 'cylcty;': '\u232d', + 'Dagger;': '\u2021', + 'dagger;': '\u2020', + 'daleth;': '\u2138', + 'Darr;': '\u21a1', + 'dArr;': '\u21d3', + 'darr;': '\u2193', + 'dash;': '\u2010', + 'Dashv;': '\u2ae4', + 'dashv;': '\u22a3', + 'dbkarow;': '\u290f', + 'dblac;': '\u02dd', + 'Dcaron;': '\u010e', + 'dcaron;': '\u010f', + 'Dcy;': '\u0414', + 'dcy;': '\u0434', + 'DD;': '\u2145', + 'dd;': '\u2146', + 'ddagger;': '\u2021', + 'ddarr;': '\u21ca', + 'DDotrahd;': '\u2911', + 'ddotseq;': '\u2a77', + 'deg': '\xb0', + 'deg;': '\xb0', + 'Del;': '\u2207', + 'Delta;': '\u0394', + 'delta;': '\u03b4', + 'demptyv;': '\u29b1', + 'dfisht;': '\u297f', + 'Dfr;': '\U0001d507', + 'dfr;': '\U0001d521', + 'dHar;': '\u2965', + 'dharl;': '\u21c3', + 'dharr;': '\u21c2', + 'DiacriticalAcute;': '\xb4', + 'DiacriticalDot;': '\u02d9', + 'DiacriticalDoubleAcute;': '\u02dd', + 'DiacriticalGrave;': '`', + 'DiacriticalTilde;': '\u02dc', + 'diam;': '\u22c4', + 'Diamond;': '\u22c4', + 'diamond;': '\u22c4', + 'diamondsuit;': '\u2666', + 'diams;': '\u2666', + 'die;': '\xa8', + 'DifferentialD;': '\u2146', + 'digamma;': '\u03dd', + 'disin;': '\u22f2', + 'div;': '\xf7', + 'divide': '\xf7', + 'divide;': '\xf7', + 'divideontimes;': '\u22c7', + 'divonx;': '\u22c7', + 'DJcy;': '\u0402', + 'djcy;': '\u0452', + 'dlcorn;': '\u231e', + 
'dlcrop;': '\u230d', + 'dollar;': '$', + 'Dopf;': '\U0001d53b', + 'dopf;': '\U0001d555', + 'Dot;': '\xa8', + 'dot;': '\u02d9', + 'DotDot;': '\u20dc', + 'doteq;': '\u2250', + 'doteqdot;': '\u2251', + 'DotEqual;': '\u2250', + 'dotminus;': '\u2238', + 'dotplus;': '\u2214', + 'dotsquare;': '\u22a1', + 'doublebarwedge;': '\u2306', + 'DoubleContourIntegral;': '\u222f', + 'DoubleDot;': '\xa8', + 'DoubleDownArrow;': '\u21d3', + 'DoubleLeftArrow;': '\u21d0', + 'DoubleLeftRightArrow;': '\u21d4', + 'DoubleLeftTee;': '\u2ae4', + 'DoubleLongLeftArrow;': '\u27f8', + 'DoubleLongLeftRightArrow;': '\u27fa', + 'DoubleLongRightArrow;': '\u27f9', + 'DoubleRightArrow;': '\u21d2', + 'DoubleRightTee;': '\u22a8', + 'DoubleUpArrow;': '\u21d1', + 'DoubleUpDownArrow;': '\u21d5', + 'DoubleVerticalBar;': '\u2225', + 'DownArrow;': '\u2193', + 'Downarrow;': '\u21d3', + 'downarrow;': '\u2193', + 'DownArrowBar;': '\u2913', + 'DownArrowUpArrow;': '\u21f5', + 'DownBreve;': '\u0311', + 'downdownarrows;': '\u21ca', + 'downharpoonleft;': '\u21c3', + 'downharpoonright;': '\u21c2', + 'DownLeftRightVector;': '\u2950', + 'DownLeftTeeVector;': '\u295e', + 'DownLeftVector;': '\u21bd', + 'DownLeftVectorBar;': '\u2956', + 'DownRightTeeVector;': '\u295f', + 'DownRightVector;': '\u21c1', + 'DownRightVectorBar;': '\u2957', + 'DownTee;': '\u22a4', + 'DownTeeArrow;': '\u21a7', + 'drbkarow;': '\u2910', + 'drcorn;': '\u231f', + 'drcrop;': '\u230c', + 'Dscr;': '\U0001d49f', + 'dscr;': '\U0001d4b9', + 'DScy;': '\u0405', + 'dscy;': '\u0455', + 'dsol;': '\u29f6', + 'Dstrok;': '\u0110', + 'dstrok;': '\u0111', + 'dtdot;': '\u22f1', + 'dtri;': '\u25bf', + 'dtrif;': '\u25be', + 'duarr;': '\u21f5', + 'duhar;': '\u296f', + 'dwangle;': '\u29a6', + 'DZcy;': '\u040f', + 'dzcy;': '\u045f', + 'dzigrarr;': '\u27ff', + 'Eacute': '\xc9', + 'eacute': '\xe9', + 'Eacute;': '\xc9', + 'eacute;': '\xe9', + 'easter;': '\u2a6e', + 'Ecaron;': '\u011a', + 'ecaron;': '\u011b', + 'ecir;': '\u2256', + 'Ecirc': '\xca', + 'ecirc': '\xea', + 'Ecirc;': '\xca', + 'ecirc;': '\xea', + 'ecolon;': '\u2255', + 'Ecy;': '\u042d', + 'ecy;': '\u044d', + 'eDDot;': '\u2a77', + 'Edot;': '\u0116', + 'eDot;': '\u2251', + 'edot;': '\u0117', + 'ee;': '\u2147', + 'efDot;': '\u2252', + 'Efr;': '\U0001d508', + 'efr;': '\U0001d522', + 'eg;': '\u2a9a', + 'Egrave': '\xc8', + 'egrave': '\xe8', + 'Egrave;': '\xc8', + 'egrave;': '\xe8', + 'egs;': '\u2a96', + 'egsdot;': '\u2a98', + 'el;': '\u2a99', + 'Element;': '\u2208', + 'elinters;': '\u23e7', + 'ell;': '\u2113', + 'els;': '\u2a95', + 'elsdot;': '\u2a97', + 'Emacr;': '\u0112', + 'emacr;': '\u0113', + 'empty;': '\u2205', + 'emptyset;': '\u2205', + 'EmptySmallSquare;': '\u25fb', + 'emptyv;': '\u2205', + 'EmptyVerySmallSquare;': '\u25ab', + 'emsp13;': '\u2004', + 'emsp14;': '\u2005', + 'emsp;': '\u2003', + 'ENG;': '\u014a', + 'eng;': '\u014b', + 'ensp;': '\u2002', + 'Eogon;': '\u0118', + 'eogon;': '\u0119', + 'Eopf;': '\U0001d53c', + 'eopf;': '\U0001d556', + 'epar;': '\u22d5', + 'eparsl;': '\u29e3', + 'eplus;': '\u2a71', + 'epsi;': '\u03b5', + 'Epsilon;': '\u0395', + 'epsilon;': '\u03b5', + 'epsiv;': '\u03f5', + 'eqcirc;': '\u2256', + 'eqcolon;': '\u2255', + 'eqsim;': '\u2242', + 'eqslantgtr;': '\u2a96', + 'eqslantless;': '\u2a95', + 'Equal;': '\u2a75', + 'equals;': '=', + 'EqualTilde;': '\u2242', + 'equest;': '\u225f', + 'Equilibrium;': '\u21cc', + 'equiv;': '\u2261', + 'equivDD;': '\u2a78', + 'eqvparsl;': '\u29e5', + 'erarr;': '\u2971', + 'erDot;': '\u2253', + 'Escr;': '\u2130', + 'escr;': '\u212f', + 'esdot;': '\u2250', + 'Esim;': '\u2a73', + 
'esim;': '\u2242', + 'Eta;': '\u0397', + 'eta;': '\u03b7', + 'ETH': '\xd0', + 'eth': '\xf0', + 'ETH;': '\xd0', + 'eth;': '\xf0', + 'Euml': '\xcb', + 'euml': '\xeb', + 'Euml;': '\xcb', + 'euml;': '\xeb', + 'euro;': '\u20ac', + 'excl;': '!', + 'exist;': '\u2203', + 'Exists;': '\u2203', + 'expectation;': '\u2130', + 'ExponentialE;': '\u2147', + 'exponentiale;': '\u2147', + 'fallingdotseq;': '\u2252', + 'Fcy;': '\u0424', + 'fcy;': '\u0444', + 'female;': '\u2640', + 'ffilig;': '\ufb03', + 'fflig;': '\ufb00', + 'ffllig;': '\ufb04', + 'Ffr;': '\U0001d509', + 'ffr;': '\U0001d523', + 'filig;': '\ufb01', + 'FilledSmallSquare;': '\u25fc', + 'FilledVerySmallSquare;': '\u25aa', + 'fjlig;': 'fj', + 'flat;': '\u266d', + 'fllig;': '\ufb02', + 'fltns;': '\u25b1', + 'fnof;': '\u0192', + 'Fopf;': '\U0001d53d', + 'fopf;': '\U0001d557', + 'ForAll;': '\u2200', + 'forall;': '\u2200', + 'fork;': '\u22d4', + 'forkv;': '\u2ad9', + 'Fouriertrf;': '\u2131', + 'fpartint;': '\u2a0d', + 'frac12': '\xbd', + 'frac12;': '\xbd', + 'frac13;': '\u2153', + 'frac14': '\xbc', + 'frac14;': '\xbc', + 'frac15;': '\u2155', + 'frac16;': '\u2159', + 'frac18;': '\u215b', + 'frac23;': '\u2154', + 'frac25;': '\u2156', + 'frac34': '\xbe', + 'frac34;': '\xbe', + 'frac35;': '\u2157', + 'frac38;': '\u215c', + 'frac45;': '\u2158', + 'frac56;': '\u215a', + 'frac58;': '\u215d', + 'frac78;': '\u215e', + 'frasl;': '\u2044', + 'frown;': '\u2322', + 'Fscr;': '\u2131', + 'fscr;': '\U0001d4bb', + 'gacute;': '\u01f5', + 'Gamma;': '\u0393', + 'gamma;': '\u03b3', + 'Gammad;': '\u03dc', + 'gammad;': '\u03dd', + 'gap;': '\u2a86', + 'Gbreve;': '\u011e', + 'gbreve;': '\u011f', + 'Gcedil;': '\u0122', + 'Gcirc;': '\u011c', + 'gcirc;': '\u011d', + 'Gcy;': '\u0413', + 'gcy;': '\u0433', + 'Gdot;': '\u0120', + 'gdot;': '\u0121', + 'gE;': '\u2267', + 'ge;': '\u2265', + 'gEl;': '\u2a8c', + 'gel;': '\u22db', + 'geq;': '\u2265', + 'geqq;': '\u2267', + 'geqslant;': '\u2a7e', + 'ges;': '\u2a7e', + 'gescc;': '\u2aa9', + 'gesdot;': '\u2a80', + 'gesdoto;': '\u2a82', + 'gesdotol;': '\u2a84', + 'gesl;': '\u22db\ufe00', + 'gesles;': '\u2a94', + 'Gfr;': '\U0001d50a', + 'gfr;': '\U0001d524', + 'Gg;': '\u22d9', + 'gg;': '\u226b', + 'ggg;': '\u22d9', + 'gimel;': '\u2137', + 'GJcy;': '\u0403', + 'gjcy;': '\u0453', + 'gl;': '\u2277', + 'gla;': '\u2aa5', + 'glE;': '\u2a92', + 'glj;': '\u2aa4', + 'gnap;': '\u2a8a', + 'gnapprox;': '\u2a8a', + 'gnE;': '\u2269', + 'gne;': '\u2a88', + 'gneq;': '\u2a88', + 'gneqq;': '\u2269', + 'gnsim;': '\u22e7', + 'Gopf;': '\U0001d53e', + 'gopf;': '\U0001d558', + 'grave;': '`', + 'GreaterEqual;': '\u2265', + 'GreaterEqualLess;': '\u22db', + 'GreaterFullEqual;': '\u2267', + 'GreaterGreater;': '\u2aa2', + 'GreaterLess;': '\u2277', + 'GreaterSlantEqual;': '\u2a7e', + 'GreaterTilde;': '\u2273', + 'Gscr;': '\U0001d4a2', + 'gscr;': '\u210a', + 'gsim;': '\u2273', + 'gsime;': '\u2a8e', + 'gsiml;': '\u2a90', + 'GT': '>', + 'gt': '>', + 'GT;': '>', + 'Gt;': '\u226b', + 'gt;': '>', + 'gtcc;': '\u2aa7', + 'gtcir;': '\u2a7a', + 'gtdot;': '\u22d7', + 'gtlPar;': '\u2995', + 'gtquest;': '\u2a7c', + 'gtrapprox;': '\u2a86', + 'gtrarr;': '\u2978', + 'gtrdot;': '\u22d7', + 'gtreqless;': '\u22db', + 'gtreqqless;': '\u2a8c', + 'gtrless;': '\u2277', + 'gtrsim;': '\u2273', + 'gvertneqq;': '\u2269\ufe00', + 'gvnE;': '\u2269\ufe00', + 'Hacek;': '\u02c7', + 'hairsp;': '\u200a', + 'half;': '\xbd', + 'hamilt;': '\u210b', + 'HARDcy;': '\u042a', + 'hardcy;': '\u044a', + 'hArr;': '\u21d4', + 'harr;': '\u2194', + 'harrcir;': '\u2948', + 'harrw;': '\u21ad', + 'Hat;': '^', + 'hbar;': 
'\u210f', + 'Hcirc;': '\u0124', + 'hcirc;': '\u0125', + 'hearts;': '\u2665', + 'heartsuit;': '\u2665', + 'hellip;': '\u2026', + 'hercon;': '\u22b9', + 'Hfr;': '\u210c', + 'hfr;': '\U0001d525', + 'HilbertSpace;': '\u210b', + 'hksearow;': '\u2925', + 'hkswarow;': '\u2926', + 'hoarr;': '\u21ff', + 'homtht;': '\u223b', + 'hookleftarrow;': '\u21a9', + 'hookrightarrow;': '\u21aa', + 'Hopf;': '\u210d', + 'hopf;': '\U0001d559', + 'horbar;': '\u2015', + 'HorizontalLine;': '\u2500', + 'Hscr;': '\u210b', + 'hscr;': '\U0001d4bd', + 'hslash;': '\u210f', + 'Hstrok;': '\u0126', + 'hstrok;': '\u0127', + 'HumpDownHump;': '\u224e', + 'HumpEqual;': '\u224f', + 'hybull;': '\u2043', + 'hyphen;': '\u2010', + 'Iacute': '\xcd', + 'iacute': '\xed', + 'Iacute;': '\xcd', + 'iacute;': '\xed', + 'ic;': '\u2063', + 'Icirc': '\xce', + 'icirc': '\xee', + 'Icirc;': '\xce', + 'icirc;': '\xee', + 'Icy;': '\u0418', + 'icy;': '\u0438', + 'Idot;': '\u0130', + 'IEcy;': '\u0415', + 'iecy;': '\u0435', + 'iexcl': '\xa1', + 'iexcl;': '\xa1', + 'iff;': '\u21d4', + 'Ifr;': '\u2111', + 'ifr;': '\U0001d526', + 'Igrave': '\xcc', + 'igrave': '\xec', + 'Igrave;': '\xcc', + 'igrave;': '\xec', + 'ii;': '\u2148', + 'iiiint;': '\u2a0c', + 'iiint;': '\u222d', + 'iinfin;': '\u29dc', + 'iiota;': '\u2129', + 'IJlig;': '\u0132', + 'ijlig;': '\u0133', + 'Im;': '\u2111', + 'Imacr;': '\u012a', + 'imacr;': '\u012b', + 'image;': '\u2111', + 'ImaginaryI;': '\u2148', + 'imagline;': '\u2110', + 'imagpart;': '\u2111', + 'imath;': '\u0131', + 'imof;': '\u22b7', + 'imped;': '\u01b5', + 'Implies;': '\u21d2', + 'in;': '\u2208', + 'incare;': '\u2105', + 'infin;': '\u221e', + 'infintie;': '\u29dd', + 'inodot;': '\u0131', + 'Int;': '\u222c', + 'int;': '\u222b', + 'intcal;': '\u22ba', + 'integers;': '\u2124', + 'Integral;': '\u222b', + 'intercal;': '\u22ba', + 'Intersection;': '\u22c2', + 'intlarhk;': '\u2a17', + 'intprod;': '\u2a3c', + 'InvisibleComma;': '\u2063', + 'InvisibleTimes;': '\u2062', + 'IOcy;': '\u0401', + 'iocy;': '\u0451', + 'Iogon;': '\u012e', + 'iogon;': '\u012f', + 'Iopf;': '\U0001d540', + 'iopf;': '\U0001d55a', + 'Iota;': '\u0399', + 'iota;': '\u03b9', + 'iprod;': '\u2a3c', + 'iquest': '\xbf', + 'iquest;': '\xbf', + 'Iscr;': '\u2110', + 'iscr;': '\U0001d4be', + 'isin;': '\u2208', + 'isindot;': '\u22f5', + 'isinE;': '\u22f9', + 'isins;': '\u22f4', + 'isinsv;': '\u22f3', + 'isinv;': '\u2208', + 'it;': '\u2062', + 'Itilde;': '\u0128', + 'itilde;': '\u0129', + 'Iukcy;': '\u0406', + 'iukcy;': '\u0456', + 'Iuml': '\xcf', + 'iuml': '\xef', + 'Iuml;': '\xcf', + 'iuml;': '\xef', + 'Jcirc;': '\u0134', + 'jcirc;': '\u0135', + 'Jcy;': '\u0419', + 'jcy;': '\u0439', + 'Jfr;': '\U0001d50d', + 'jfr;': '\U0001d527', + 'jmath;': '\u0237', + 'Jopf;': '\U0001d541', + 'jopf;': '\U0001d55b', + 'Jscr;': '\U0001d4a5', + 'jscr;': '\U0001d4bf', + 'Jsercy;': '\u0408', + 'jsercy;': '\u0458', + 'Jukcy;': '\u0404', + 'jukcy;': '\u0454', + 'Kappa;': '\u039a', + 'kappa;': '\u03ba', + 'kappav;': '\u03f0', + 'Kcedil;': '\u0136', + 'kcedil;': '\u0137', + 'Kcy;': '\u041a', + 'kcy;': '\u043a', + 'Kfr;': '\U0001d50e', + 'kfr;': '\U0001d528', + 'kgreen;': '\u0138', + 'KHcy;': '\u0425', + 'khcy;': '\u0445', + 'KJcy;': '\u040c', + 'kjcy;': '\u045c', + 'Kopf;': '\U0001d542', + 'kopf;': '\U0001d55c', + 'Kscr;': '\U0001d4a6', + 'kscr;': '\U0001d4c0', + 'lAarr;': '\u21da', + 'Lacute;': '\u0139', + 'lacute;': '\u013a', + 'laemptyv;': '\u29b4', + 'lagran;': '\u2112', + 'Lambda;': '\u039b', + 'lambda;': '\u03bb', + 'Lang;': '\u27ea', + 'lang;': '\u27e8', + 'langd;': '\u2991', + 'langle;': 
'\u27e8', + 'lap;': '\u2a85', + 'Laplacetrf;': '\u2112', + 'laquo': '\xab', + 'laquo;': '\xab', + 'Larr;': '\u219e', + 'lArr;': '\u21d0', + 'larr;': '\u2190', + 'larrb;': '\u21e4', + 'larrbfs;': '\u291f', + 'larrfs;': '\u291d', + 'larrhk;': '\u21a9', + 'larrlp;': '\u21ab', + 'larrpl;': '\u2939', + 'larrsim;': '\u2973', + 'larrtl;': '\u21a2', + 'lat;': '\u2aab', + 'lAtail;': '\u291b', + 'latail;': '\u2919', + 'late;': '\u2aad', + 'lates;': '\u2aad\ufe00', + 'lBarr;': '\u290e', + 'lbarr;': '\u290c', + 'lbbrk;': '\u2772', + 'lbrace;': '{', + 'lbrack;': '[', + 'lbrke;': '\u298b', + 'lbrksld;': '\u298f', + 'lbrkslu;': '\u298d', + 'Lcaron;': '\u013d', + 'lcaron;': '\u013e', + 'Lcedil;': '\u013b', + 'lcedil;': '\u013c', + 'lceil;': '\u2308', + 'lcub;': '{', + 'Lcy;': '\u041b', + 'lcy;': '\u043b', + 'ldca;': '\u2936', + 'ldquo;': '\u201c', + 'ldquor;': '\u201e', + 'ldrdhar;': '\u2967', + 'ldrushar;': '\u294b', + 'ldsh;': '\u21b2', + 'lE;': '\u2266', + 'le;': '\u2264', + 'LeftAngleBracket;': '\u27e8', + 'LeftArrow;': '\u2190', + 'Leftarrow;': '\u21d0', + 'leftarrow;': '\u2190', + 'LeftArrowBar;': '\u21e4', + 'LeftArrowRightArrow;': '\u21c6', + 'leftarrowtail;': '\u21a2', + 'LeftCeiling;': '\u2308', + 'LeftDoubleBracket;': '\u27e6', + 'LeftDownTeeVector;': '\u2961', + 'LeftDownVector;': '\u21c3', + 'LeftDownVectorBar;': '\u2959', + 'LeftFloor;': '\u230a', + 'leftharpoondown;': '\u21bd', + 'leftharpoonup;': '\u21bc', + 'leftleftarrows;': '\u21c7', + 'LeftRightArrow;': '\u2194', + 'Leftrightarrow;': '\u21d4', + 'leftrightarrow;': '\u2194', + 'leftrightarrows;': '\u21c6', + 'leftrightharpoons;': '\u21cb', + 'leftrightsquigarrow;': '\u21ad', + 'LeftRightVector;': '\u294e', + 'LeftTee;': '\u22a3', + 'LeftTeeArrow;': '\u21a4', + 'LeftTeeVector;': '\u295a', + 'leftthreetimes;': '\u22cb', + 'LeftTriangle;': '\u22b2', + 'LeftTriangleBar;': '\u29cf', + 'LeftTriangleEqual;': '\u22b4', + 'LeftUpDownVector;': '\u2951', + 'LeftUpTeeVector;': '\u2960', + 'LeftUpVector;': '\u21bf', + 'LeftUpVectorBar;': '\u2958', + 'LeftVector;': '\u21bc', + 'LeftVectorBar;': '\u2952', + 'lEg;': '\u2a8b', + 'leg;': '\u22da', + 'leq;': '\u2264', + 'leqq;': '\u2266', + 'leqslant;': '\u2a7d', + 'les;': '\u2a7d', + 'lescc;': '\u2aa8', + 'lesdot;': '\u2a7f', + 'lesdoto;': '\u2a81', + 'lesdotor;': '\u2a83', + 'lesg;': '\u22da\ufe00', + 'lesges;': '\u2a93', + 'lessapprox;': '\u2a85', + 'lessdot;': '\u22d6', + 'lesseqgtr;': '\u22da', + 'lesseqqgtr;': '\u2a8b', + 'LessEqualGreater;': '\u22da', + 'LessFullEqual;': '\u2266', + 'LessGreater;': '\u2276', + 'lessgtr;': '\u2276', + 'LessLess;': '\u2aa1', + 'lesssim;': '\u2272', + 'LessSlantEqual;': '\u2a7d', + 'LessTilde;': '\u2272', + 'lfisht;': '\u297c', + 'lfloor;': '\u230a', + 'Lfr;': '\U0001d50f', + 'lfr;': '\U0001d529', + 'lg;': '\u2276', + 'lgE;': '\u2a91', + 'lHar;': '\u2962', + 'lhard;': '\u21bd', + 'lharu;': '\u21bc', + 'lharul;': '\u296a', + 'lhblk;': '\u2584', + 'LJcy;': '\u0409', + 'ljcy;': '\u0459', + 'Ll;': '\u22d8', + 'll;': '\u226a', + 'llarr;': '\u21c7', + 'llcorner;': '\u231e', + 'Lleftarrow;': '\u21da', + 'llhard;': '\u296b', + 'lltri;': '\u25fa', + 'Lmidot;': '\u013f', + 'lmidot;': '\u0140', + 'lmoust;': '\u23b0', + 'lmoustache;': '\u23b0', + 'lnap;': '\u2a89', + 'lnapprox;': '\u2a89', + 'lnE;': '\u2268', + 'lne;': '\u2a87', + 'lneq;': '\u2a87', + 'lneqq;': '\u2268', + 'lnsim;': '\u22e6', + 'loang;': '\u27ec', + 'loarr;': '\u21fd', + 'lobrk;': '\u27e6', + 'LongLeftArrow;': '\u27f5', + 'Longleftarrow;': '\u27f8', + 'longleftarrow;': '\u27f5', + 'LongLeftRightArrow;': '\u27f7', 
+ 'Longleftrightarrow;': '\u27fa', + 'longleftrightarrow;': '\u27f7', + 'longmapsto;': '\u27fc', + 'LongRightArrow;': '\u27f6', + 'Longrightarrow;': '\u27f9', + 'longrightarrow;': '\u27f6', + 'looparrowleft;': '\u21ab', + 'looparrowright;': '\u21ac', + 'lopar;': '\u2985', + 'Lopf;': '\U0001d543', + 'lopf;': '\U0001d55d', + 'loplus;': '\u2a2d', + 'lotimes;': '\u2a34', + 'lowast;': '\u2217', + 'lowbar;': '_', + 'LowerLeftArrow;': '\u2199', + 'LowerRightArrow;': '\u2198', + 'loz;': '\u25ca', + 'lozenge;': '\u25ca', + 'lozf;': '\u29eb', + 'lpar;': '(', + 'lparlt;': '\u2993', + 'lrarr;': '\u21c6', + 'lrcorner;': '\u231f', + 'lrhar;': '\u21cb', + 'lrhard;': '\u296d', + 'lrm;': '\u200e', + 'lrtri;': '\u22bf', + 'lsaquo;': '\u2039', + 'Lscr;': '\u2112', + 'lscr;': '\U0001d4c1', + 'Lsh;': '\u21b0', + 'lsh;': '\u21b0', + 'lsim;': '\u2272', + 'lsime;': '\u2a8d', + 'lsimg;': '\u2a8f', + 'lsqb;': '[', + 'lsquo;': '\u2018', + 'lsquor;': '\u201a', + 'Lstrok;': '\u0141', + 'lstrok;': '\u0142', + 'LT': '<', + 'lt': '<', + 'LT;': '<', + 'Lt;': '\u226a', + 'lt;': '<', + 'ltcc;': '\u2aa6', + 'ltcir;': '\u2a79', + 'ltdot;': '\u22d6', + 'lthree;': '\u22cb', + 'ltimes;': '\u22c9', + 'ltlarr;': '\u2976', + 'ltquest;': '\u2a7b', + 'ltri;': '\u25c3', + 'ltrie;': '\u22b4', + 'ltrif;': '\u25c2', + 'ltrPar;': '\u2996', + 'lurdshar;': '\u294a', + 'luruhar;': '\u2966', + 'lvertneqq;': '\u2268\ufe00', + 'lvnE;': '\u2268\ufe00', + 'macr': '\xaf', + 'macr;': '\xaf', + 'male;': '\u2642', + 'malt;': '\u2720', + 'maltese;': '\u2720', + 'Map;': '\u2905', + 'map;': '\u21a6', + 'mapsto;': '\u21a6', + 'mapstodown;': '\u21a7', + 'mapstoleft;': '\u21a4', + 'mapstoup;': '\u21a5', + 'marker;': '\u25ae', + 'mcomma;': '\u2a29', + 'Mcy;': '\u041c', + 'mcy;': '\u043c', + 'mdash;': '\u2014', + 'mDDot;': '\u223a', + 'measuredangle;': '\u2221', + 'MediumSpace;': '\u205f', + 'Mellintrf;': '\u2133', + 'Mfr;': '\U0001d510', + 'mfr;': '\U0001d52a', + 'mho;': '\u2127', + 'micro': '\xb5', + 'micro;': '\xb5', + 'mid;': '\u2223', + 'midast;': '*', + 'midcir;': '\u2af0', + 'middot': '\xb7', + 'middot;': '\xb7', + 'minus;': '\u2212', + 'minusb;': '\u229f', + 'minusd;': '\u2238', + 'minusdu;': '\u2a2a', + 'MinusPlus;': '\u2213', + 'mlcp;': '\u2adb', + 'mldr;': '\u2026', + 'mnplus;': '\u2213', + 'models;': '\u22a7', + 'Mopf;': '\U0001d544', + 'mopf;': '\U0001d55e', + 'mp;': '\u2213', + 'Mscr;': '\u2133', + 'mscr;': '\U0001d4c2', + 'mstpos;': '\u223e', + 'Mu;': '\u039c', + 'mu;': '\u03bc', + 'multimap;': '\u22b8', + 'mumap;': '\u22b8', + 'nabla;': '\u2207', + 'Nacute;': '\u0143', + 'nacute;': '\u0144', + 'nang;': '\u2220\u20d2', + 'nap;': '\u2249', + 'napE;': '\u2a70\u0338', + 'napid;': '\u224b\u0338', + 'napos;': '\u0149', + 'napprox;': '\u2249', + 'natur;': '\u266e', + 'natural;': '\u266e', + 'naturals;': '\u2115', + 'nbsp': '\xa0', + 'nbsp;': '\xa0', + 'nbump;': '\u224e\u0338', + 'nbumpe;': '\u224f\u0338', + 'ncap;': '\u2a43', + 'Ncaron;': '\u0147', + 'ncaron;': '\u0148', + 'Ncedil;': '\u0145', + 'ncedil;': '\u0146', + 'ncong;': '\u2247', + 'ncongdot;': '\u2a6d\u0338', + 'ncup;': '\u2a42', + 'Ncy;': '\u041d', + 'ncy;': '\u043d', + 'ndash;': '\u2013', + 'ne;': '\u2260', + 'nearhk;': '\u2924', + 'neArr;': '\u21d7', + 'nearr;': '\u2197', + 'nearrow;': '\u2197', + 'nedot;': '\u2250\u0338', + 'NegativeMediumSpace;': '\u200b', + 'NegativeThickSpace;': '\u200b', + 'NegativeThinSpace;': '\u200b', + 'NegativeVeryThinSpace;': '\u200b', + 'nequiv;': '\u2262', + 'nesear;': '\u2928', + 'nesim;': '\u2242\u0338', + 'NestedGreaterGreater;': '\u226b', + 
'NestedLessLess;': '\u226a', + 'NewLine;': '\n', + 'nexist;': '\u2204', + 'nexists;': '\u2204', + 'Nfr;': '\U0001d511', + 'nfr;': '\U0001d52b', + 'ngE;': '\u2267\u0338', + 'nge;': '\u2271', + 'ngeq;': '\u2271', + 'ngeqq;': '\u2267\u0338', + 'ngeqslant;': '\u2a7e\u0338', + 'nges;': '\u2a7e\u0338', + 'nGg;': '\u22d9\u0338', + 'ngsim;': '\u2275', + 'nGt;': '\u226b\u20d2', + 'ngt;': '\u226f', + 'ngtr;': '\u226f', + 'nGtv;': '\u226b\u0338', + 'nhArr;': '\u21ce', + 'nharr;': '\u21ae', + 'nhpar;': '\u2af2', + 'ni;': '\u220b', + 'nis;': '\u22fc', + 'nisd;': '\u22fa', + 'niv;': '\u220b', + 'NJcy;': '\u040a', + 'njcy;': '\u045a', + 'nlArr;': '\u21cd', + 'nlarr;': '\u219a', + 'nldr;': '\u2025', + 'nlE;': '\u2266\u0338', + 'nle;': '\u2270', + 'nLeftarrow;': '\u21cd', + 'nleftarrow;': '\u219a', + 'nLeftrightarrow;': '\u21ce', + 'nleftrightarrow;': '\u21ae', + 'nleq;': '\u2270', + 'nleqq;': '\u2266\u0338', + 'nleqslant;': '\u2a7d\u0338', + 'nles;': '\u2a7d\u0338', + 'nless;': '\u226e', + 'nLl;': '\u22d8\u0338', + 'nlsim;': '\u2274', + 'nLt;': '\u226a\u20d2', + 'nlt;': '\u226e', + 'nltri;': '\u22ea', + 'nltrie;': '\u22ec', + 'nLtv;': '\u226a\u0338', + 'nmid;': '\u2224', + 'NoBreak;': '\u2060', + 'NonBreakingSpace;': '\xa0', + 'Nopf;': '\u2115', + 'nopf;': '\U0001d55f', + 'not': '\xac', + 'Not;': '\u2aec', + 'not;': '\xac', + 'NotCongruent;': '\u2262', + 'NotCupCap;': '\u226d', + 'NotDoubleVerticalBar;': '\u2226', + 'NotElement;': '\u2209', + 'NotEqual;': '\u2260', + 'NotEqualTilde;': '\u2242\u0338', + 'NotExists;': '\u2204', + 'NotGreater;': '\u226f', + 'NotGreaterEqual;': '\u2271', + 'NotGreaterFullEqual;': '\u2267\u0338', + 'NotGreaterGreater;': '\u226b\u0338', + 'NotGreaterLess;': '\u2279', + 'NotGreaterSlantEqual;': '\u2a7e\u0338', + 'NotGreaterTilde;': '\u2275', + 'NotHumpDownHump;': '\u224e\u0338', + 'NotHumpEqual;': '\u224f\u0338', + 'notin;': '\u2209', + 'notindot;': '\u22f5\u0338', + 'notinE;': '\u22f9\u0338', + 'notinva;': '\u2209', + 'notinvb;': '\u22f7', + 'notinvc;': '\u22f6', + 'NotLeftTriangle;': '\u22ea', + 'NotLeftTriangleBar;': '\u29cf\u0338', + 'NotLeftTriangleEqual;': '\u22ec', + 'NotLess;': '\u226e', + 'NotLessEqual;': '\u2270', + 'NotLessGreater;': '\u2278', + 'NotLessLess;': '\u226a\u0338', + 'NotLessSlantEqual;': '\u2a7d\u0338', + 'NotLessTilde;': '\u2274', + 'NotNestedGreaterGreater;': '\u2aa2\u0338', + 'NotNestedLessLess;': '\u2aa1\u0338', + 'notni;': '\u220c', + 'notniva;': '\u220c', + 'notnivb;': '\u22fe', + 'notnivc;': '\u22fd', + 'NotPrecedes;': '\u2280', + 'NotPrecedesEqual;': '\u2aaf\u0338', + 'NotPrecedesSlantEqual;': '\u22e0', + 'NotReverseElement;': '\u220c', + 'NotRightTriangle;': '\u22eb', + 'NotRightTriangleBar;': '\u29d0\u0338', + 'NotRightTriangleEqual;': '\u22ed', + 'NotSquareSubset;': '\u228f\u0338', + 'NotSquareSubsetEqual;': '\u22e2', + 'NotSquareSuperset;': '\u2290\u0338', + 'NotSquareSupersetEqual;': '\u22e3', + 'NotSubset;': '\u2282\u20d2', + 'NotSubsetEqual;': '\u2288', + 'NotSucceeds;': '\u2281', + 'NotSucceedsEqual;': '\u2ab0\u0338', + 'NotSucceedsSlantEqual;': '\u22e1', + 'NotSucceedsTilde;': '\u227f\u0338', + 'NotSuperset;': '\u2283\u20d2', + 'NotSupersetEqual;': '\u2289', + 'NotTilde;': '\u2241', + 'NotTildeEqual;': '\u2244', + 'NotTildeFullEqual;': '\u2247', + 'NotTildeTilde;': '\u2249', + 'NotVerticalBar;': '\u2224', + 'npar;': '\u2226', + 'nparallel;': '\u2226', + 'nparsl;': '\u2afd\u20e5', + 'npart;': '\u2202\u0338', + 'npolint;': '\u2a14', + 'npr;': '\u2280', + 'nprcue;': '\u22e0', + 'npre;': '\u2aaf\u0338', + 'nprec;': '\u2280', + 'npreceq;': 
'\u2aaf\u0338', + 'nrArr;': '\u21cf', + 'nrarr;': '\u219b', + 'nrarrc;': '\u2933\u0338', + 'nrarrw;': '\u219d\u0338', + 'nRightarrow;': '\u21cf', + 'nrightarrow;': '\u219b', + 'nrtri;': '\u22eb', + 'nrtrie;': '\u22ed', + 'nsc;': '\u2281', + 'nsccue;': '\u22e1', + 'nsce;': '\u2ab0\u0338', + 'Nscr;': '\U0001d4a9', + 'nscr;': '\U0001d4c3', + 'nshortmid;': '\u2224', + 'nshortparallel;': '\u2226', + 'nsim;': '\u2241', + 'nsime;': '\u2244', + 'nsimeq;': '\u2244', + 'nsmid;': '\u2224', + 'nspar;': '\u2226', + 'nsqsube;': '\u22e2', + 'nsqsupe;': '\u22e3', + 'nsub;': '\u2284', + 'nsubE;': '\u2ac5\u0338', + 'nsube;': '\u2288', + 'nsubset;': '\u2282\u20d2', + 'nsubseteq;': '\u2288', + 'nsubseteqq;': '\u2ac5\u0338', + 'nsucc;': '\u2281', + 'nsucceq;': '\u2ab0\u0338', + 'nsup;': '\u2285', + 'nsupE;': '\u2ac6\u0338', + 'nsupe;': '\u2289', + 'nsupset;': '\u2283\u20d2', + 'nsupseteq;': '\u2289', + 'nsupseteqq;': '\u2ac6\u0338', + 'ntgl;': '\u2279', + 'Ntilde': '\xd1', + 'ntilde': '\xf1', + 'Ntilde;': '\xd1', + 'ntilde;': '\xf1', + 'ntlg;': '\u2278', + 'ntriangleleft;': '\u22ea', + 'ntrianglelefteq;': '\u22ec', + 'ntriangleright;': '\u22eb', + 'ntrianglerighteq;': '\u22ed', + 'Nu;': '\u039d', + 'nu;': '\u03bd', + 'num;': '#', + 'numero;': '\u2116', + 'numsp;': '\u2007', + 'nvap;': '\u224d\u20d2', + 'nVDash;': '\u22af', + 'nVdash;': '\u22ae', + 'nvDash;': '\u22ad', + 'nvdash;': '\u22ac', + 'nvge;': '\u2265\u20d2', + 'nvgt;': '>\u20d2', + 'nvHarr;': '\u2904', + 'nvinfin;': '\u29de', + 'nvlArr;': '\u2902', + 'nvle;': '\u2264\u20d2', + 'nvlt;': '<\u20d2', + 'nvltrie;': '\u22b4\u20d2', + 'nvrArr;': '\u2903', + 'nvrtrie;': '\u22b5\u20d2', + 'nvsim;': '\u223c\u20d2', + 'nwarhk;': '\u2923', + 'nwArr;': '\u21d6', + 'nwarr;': '\u2196', + 'nwarrow;': '\u2196', + 'nwnear;': '\u2927', + 'Oacute': '\xd3', + 'oacute': '\xf3', + 'Oacute;': '\xd3', + 'oacute;': '\xf3', + 'oast;': '\u229b', + 'ocir;': '\u229a', + 'Ocirc': '\xd4', + 'ocirc': '\xf4', + 'Ocirc;': '\xd4', + 'ocirc;': '\xf4', + 'Ocy;': '\u041e', + 'ocy;': '\u043e', + 'odash;': '\u229d', + 'Odblac;': '\u0150', + 'odblac;': '\u0151', + 'odiv;': '\u2a38', + 'odot;': '\u2299', + 'odsold;': '\u29bc', + 'OElig;': '\u0152', + 'oelig;': '\u0153', + 'ofcir;': '\u29bf', + 'Ofr;': '\U0001d512', + 'ofr;': '\U0001d52c', + 'ogon;': '\u02db', + 'Ograve': '\xd2', + 'ograve': '\xf2', + 'Ograve;': '\xd2', + 'ograve;': '\xf2', + 'ogt;': '\u29c1', + 'ohbar;': '\u29b5', + 'ohm;': '\u03a9', + 'oint;': '\u222e', + 'olarr;': '\u21ba', + 'olcir;': '\u29be', + 'olcross;': '\u29bb', + 'oline;': '\u203e', + 'olt;': '\u29c0', + 'Omacr;': '\u014c', + 'omacr;': '\u014d', + 'Omega;': '\u03a9', + 'omega;': '\u03c9', + 'Omicron;': '\u039f', + 'omicron;': '\u03bf', + 'omid;': '\u29b6', + 'ominus;': '\u2296', + 'Oopf;': '\U0001d546', + 'oopf;': '\U0001d560', + 'opar;': '\u29b7', + 'OpenCurlyDoubleQuote;': '\u201c', + 'OpenCurlyQuote;': '\u2018', + 'operp;': '\u29b9', + 'oplus;': '\u2295', + 'Or;': '\u2a54', + 'or;': '\u2228', + 'orarr;': '\u21bb', + 'ord;': '\u2a5d', + 'order;': '\u2134', + 'orderof;': '\u2134', + 'ordf': '\xaa', + 'ordf;': '\xaa', + 'ordm': '\xba', + 'ordm;': '\xba', + 'origof;': '\u22b6', + 'oror;': '\u2a56', + 'orslope;': '\u2a57', + 'orv;': '\u2a5b', + 'oS;': '\u24c8', + 'Oscr;': '\U0001d4aa', + 'oscr;': '\u2134', + 'Oslash': '\xd8', + 'oslash': '\xf8', + 'Oslash;': '\xd8', + 'oslash;': '\xf8', + 'osol;': '\u2298', + 'Otilde': '\xd5', + 'otilde': '\xf5', + 'Otilde;': '\xd5', + 'otilde;': '\xf5', + 'Otimes;': '\u2a37', + 'otimes;': '\u2297', + 'otimesas;': '\u2a36', + 'Ouml': 
'\xd6', + 'ouml': '\xf6', + 'Ouml;': '\xd6', + 'ouml;': '\xf6', + 'ovbar;': '\u233d', + 'OverBar;': '\u203e', + 'OverBrace;': '\u23de', + 'OverBracket;': '\u23b4', + 'OverParenthesis;': '\u23dc', + 'par;': '\u2225', + 'para': '\xb6', + 'para;': '\xb6', + 'parallel;': '\u2225', + 'parsim;': '\u2af3', + 'parsl;': '\u2afd', + 'part;': '\u2202', + 'PartialD;': '\u2202', + 'Pcy;': '\u041f', + 'pcy;': '\u043f', + 'percnt;': '%', + 'period;': '.', + 'permil;': '\u2030', + 'perp;': '\u22a5', + 'pertenk;': '\u2031', + 'Pfr;': '\U0001d513', + 'pfr;': '\U0001d52d', + 'Phi;': '\u03a6', + 'phi;': '\u03c6', + 'phiv;': '\u03d5', + 'phmmat;': '\u2133', + 'phone;': '\u260e', + 'Pi;': '\u03a0', + 'pi;': '\u03c0', + 'pitchfork;': '\u22d4', + 'piv;': '\u03d6', + 'planck;': '\u210f', + 'planckh;': '\u210e', + 'plankv;': '\u210f', + 'plus;': '+', + 'plusacir;': '\u2a23', + 'plusb;': '\u229e', + 'pluscir;': '\u2a22', + 'plusdo;': '\u2214', + 'plusdu;': '\u2a25', + 'pluse;': '\u2a72', + 'PlusMinus;': '\xb1', + 'plusmn': '\xb1', + 'plusmn;': '\xb1', + 'plussim;': '\u2a26', + 'plustwo;': '\u2a27', + 'pm;': '\xb1', + 'Poincareplane;': '\u210c', + 'pointint;': '\u2a15', + 'Popf;': '\u2119', + 'popf;': '\U0001d561', + 'pound': '\xa3', + 'pound;': '\xa3', + 'Pr;': '\u2abb', + 'pr;': '\u227a', + 'prap;': '\u2ab7', + 'prcue;': '\u227c', + 'prE;': '\u2ab3', + 'pre;': '\u2aaf', + 'prec;': '\u227a', + 'precapprox;': '\u2ab7', + 'preccurlyeq;': '\u227c', + 'Precedes;': '\u227a', + 'PrecedesEqual;': '\u2aaf', + 'PrecedesSlantEqual;': '\u227c', + 'PrecedesTilde;': '\u227e', + 'preceq;': '\u2aaf', + 'precnapprox;': '\u2ab9', + 'precneqq;': '\u2ab5', + 'precnsim;': '\u22e8', + 'precsim;': '\u227e', + 'Prime;': '\u2033', + 'prime;': '\u2032', + 'primes;': '\u2119', + 'prnap;': '\u2ab9', + 'prnE;': '\u2ab5', + 'prnsim;': '\u22e8', + 'prod;': '\u220f', + 'Product;': '\u220f', + 'profalar;': '\u232e', + 'profline;': '\u2312', + 'profsurf;': '\u2313', + 'prop;': '\u221d', + 'Proportion;': '\u2237', + 'Proportional;': '\u221d', + 'propto;': '\u221d', + 'prsim;': '\u227e', + 'prurel;': '\u22b0', + 'Pscr;': '\U0001d4ab', + 'pscr;': '\U0001d4c5', + 'Psi;': '\u03a8', + 'psi;': '\u03c8', + 'puncsp;': '\u2008', + 'Qfr;': '\U0001d514', + 'qfr;': '\U0001d52e', + 'qint;': '\u2a0c', + 'Qopf;': '\u211a', + 'qopf;': '\U0001d562', + 'qprime;': '\u2057', + 'Qscr;': '\U0001d4ac', + 'qscr;': '\U0001d4c6', + 'quaternions;': '\u210d', + 'quatint;': '\u2a16', + 'quest;': '?', + 'questeq;': '\u225f', + 'QUOT': '"', + 'quot': '"', + 'QUOT;': '"', + 'quot;': '"', + 'rAarr;': '\u21db', + 'race;': '\u223d\u0331', + 'Racute;': '\u0154', + 'racute;': '\u0155', + 'radic;': '\u221a', + 'raemptyv;': '\u29b3', + 'Rang;': '\u27eb', + 'rang;': '\u27e9', + 'rangd;': '\u2992', + 'range;': '\u29a5', + 'rangle;': '\u27e9', + 'raquo': '\xbb', + 'raquo;': '\xbb', + 'Rarr;': '\u21a0', + 'rArr;': '\u21d2', + 'rarr;': '\u2192', + 'rarrap;': '\u2975', + 'rarrb;': '\u21e5', + 'rarrbfs;': '\u2920', + 'rarrc;': '\u2933', + 'rarrfs;': '\u291e', + 'rarrhk;': '\u21aa', + 'rarrlp;': '\u21ac', + 'rarrpl;': '\u2945', + 'rarrsim;': '\u2974', + 'Rarrtl;': '\u2916', + 'rarrtl;': '\u21a3', + 'rarrw;': '\u219d', + 'rAtail;': '\u291c', + 'ratail;': '\u291a', + 'ratio;': '\u2236', + 'rationals;': '\u211a', + 'RBarr;': '\u2910', + 'rBarr;': '\u290f', + 'rbarr;': '\u290d', + 'rbbrk;': '\u2773', + 'rbrace;': '}', + 'rbrack;': ']', + 'rbrke;': '\u298c', + 'rbrksld;': '\u298e', + 'rbrkslu;': '\u2990', + 'Rcaron;': '\u0158', + 'rcaron;': '\u0159', + 'Rcedil;': '\u0156', + 'rcedil;': '\u0157', + 
'rceil;': '\u2309', + 'rcub;': '}', + 'Rcy;': '\u0420', + 'rcy;': '\u0440', + 'rdca;': '\u2937', + 'rdldhar;': '\u2969', + 'rdquo;': '\u201d', + 'rdquor;': '\u201d', + 'rdsh;': '\u21b3', + 'Re;': '\u211c', + 'real;': '\u211c', + 'realine;': '\u211b', + 'realpart;': '\u211c', + 'reals;': '\u211d', + 'rect;': '\u25ad', + 'REG': '\xae', + 'reg': '\xae', + 'REG;': '\xae', + 'reg;': '\xae', + 'ReverseElement;': '\u220b', + 'ReverseEquilibrium;': '\u21cb', + 'ReverseUpEquilibrium;': '\u296f', + 'rfisht;': '\u297d', + 'rfloor;': '\u230b', + 'Rfr;': '\u211c', + 'rfr;': '\U0001d52f', + 'rHar;': '\u2964', + 'rhard;': '\u21c1', + 'rharu;': '\u21c0', + 'rharul;': '\u296c', + 'Rho;': '\u03a1', + 'rho;': '\u03c1', + 'rhov;': '\u03f1', + 'RightAngleBracket;': '\u27e9', + 'RightArrow;': '\u2192', + 'Rightarrow;': '\u21d2', + 'rightarrow;': '\u2192', + 'RightArrowBar;': '\u21e5', + 'RightArrowLeftArrow;': '\u21c4', + 'rightarrowtail;': '\u21a3', + 'RightCeiling;': '\u2309', + 'RightDoubleBracket;': '\u27e7', + 'RightDownTeeVector;': '\u295d', + 'RightDownVector;': '\u21c2', + 'RightDownVectorBar;': '\u2955', + 'RightFloor;': '\u230b', + 'rightharpoondown;': '\u21c1', + 'rightharpoonup;': '\u21c0', + 'rightleftarrows;': '\u21c4', + 'rightleftharpoons;': '\u21cc', + 'rightrightarrows;': '\u21c9', + 'rightsquigarrow;': '\u219d', + 'RightTee;': '\u22a2', + 'RightTeeArrow;': '\u21a6', + 'RightTeeVector;': '\u295b', + 'rightthreetimes;': '\u22cc', + 'RightTriangle;': '\u22b3', + 'RightTriangleBar;': '\u29d0', + 'RightTriangleEqual;': '\u22b5', + 'RightUpDownVector;': '\u294f', + 'RightUpTeeVector;': '\u295c', + 'RightUpVector;': '\u21be', + 'RightUpVectorBar;': '\u2954', + 'RightVector;': '\u21c0', + 'RightVectorBar;': '\u2953', + 'ring;': '\u02da', + 'risingdotseq;': '\u2253', + 'rlarr;': '\u21c4', + 'rlhar;': '\u21cc', + 'rlm;': '\u200f', + 'rmoust;': '\u23b1', + 'rmoustache;': '\u23b1', + 'rnmid;': '\u2aee', + 'roang;': '\u27ed', + 'roarr;': '\u21fe', + 'robrk;': '\u27e7', + 'ropar;': '\u2986', + 'Ropf;': '\u211d', + 'ropf;': '\U0001d563', + 'roplus;': '\u2a2e', + 'rotimes;': '\u2a35', + 'RoundImplies;': '\u2970', + 'rpar;': ')', + 'rpargt;': '\u2994', + 'rppolint;': '\u2a12', + 'rrarr;': '\u21c9', + 'Rrightarrow;': '\u21db', + 'rsaquo;': '\u203a', + 'Rscr;': '\u211b', + 'rscr;': '\U0001d4c7', + 'Rsh;': '\u21b1', + 'rsh;': '\u21b1', + 'rsqb;': ']', + 'rsquo;': '\u2019', + 'rsquor;': '\u2019', + 'rthree;': '\u22cc', + 'rtimes;': '\u22ca', + 'rtri;': '\u25b9', + 'rtrie;': '\u22b5', + 'rtrif;': '\u25b8', + 'rtriltri;': '\u29ce', + 'RuleDelayed;': '\u29f4', + 'ruluhar;': '\u2968', + 'rx;': '\u211e', + 'Sacute;': '\u015a', + 'sacute;': '\u015b', + 'sbquo;': '\u201a', + 'Sc;': '\u2abc', + 'sc;': '\u227b', + 'scap;': '\u2ab8', + 'Scaron;': '\u0160', + 'scaron;': '\u0161', + 'sccue;': '\u227d', + 'scE;': '\u2ab4', + 'sce;': '\u2ab0', + 'Scedil;': '\u015e', + 'scedil;': '\u015f', + 'Scirc;': '\u015c', + 'scirc;': '\u015d', + 'scnap;': '\u2aba', + 'scnE;': '\u2ab6', + 'scnsim;': '\u22e9', + 'scpolint;': '\u2a13', + 'scsim;': '\u227f', + 'Scy;': '\u0421', + 'scy;': '\u0441', + 'sdot;': '\u22c5', + 'sdotb;': '\u22a1', + 'sdote;': '\u2a66', + 'searhk;': '\u2925', + 'seArr;': '\u21d8', + 'searr;': '\u2198', + 'searrow;': '\u2198', + 'sect': '\xa7', + 'sect;': '\xa7', + 'semi;': ';', + 'seswar;': '\u2929', + 'setminus;': '\u2216', + 'setmn;': '\u2216', + 'sext;': '\u2736', + 'Sfr;': '\U0001d516', + 'sfr;': '\U0001d530', + 'sfrown;': '\u2322', + 'sharp;': '\u266f', + 'SHCHcy;': '\u0429', + 'shchcy;': '\u0449', + 'SHcy;': 
'\u0428', + 'shcy;': '\u0448', + 'ShortDownArrow;': '\u2193', + 'ShortLeftArrow;': '\u2190', + 'shortmid;': '\u2223', + 'shortparallel;': '\u2225', + 'ShortRightArrow;': '\u2192', + 'ShortUpArrow;': '\u2191', + 'shy': '\xad', + 'shy;': '\xad', + 'Sigma;': '\u03a3', + 'sigma;': '\u03c3', + 'sigmaf;': '\u03c2', + 'sigmav;': '\u03c2', + 'sim;': '\u223c', + 'simdot;': '\u2a6a', + 'sime;': '\u2243', + 'simeq;': '\u2243', + 'simg;': '\u2a9e', + 'simgE;': '\u2aa0', + 'siml;': '\u2a9d', + 'simlE;': '\u2a9f', + 'simne;': '\u2246', + 'simplus;': '\u2a24', + 'simrarr;': '\u2972', + 'slarr;': '\u2190', + 'SmallCircle;': '\u2218', + 'smallsetminus;': '\u2216', + 'smashp;': '\u2a33', + 'smeparsl;': '\u29e4', + 'smid;': '\u2223', + 'smile;': '\u2323', + 'smt;': '\u2aaa', + 'smte;': '\u2aac', + 'smtes;': '\u2aac\ufe00', + 'SOFTcy;': '\u042c', + 'softcy;': '\u044c', + 'sol;': '/', + 'solb;': '\u29c4', + 'solbar;': '\u233f', + 'Sopf;': '\U0001d54a', + 'sopf;': '\U0001d564', + 'spades;': '\u2660', + 'spadesuit;': '\u2660', + 'spar;': '\u2225', + 'sqcap;': '\u2293', + 'sqcaps;': '\u2293\ufe00', + 'sqcup;': '\u2294', + 'sqcups;': '\u2294\ufe00', + 'Sqrt;': '\u221a', + 'sqsub;': '\u228f', + 'sqsube;': '\u2291', + 'sqsubset;': '\u228f', + 'sqsubseteq;': '\u2291', + 'sqsup;': '\u2290', + 'sqsupe;': '\u2292', + 'sqsupset;': '\u2290', + 'sqsupseteq;': '\u2292', + 'squ;': '\u25a1', + 'Square;': '\u25a1', + 'square;': '\u25a1', + 'SquareIntersection;': '\u2293', + 'SquareSubset;': '\u228f', + 'SquareSubsetEqual;': '\u2291', + 'SquareSuperset;': '\u2290', + 'SquareSupersetEqual;': '\u2292', + 'SquareUnion;': '\u2294', + 'squarf;': '\u25aa', + 'squf;': '\u25aa', + 'srarr;': '\u2192', + 'Sscr;': '\U0001d4ae', + 'sscr;': '\U0001d4c8', + 'ssetmn;': '\u2216', + 'ssmile;': '\u2323', + 'sstarf;': '\u22c6', + 'Star;': '\u22c6', + 'star;': '\u2606', + 'starf;': '\u2605', + 'straightepsilon;': '\u03f5', + 'straightphi;': '\u03d5', + 'strns;': '\xaf', + 'Sub;': '\u22d0', + 'sub;': '\u2282', + 'subdot;': '\u2abd', + 'subE;': '\u2ac5', + 'sube;': '\u2286', + 'subedot;': '\u2ac3', + 'submult;': '\u2ac1', + 'subnE;': '\u2acb', + 'subne;': '\u228a', + 'subplus;': '\u2abf', + 'subrarr;': '\u2979', + 'Subset;': '\u22d0', + 'subset;': '\u2282', + 'subseteq;': '\u2286', + 'subseteqq;': '\u2ac5', + 'SubsetEqual;': '\u2286', + 'subsetneq;': '\u228a', + 'subsetneqq;': '\u2acb', + 'subsim;': '\u2ac7', + 'subsub;': '\u2ad5', + 'subsup;': '\u2ad3', + 'succ;': '\u227b', + 'succapprox;': '\u2ab8', + 'succcurlyeq;': '\u227d', + 'Succeeds;': '\u227b', + 'SucceedsEqual;': '\u2ab0', + 'SucceedsSlantEqual;': '\u227d', + 'SucceedsTilde;': '\u227f', + 'succeq;': '\u2ab0', + 'succnapprox;': '\u2aba', + 'succneqq;': '\u2ab6', + 'succnsim;': '\u22e9', + 'succsim;': '\u227f', + 'SuchThat;': '\u220b', + 'Sum;': '\u2211', + 'sum;': '\u2211', + 'sung;': '\u266a', + 'sup1': '\xb9', + 'sup1;': '\xb9', + 'sup2': '\xb2', + 'sup2;': '\xb2', + 'sup3': '\xb3', + 'sup3;': '\xb3', + 'Sup;': '\u22d1', + 'sup;': '\u2283', + 'supdot;': '\u2abe', + 'supdsub;': '\u2ad8', + 'supE;': '\u2ac6', + 'supe;': '\u2287', + 'supedot;': '\u2ac4', + 'Superset;': '\u2283', + 'SupersetEqual;': '\u2287', + 'suphsol;': '\u27c9', + 'suphsub;': '\u2ad7', + 'suplarr;': '\u297b', + 'supmult;': '\u2ac2', + 'supnE;': '\u2acc', + 'supne;': '\u228b', + 'supplus;': '\u2ac0', + 'Supset;': '\u22d1', + 'supset;': '\u2283', + 'supseteq;': '\u2287', + 'supseteqq;': '\u2ac6', + 'supsetneq;': '\u228b', + 'supsetneqq;': '\u2acc', + 'supsim;': '\u2ac8', + 'supsub;': '\u2ad4', + 'supsup;': '\u2ad6', + 
'swarhk;': '\u2926', + 'swArr;': '\u21d9', + 'swarr;': '\u2199', + 'swarrow;': '\u2199', + 'swnwar;': '\u292a', + 'szlig': '\xdf', + 'szlig;': '\xdf', + 'Tab;': '\t', + 'target;': '\u2316', + 'Tau;': '\u03a4', + 'tau;': '\u03c4', + 'tbrk;': '\u23b4', + 'Tcaron;': '\u0164', + 'tcaron;': '\u0165', + 'Tcedil;': '\u0162', + 'tcedil;': '\u0163', + 'Tcy;': '\u0422', + 'tcy;': '\u0442', + 'tdot;': '\u20db', + 'telrec;': '\u2315', + 'Tfr;': '\U0001d517', + 'tfr;': '\U0001d531', + 'there4;': '\u2234', + 'Therefore;': '\u2234', + 'therefore;': '\u2234', + 'Theta;': '\u0398', + 'theta;': '\u03b8', + 'thetasym;': '\u03d1', + 'thetav;': '\u03d1', + 'thickapprox;': '\u2248', + 'thicksim;': '\u223c', + 'ThickSpace;': '\u205f\u200a', + 'thinsp;': '\u2009', + 'ThinSpace;': '\u2009', + 'thkap;': '\u2248', + 'thksim;': '\u223c', + 'THORN': '\xde', + 'thorn': '\xfe', + 'THORN;': '\xde', + 'thorn;': '\xfe', + 'Tilde;': '\u223c', + 'tilde;': '\u02dc', + 'TildeEqual;': '\u2243', + 'TildeFullEqual;': '\u2245', + 'TildeTilde;': '\u2248', + 'times': '\xd7', + 'times;': '\xd7', + 'timesb;': '\u22a0', + 'timesbar;': '\u2a31', + 'timesd;': '\u2a30', + 'tint;': '\u222d', + 'toea;': '\u2928', + 'top;': '\u22a4', + 'topbot;': '\u2336', + 'topcir;': '\u2af1', + 'Topf;': '\U0001d54b', + 'topf;': '\U0001d565', + 'topfork;': '\u2ada', + 'tosa;': '\u2929', + 'tprime;': '\u2034', + 'TRADE;': '\u2122', + 'trade;': '\u2122', + 'triangle;': '\u25b5', + 'triangledown;': '\u25bf', + 'triangleleft;': '\u25c3', + 'trianglelefteq;': '\u22b4', + 'triangleq;': '\u225c', + 'triangleright;': '\u25b9', + 'trianglerighteq;': '\u22b5', + 'tridot;': '\u25ec', + 'trie;': '\u225c', + 'triminus;': '\u2a3a', + 'TripleDot;': '\u20db', + 'triplus;': '\u2a39', + 'trisb;': '\u29cd', + 'tritime;': '\u2a3b', + 'trpezium;': '\u23e2', + 'Tscr;': '\U0001d4af', + 'tscr;': '\U0001d4c9', + 'TScy;': '\u0426', + 'tscy;': '\u0446', + 'TSHcy;': '\u040b', + 'tshcy;': '\u045b', + 'Tstrok;': '\u0166', + 'tstrok;': '\u0167', + 'twixt;': '\u226c', + 'twoheadleftarrow;': '\u219e', + 'twoheadrightarrow;': '\u21a0', + 'Uacute': '\xda', + 'uacute': '\xfa', + 'Uacute;': '\xda', + 'uacute;': '\xfa', + 'Uarr;': '\u219f', + 'uArr;': '\u21d1', + 'uarr;': '\u2191', + 'Uarrocir;': '\u2949', + 'Ubrcy;': '\u040e', + 'ubrcy;': '\u045e', + 'Ubreve;': '\u016c', + 'ubreve;': '\u016d', + 'Ucirc': '\xdb', + 'ucirc': '\xfb', + 'Ucirc;': '\xdb', + 'ucirc;': '\xfb', + 'Ucy;': '\u0423', + 'ucy;': '\u0443', + 'udarr;': '\u21c5', + 'Udblac;': '\u0170', + 'udblac;': '\u0171', + 'udhar;': '\u296e', + 'ufisht;': '\u297e', + 'Ufr;': '\U0001d518', + 'ufr;': '\U0001d532', + 'Ugrave': '\xd9', + 'ugrave': '\xf9', + 'Ugrave;': '\xd9', + 'ugrave;': '\xf9', + 'uHar;': '\u2963', + 'uharl;': '\u21bf', + 'uharr;': '\u21be', + 'uhblk;': '\u2580', + 'ulcorn;': '\u231c', + 'ulcorner;': '\u231c', + 'ulcrop;': '\u230f', + 'ultri;': '\u25f8', + 'Umacr;': '\u016a', + 'umacr;': '\u016b', + 'uml': '\xa8', + 'uml;': '\xa8', + 'UnderBar;': '_', + 'UnderBrace;': '\u23df', + 'UnderBracket;': '\u23b5', + 'UnderParenthesis;': '\u23dd', + 'Union;': '\u22c3', + 'UnionPlus;': '\u228e', + 'Uogon;': '\u0172', + 'uogon;': '\u0173', + 'Uopf;': '\U0001d54c', + 'uopf;': '\U0001d566', + 'UpArrow;': '\u2191', + 'Uparrow;': '\u21d1', + 'uparrow;': '\u2191', + 'UpArrowBar;': '\u2912', + 'UpArrowDownArrow;': '\u21c5', + 'UpDownArrow;': '\u2195', + 'Updownarrow;': '\u21d5', + 'updownarrow;': '\u2195', + 'UpEquilibrium;': '\u296e', + 'upharpoonleft;': '\u21bf', + 'upharpoonright;': '\u21be', + 'uplus;': '\u228e', + 'UpperLeftArrow;': 
'\u2196', + 'UpperRightArrow;': '\u2197', + 'Upsi;': '\u03d2', + 'upsi;': '\u03c5', + 'upsih;': '\u03d2', + 'Upsilon;': '\u03a5', + 'upsilon;': '\u03c5', + 'UpTee;': '\u22a5', + 'UpTeeArrow;': '\u21a5', + 'upuparrows;': '\u21c8', + 'urcorn;': '\u231d', + 'urcorner;': '\u231d', + 'urcrop;': '\u230e', + 'Uring;': '\u016e', + 'uring;': '\u016f', + 'urtri;': '\u25f9', + 'Uscr;': '\U0001d4b0', + 'uscr;': '\U0001d4ca', + 'utdot;': '\u22f0', + 'Utilde;': '\u0168', + 'utilde;': '\u0169', + 'utri;': '\u25b5', + 'utrif;': '\u25b4', + 'uuarr;': '\u21c8', + 'Uuml': '\xdc', + 'uuml': '\xfc', + 'Uuml;': '\xdc', + 'uuml;': '\xfc', + 'uwangle;': '\u29a7', + 'vangrt;': '\u299c', + 'varepsilon;': '\u03f5', + 'varkappa;': '\u03f0', + 'varnothing;': '\u2205', + 'varphi;': '\u03d5', + 'varpi;': '\u03d6', + 'varpropto;': '\u221d', + 'vArr;': '\u21d5', + 'varr;': '\u2195', + 'varrho;': '\u03f1', + 'varsigma;': '\u03c2', + 'varsubsetneq;': '\u228a\ufe00', + 'varsubsetneqq;': '\u2acb\ufe00', + 'varsupsetneq;': '\u228b\ufe00', + 'varsupsetneqq;': '\u2acc\ufe00', + 'vartheta;': '\u03d1', + 'vartriangleleft;': '\u22b2', + 'vartriangleright;': '\u22b3', + 'Vbar;': '\u2aeb', + 'vBar;': '\u2ae8', + 'vBarv;': '\u2ae9', + 'Vcy;': '\u0412', + 'vcy;': '\u0432', + 'VDash;': '\u22ab', + 'Vdash;': '\u22a9', + 'vDash;': '\u22a8', + 'vdash;': '\u22a2', + 'Vdashl;': '\u2ae6', + 'Vee;': '\u22c1', + 'vee;': '\u2228', + 'veebar;': '\u22bb', + 'veeeq;': '\u225a', + 'vellip;': '\u22ee', + 'Verbar;': '\u2016', + 'verbar;': '|', + 'Vert;': '\u2016', + 'vert;': '|', + 'VerticalBar;': '\u2223', + 'VerticalLine;': '|', + 'VerticalSeparator;': '\u2758', + 'VerticalTilde;': '\u2240', + 'VeryThinSpace;': '\u200a', + 'Vfr;': '\U0001d519', + 'vfr;': '\U0001d533', + 'vltri;': '\u22b2', + 'vnsub;': '\u2282\u20d2', + 'vnsup;': '\u2283\u20d2', + 'Vopf;': '\U0001d54d', + 'vopf;': '\U0001d567', + 'vprop;': '\u221d', + 'vrtri;': '\u22b3', + 'Vscr;': '\U0001d4b1', + 'vscr;': '\U0001d4cb', + 'vsubnE;': '\u2acb\ufe00', + 'vsubne;': '\u228a\ufe00', + 'vsupnE;': '\u2acc\ufe00', + 'vsupne;': '\u228b\ufe00', + 'Vvdash;': '\u22aa', + 'vzigzag;': '\u299a', + 'Wcirc;': '\u0174', + 'wcirc;': '\u0175', + 'wedbar;': '\u2a5f', + 'Wedge;': '\u22c0', + 'wedge;': '\u2227', + 'wedgeq;': '\u2259', + 'weierp;': '\u2118', + 'Wfr;': '\U0001d51a', + 'wfr;': '\U0001d534', + 'Wopf;': '\U0001d54e', + 'wopf;': '\U0001d568', + 'wp;': '\u2118', + 'wr;': '\u2240', + 'wreath;': '\u2240', + 'Wscr;': '\U0001d4b2', + 'wscr;': '\U0001d4cc', + 'xcap;': '\u22c2', + 'xcirc;': '\u25ef', + 'xcup;': '\u22c3', + 'xdtri;': '\u25bd', + 'Xfr;': '\U0001d51b', + 'xfr;': '\U0001d535', + 'xhArr;': '\u27fa', + 'xharr;': '\u27f7', + 'Xi;': '\u039e', + 'xi;': '\u03be', + 'xlArr;': '\u27f8', + 'xlarr;': '\u27f5', + 'xmap;': '\u27fc', + 'xnis;': '\u22fb', + 'xodot;': '\u2a00', + 'Xopf;': '\U0001d54f', + 'xopf;': '\U0001d569', + 'xoplus;': '\u2a01', + 'xotime;': '\u2a02', + 'xrArr;': '\u27f9', + 'xrarr;': '\u27f6', + 'Xscr;': '\U0001d4b3', + 'xscr;': '\U0001d4cd', + 'xsqcup;': '\u2a06', + 'xuplus;': '\u2a04', + 'xutri;': '\u25b3', + 'xvee;': '\u22c1', + 'xwedge;': '\u22c0', + 'Yacute': '\xdd', + 'yacute': '\xfd', + 'Yacute;': '\xdd', + 'yacute;': '\xfd', + 'YAcy;': '\u042f', + 'yacy;': '\u044f', + 'Ycirc;': '\u0176', + 'ycirc;': '\u0177', + 'Ycy;': '\u042b', + 'ycy;': '\u044b', + 'yen': '\xa5', + 'yen;': '\xa5', + 'Yfr;': '\U0001d51c', + 'yfr;': '\U0001d536', + 'YIcy;': '\u0407', + 'yicy;': '\u0457', + 'Yopf;': '\U0001d550', + 'yopf;': '\U0001d56a', + 'Yscr;': '\U0001d4b4', + 'yscr;': '\U0001d4ce', + 
'YUcy;': '\u042e', + 'yucy;': '\u044e', + 'yuml': '\xff', + 'Yuml;': '\u0178', + 'yuml;': '\xff', + 'Zacute;': '\u0179', + 'zacute;': '\u017a', + 'Zcaron;': '\u017d', + 'zcaron;': '\u017e', + 'Zcy;': '\u0417', + 'zcy;': '\u0437', + 'Zdot;': '\u017b', + 'zdot;': '\u017c', + 'zeetrf;': '\u2128', + 'ZeroWidthSpace;': '\u200b', + 'Zeta;': '\u0396', + 'zeta;': '\u03b6', + 'Zfr;': '\u2128', + 'zfr;': '\U0001d537', + 'ZHcy;': '\u0416', + 'zhcy;': '\u0436', + 'zigrarr;': '\u21dd', + 'Zopf;': '\u2124', + 'zopf;': '\U0001d56b', + 'Zscr;': '\U0001d4b5', + 'zscr;': '\U0001d4cf', + 'zwj;': '\u200d', + 'zwnj;': '\u200c', +}
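The table above maps HTML5 named character references to their replacement text; entries listed both with and without a trailing semicolon (for example 'amp' next to 'amp;') are the legacy forms that parsers must also accept. As a rough illustration of how such a table is consumed, here is a standalone sketch of a resolver; it assumes the dict is bound to the name html5 (a hypothetical name here, the real one is given where the table opens) and only handles exact-name matches, not the longest-prefix matching a full HTML parser performs:

    import re

    def unescape(text, table):
        # Resolve &name; and legacy &name references; unknown names are
        # left untouched, as browsers do for unrecognized references.
        def repl(match):
            name = match.group(1)
            return table.get(name, match.group(0))
        return re.sub(r'&([a-zA-Z][a-zA-Z0-9]*;?)', repl, text)

    # unescape('x &ne; y &amp z', html5)  ->  u'x \u2260 y & z'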
diff --git a/plugin.video.alfa/core/filetools.py b/plugin.video.alfa/core/filetools.py
new file mode 100755
index 00000000..95f08771
--- /dev/null
+++ b/plugin.video.alfa/core/filetools.py
@@ -0,0 +1,579 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------
+# filetools
+# File management with samba/local discrimination
+# ------------------------------------------------------------
+
+import os
+import traceback
+
+from core import logger
+from core import scrapertools
+from platformcode import platformtools
+
+try:
+    from lib.sambatools import libsmb as samba
+except:
+    samba = None
+    # Python 2.4 is not compatible with the samba module; needs review
+
+# Windows is "mbcs"; linux, osx and android are "utf8"
+if os.name == "nt":
+    fs_encoding = ""
+else:
+    fs_encoding = "utf8"
+
+
+def validate_path(path):
+    """
+    Removes characters that are not allowed
+    @param path: string to validate
+    @type path: str
+    @rtype: str
+    @return: returns the string without the disallowed characters
+    """
+    chars = ":*?<>|"
+    if path.lower().startswith("smb://"):
+        import re
+        parts = re.split(r'smb://(.+?)/(.+)', path)[1:3]
+        return "smb://" + parts[0] + "/" + ''.join([c for c in parts[1] if c not in chars])
+
+    else:
+        if path.find(":\\") == 1:
+            unidad = path[0:3]
+            path = path[2:]
+        else:
+            unidad = ""
+
+        return unidad + ''.join([c for c in path if c not in chars])
+
+
+def encode(path, _samba=False):
+    """
+    Encodes a path according to the operating system we are running on.
+    The path argument has to be utf-8 encoded
+    @type path: unicode, or str with utf-8 encoding
+    @param path: parameter to encode
+    @type _samba: bool
+    @param _samba: whether the path is a samba path or not
+    @rtype: str
+    @return: path encoded in the system character set, or in utf-8 if samba
+    """
+    if not type(path) == unicode:
+        path = unicode(path, "utf-8", "ignore")
+
+    if path.lower().startswith("smb://") or _samba:
+        path = path.encode("utf-8", "ignore")
+    else:
+        if fs_encoding:
+            path = path.encode(fs_encoding, "ignore")
+
+    return path
+
+
+def decode(path):
+    """
+    Converts a text string to the utf-8 character set,
+    removing any characters not allowed in utf-8
+    @type path: str, unicode, or list of str/unicode
+    @param path: can be a path or a list() with several paths
+    @rtype: str
+    @return: path encoded in UTF-8
+    """
+    if type(path) == list:
+        for x in range(len(path)):
+            if not type(path[x]) == unicode:
+                path[x] = path[x].decode(fs_encoding, "ignore")
+            path[x] = path[x].encode("utf-8", "ignore")
+    else:
+        if not type(path) == unicode:
+            path = path.decode(fs_encoding, "ignore")
+        path = path.encode("utf-8", "ignore")
+    return path
+
+
+def read(path, linea_inicio=0, total_lineas=None):
+    """
+    Reads the content of a file and returns the data
+    @param path: file path
+    @type path: str
+    @param linea_inicio: first line of the file to read
+    @type linea_inicio: positive int
+    @param total_lineas: maximum number of lines to read. If None, or greater than the total number of lines,
+    the file is read to the end.
+    @type total_lineas: positive int
+    @rtype: str
+    @return: data contained in the file
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            f = samba.smb_open(path, "rb")
+        else:
+            f = open(path, "rb")
+
+        data = []
+        for x, line in enumerate(f):
+            if x < linea_inicio: continue
+            if len(data) == total_lineas: break
+            data.append(line)
+        f.close()
+    except:
+        logger.error("ERROR al leer el archivo: %s" % path)
+        logger.error(traceback.format_exc())
+        return False
+
+    else:
+        return "".join(data)
+
+
+def write(path, data):
+    """
+    Saves the data to a file
+    @param path: path of the file to save
+    @type path: str
+    @param data: data to save
+    @type data: str
+    @rtype: bool
+    @return: returns True if written correctly, or False on error
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            f = samba.smb_open(path, "wb")
+        else:
+            f = open(path, "wb")
+
+        f.write(data)
+        f.close()
+    except:
+        logger.error("ERROR al guardar el archivo: %s" % path)
+        logger.error(traceback.format_exc())
+        return False
+    else:
+        return True
+
+
+def file_open(path, mode="r"):
+    """
+    Opens a file
+    @param path: path
+    @type path: str
+    @rtype: file
+    @return: file object
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            return samba.smb_open(path, mode)
+        else:
+            return open(path, mode)
+    except:
+        logger.error("ERROR al abrir el archivo: %s" % path)
+        logger.error(traceback.format_exc())
+        platformtools.dialog_notification("Error al abrir", path)
+        return False
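read(), write() and file_open() deliberately swallow exceptions, log the traceback and hand back False instead of raising, so every caller is expected to test the result. Note that for read() the safe test is "is False": an empty file legitimately returns '', which is also falsy. A usage sketch with hypothetical paths:

    from core import filetools

    def backup_list(src, dest):
        # Hypothetical helper: compare with "is False" to tell a failed
        # read apart from a file that is merely empty.
        data = filetools.read(src)
        if data is False:
            return False
        return filetools.write(dest, data)

    # The same call works for local and samba destinations:
    backup_list("/storage/listas/canales.txt", "smb://nas/backup/canales.txt")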
+def rename(path, new_name):
+    """
+    Renames a file or folder
+    @param path: path of the file or folder to rename
+    @type path: str
+    @param new_name: new name
+    @type new_name: str
+    @rtype: bool
+    @return: returns False on error
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            new_name = encode(new_name, True)
+            samba.rename(path, join(dirname(path), new_name))
+        else:
+            new_name = encode(new_name, False)
+            os.rename(path, os.path.join(os.path.dirname(path), new_name))
+    except:
+        logger.error("ERROR al renombrar el archivo: %s" % path)
+        logger.error(traceback.format_exc())
+        platformtools.dialog_notification("Error al renombrar", path)
+        return False
+    else:
+        return True
+
+
+def move(path, dest):
+    """
+    Moves a file
+    @param path: path of the file to move
+    @type path: str
+    @param dest: destination path
+    @type dest: str
+    @rtype: bool
+    @return: returns False on error
+    """
+    try:
+        # samba/samba
+        if path.lower().startswith("smb://") and dest.lower().startswith("smb://"):
+            dest = encode(dest, True)
+            path = encode(path, True)
+            samba.rename(path, dest)
+
+        # local/local
+        elif not path.lower().startswith("smb://") and not dest.lower().startswith("smb://"):
+            dest = encode(dest)
+            path = encode(path)
+            os.rename(path, dest)
+        # mixed: in this case the file is copied and then the source file is removed
+        else:
+            return copy(path, dest) == True and remove(path) == True
+    except:
+        logger.error("ERROR al mover el archivo: %s" % path)
+        return False
+    else:
+        return True
+
+
+def copy(path, dest, silent=False):
+    """
+    Copies a file
+    @param path: path of the file to copy
+    @type path: str
+    @param dest: destination path
+    @type dest: str
+    @param silent: whether or not the progress dialog is shown
+    @type silent: bool
+    @rtype: bool
+    @return: returns False on error
+    """
+    try:
+        fo = file_open(path, "rb")
+        fd = file_open(dest, "wb")
+        if fo and fd:
+            if not silent:
+                dialogo = platformtools.dialog_progress("Copiando archivo", "")
+            size = getsize(path)
+            copiado = 0
+            while True:
+                if not silent:
+                    dialogo.update(copiado * 100 / size, basename(path))
+                buf = fo.read(1024 * 1024)
+                if not buf:
+                    break
+                if not silent and dialogo.iscanceled():
+                    dialogo.close()
+                    return False
+                fd.write(buf)
+                copiado += len(buf)
+            if not silent:
+                dialogo.close()
+    except:
+        logger.error("ERROR al copiar el archivo: %s" % path)
+        logger.error(traceback.format_exc())
+        return False
+    else:
+        return True
+
+
+def exists(path):
+    """
+    Checks whether a folder or file exists
+    @param path: path
+    @type path: str
+    @rtype: bool
+    @return: returns True if the path exists, whether it is a folder or a file
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            return samba.exists(path)
+        else:
+            return os.path.exists(path)
+    except:
+        logger.error("ERROR al comprobar la ruta: %s" % path)
+        logger.error(traceback.format_exc())
+        return False
+
+
+def isfile(path):
+    """
+    Checks whether the path is a file
+    @param path: path
+    @type path: str
+    @rtype: bool
+    @return: returns True if the path exists and is a file
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            return samba.isfile(path)
+        else:
+            return os.path.isfile(path)
+    except:
+        logger.error("ERROR al comprobar el archivo: %s" % path)
+        logger.error(traceback.format_exc())
+        return False
+
+
+def isdir(path):
+    """
+    Checks whether the path is a directory
+    @param path: path
+    @type path: str
+    @rtype: bool
+    @return: returns True if the path exists and is a directory
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            return samba.isdir(path)
+        else:
+            return os.path.isdir(path)
+    except:
+        logger.error("ERROR al comprobar el directorio: %s" % path)
+        logger.error(traceback.format_exc())
+        return False
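exists(), isfile() and isdir() hide the samba/local split entirely, which is what lets the rest of the addon treat a NAS share exactly like a local folder. A hypothetical helper in the same style (not part of the patch), also using mkdir() from further down this file:

    from core import filetools

    def ensure_dir(path):
        # Hypothetical: create the folder only if it is missing; both
        # branches work unchanged for "smb://" and local paths.
        if filetools.exists(path):
            return filetools.isdir(path)
        return filetools.mkdir(path)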
+def getsize(path):
+    """
+    Gets the size of a file
+    @param path: file path
+    @type path: str
+    @rtype: int
+    @return: size of the file
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            return long(samba.get_attributes(path).file_size)
+        else:
+            return os.path.getsize(path)
+    except:
+        logger.error("ERROR al obtener el tamaño: %s" % path)
+        logger.error(traceback.format_exc())
+        return 0L
+
+
+def remove(path):
+    """
+    Removes a file
+    @param path: path of the file to remove
+    @type path: str
+    @rtype: bool
+    @return: returns False on error
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            samba.remove(path)
+        else:
+            os.remove(path)
+    except:
+        logger.error("ERROR al eliminar el archivo: %s" % path)
+        logger.error(traceback.format_exc())
+        platformtools.dialog_notification("Error al eliminar el archivo", path)
+        return False
+    else:
+        return True
+
+
+def rmdirtree(path):
+    """
+    Removes a directory and its contents
+    @param path: path to remove
+    @type path: str
+    @rtype: bool
+    @return: returns False on error
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            for raiz, subcarpetas, ficheros in samba.walk(path, topdown=False):
+                for f in ficheros:
+                    samba.remove(join(decode(raiz), decode(f)))
+                for s in subcarpetas:
+                    samba.rmdir(join(decode(raiz), decode(s)))
+            samba.rmdir(path)
+        else:
+            import shutil
+            shutil.rmtree(path, ignore_errors=True)
+    except:
+        logger.error("ERROR al eliminar el directorio: %s" % path)
+        logger.error(traceback.format_exc())
+        platformtools.dialog_notification("Error al eliminar el directorio", path)
+        return False
+    else:
+        return not exists(path)
+
+
+def rmdir(path):
+    """
+    Removes a directory
+    @param path: path to remove
+    @type path: str
+    @rtype: bool
+    @return: returns False on error
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            samba.rmdir(path)
+        else:
+            os.rmdir(path)
+    except:
+        logger.error("ERROR al eliminar el directorio: %s" % path)
+        logger.error(traceback.format_exc())
+        platformtools.dialog_notification("Error al eliminar el directorio", path)
+        return False
+    else:
+        return True
+
+
+def mkdir(path):
+    """
+    Creates a directory
+    @param path: path to create
+    @type path: str
+    @rtype: bool
+    @return: returns False on error
+    """
+    path = encode(path)
+    try:
+        if path.lower().startswith("smb://"):
+            samba.mkdir(path)
+        else:
+            os.mkdir(path)
+    except:
+        logger.error("ERROR al crear el directorio: %s" % path)
+        logger.error(traceback.format_exc())
+        platformtools.dialog_notification("Error al crear el directorio", path)
+        return False
+    else:
+        return True
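mkdir() wraps os.mkdir / samba.mkdir, so it creates exactly one directory level and fails if the parent is missing. Where nested folders are needed, a recursive wrapper can be built from the primitives above; this is a hypothetical sketch, not part of the patch:

    from core import filetools

    def makedirs(path):
        # Hypothetical: create parents first, then the leaf; dirname()
        # (defined below in this file) works for both smb:// and local paths.
        if filetools.exists(path):
            return filetools.isdir(path)
        parent = filetools.dirname(path)
        if parent and parent != path and not filetools.exists(parent):
            if not makedirs(parent):
                return False
        return filetools.mkdir(path)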
copia del listado de directorios + # si no da error cuando tiene que entrar recursivamente en directorios con caracteres especiales + yield decode(a), decode(list(b)), decode(c) + + +def listdir(path): + """ + Lista un directorio + @param path: Directorio a listar, debe ser un str "UTF-8" + @type path: str + @rtype: str + @return: contenido de un directorio + """ + + path = encode(path) + try: + if path.lower().startswith("smb://"): + return decode(samba.listdir(path)) + else: + return decode(os.listdir(path)) + except: + logger.error("ERROR al leer el directorio: %s" % path) + logger.error(traceback.format_exc()) + return False + + +def join(*paths): + """ + Junta varios directorios + Corrige las barras "/" o "\" segun el sistema operativo y si es o no smaba + @rytpe: str + @return: la ruta concatenada + """ + list_path = [] + if paths[0].startswith("/"): + list_path.append("") + + for path in paths: + if path: + list_path += path.replace("\\", "/").strip("/").split("/") + + if list_path[0].lower() == "smb:": + return "/".join(list_path) + else: + return os.sep.join(list_path) + + +def split(path): + """ + Devuelve una tupla formada por el directorio y el nombre del fichero de una ruta + @param path: ruta + @type path: str + @return: (dirname, basename) + @rtype: tuple + """ + if path.lower().startswith("smb://"): + if '/' not in path[6:]: + path = path.replace("smb://", "smb:///", 1) + return path.rsplit('/', 1) + else: + return os.path.split(path) + + +def basename(path): + """ + Devuelve el nombre del fichero de una ruta + @param path: ruta + @type path: str + @return: fichero de la ruta + @rtype: str + """ + return split(path)[1] + + +def dirname(path): + """ + Devuelve el directorio de una ruta + @param path: ruta + @type path: str + @return: directorio de la ruta + @rtype: str + """ + return split(path)[0] + + +def is_relative(path): + return "://" not in path and not path.startswith("/") and ":\\" not in path + + +def remove_tags(title): + """ + devuelve el titulo sin tags como color + @type title: str + @param title: title + @rtype: str + @return: cadena sin tags + """ + logger.info() + + title_without_tags = scrapertools.find_single_match(title, '\[color .+?\](.+)\[\/color\]') + + if title_without_tags: + return title_without_tags + else: + return title diff --git a/plugin.video.alfa/core/httptools.py b/plugin.video.alfa/core/httptools.py new file mode 100755 index 00000000..5ac76468 --- /dev/null +++ b/plugin.video.alfa/core/httptools.py @@ -0,0 +1,260 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# httptools +# -------------------------------------------------------------------------------- + +import cookielib +import gzip +import os +import time +import urllib +import urllib2 +import urlparse +from StringIO import StringIO +from threading import Lock + +from core import config +from core import logger +from core.cloudflare import Cloudflare + +cookies_lock = Lock() + +cj = cookielib.MozillaCookieJar() +ficherocookies = os.path.join(config.get_data_path(), "cookies.dat") + +# Headers por defecto, si no se especifica nada +default_headers = dict() +default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0" +default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" +default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3" +default_headers["Accept-Charset"] = "UTF-8" +default_headers["Accept-Encoding"] = "gzip" + 
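+# Usage sketch (illustrative, hypothetical URL): headers passed to
+# downloadpage() below are merged into these defaults unless
+# replace_headers=True; the return value is an object with attribute access.
+#
+#   from core import httptools
+#   response = httptools.downloadpage("http://example.com",
+#                                     headers={"Referer": "http://example.com"})
+#   if response.sucess:  # note: the field is spelled "sucess" in this module
+#       print response.code, len(response.data)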
+ +def get_url_headers(url): + domain_cookies = cj._cookies.get("." + urlparse.urlparse(url)[1], {}).get("/", {}) + + if "|" in url or not "cf_clearance" in domain_cookies: + return url + + headers = dict() + headers["User-Agent"] = default_headers["User-Agent"] + headers["Cookie"] = "; ".join(["%s=%s" % (c.name, c.value) for c in domain_cookies.values()]) + + return url + "|" + "&".join(["%s=%s" % (h, headers[h]) for h in headers]) + + +def load_cookies(): + cookies_lock.acquire() + if os.path.isfile(ficherocookies): + logger.info("Leyendo fichero cookies") + try: + cj.load(ficherocookies, ignore_discard=True) + except: + logger.info("El fichero de cookies existe pero es ilegible, se borra") + os.remove(ficherocookies) + cookies_lock.release() + + +def save_cookies(): + cookies_lock.acquire() + logger.info("Guardando cookies...") + cj.save(ficherocookies, ignore_discard=True) + cookies_lock.release() + + +load_cookies() + + +def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=True, cookies=True, replace_headers=False, + add_referer=False, only_headers=False, bypass_cloudflare=True): + """ + Abre una url y retorna los datos obtenidos + + @param url: url que abrir. + @type url: str + @param post: Si contiene algun valor este es enviado mediante POST. + @type post: str + @param headers: Headers para la petición, si no contiene nada se usara los headers por defecto. + @type headers: dict, list + @param timeout: Timeout para la petición. + @type timeout: int + @param follow_redirects: Indica si se han de seguir las redirecciones. + @type follow_redirects: bool + @param cookies: Indica si se han de usar las cookies. + @type cookies: bool + @param replace_headers: Si True, los headers pasados por el parametro "headers" sustituiran por completo los headers por defecto. + Si False, los headers pasados por el parametro "headers" modificaran los headers por defecto. + @type replace_headers: bool + @param add_referer: Indica si se ha de añadir el header "Referer" usando el dominio de la url como valor. + @type add_referer: bool + @param only_headers: Si True, solo se descargarán los headers, omitiendo el contenido de la url. 
+ @type only_headers: bool + @return: Resultado de la petición + @rtype: HTTPResponse + + Parametro Tipo Descripción + ---------------------------------------------------------------------------------------------------------------- + HTTPResponse.sucess: bool True: Peticion realizada correctamente | False: Error al realizar la petición + HTTPResponse.code: int Código de respuesta del servidor o código de error en caso de producirse un error + HTTPResponse.error: str Descripción del error en caso de producirse un error + HTTPResponse.headers: dict Diccionario con los headers de respuesta del servidor + HTTPResponse.data: str Respuesta obtenida del servidor + HTTPResponse.time: float Tiempo empleado para realizar la petición + + """ + + response = {} + + # Headers por defecto, si no se especifica nada + request_headers = default_headers.copy() + + # Headers pasados como parametros + if headers is not None: + if not replace_headers: + request_headers.update(dict(headers)) + else: + request_headers = dict(headers) + + if add_referer: + request_headers["Referer"] = "/".join(url.split("/")[:3]) + + url = urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]") + + logger.info("----------------------------------------------") + logger.info("downloadpage") + logger.info("----------------------------------------------") + logger.info("Timeout: %s" % timeout) + logger.info("URL: " + url) + logger.info("Dominio: " + urlparse.urlparse(url)[1]) + if post: + logger.info("Peticion: POST") + else: + logger.info("Peticion: GET") + logger.info("Usar Cookies: %s" % cookies) + logger.info("Descargar Pagina: %s" % (not only_headers)) + logger.info("Fichero de Cookies: " + ficherocookies) + logger.info("Headers:") + for header in request_headers: + logger.info("- %s: %s" % (header, request_headers[header])) + + # Handlers + handlers = [urllib2.HTTPHandler(debuglevel=False)] + + if not follow_redirects: + handlers.append(NoRedirectHandler()) + + if cookies: + handlers.append(urllib2.HTTPCookieProcessor(cj)) + + opener = urllib2.build_opener(*handlers) + + logger.info("Realizando Peticion") + + # Contador + inicio = time.time() + + req = urllib2.Request(url, post, request_headers) + + try: + if urllib2.__version__ == "2.4": + import socket + deftimeout = socket.getdefaulttimeout() + if timeout is not None: + socket.setdefaulttimeout(timeout) + handle = opener.open(req) + socket.setdefaulttimeout(deftimeout) + else: + handle = opener.open(req, timeout=timeout) + + except urllib2.HTTPError, handle: + response["sucess"] = False + response["code"] = handle.code + response["error"] = handle.__dict__.get("reason", str(handle)) + response["headers"] = handle.headers.dict + if not only_headers: + response["data"] = handle.read() + else: + response["data"] = "" + response["time"] = time.time() - inicio + response["url"] = handle.geturl() + + except Exception, e: + response["sucess"] = False + response["code"] = e.__dict__.get("errno", e.__dict__.get("code", str(e))) + response["error"] = e.__dict__.get("reason", str(e)) + response["headers"] = {} + response["data"] = "" + response["time"] = time.time() - inicio + response["url"] = url + + else: + response["sucess"] = True + response["code"] = handle.code + response["error"] = None + response["headers"] = handle.headers.dict + if not only_headers: + response["data"] = handle.read() + else: + response["data"] = "" + response["time"] = time.time() - inicio + response["url"] = handle.geturl() + + logger.info("Terminado en %.2f segundos" % (response["time"])) + logger.info("Response 
sucess: %s" % (response["sucess"])) + logger.info("Response code: %s" % (response["code"])) + logger.info("Response error: %s" % (response["error"])) + logger.info("Response data length: %s" % (len(response["data"]))) + logger.info("Response headers:") + for header in response["headers"]: + logger.info("- %s: %s" % (header, response["headers"][header])) + + if cookies: + save_cookies() + + logger.info("Encoding: %s" % (response["headers"].get('content-encoding'))) + + if response["headers"].get('content-encoding') == 'gzip': + logger.info("Descomprimiendo...") + try: + response["data"] = gzip.GzipFile(fileobj=StringIO(response["data"])).read() + logger.info("Descomprimido") + except: + logger.info("No se ha podido descomprimir") + + # Anti Cloudflare + if bypass_cloudflare: + cf = Cloudflare(response) + if cf.is_cloudflare: + logger.info("cloudflare detectado, esperando %s segundos..." % cf.wait_time) + auth_url = cf.get_url() + logger.info("Autorizando... url: %s" % auth_url) + if downloadpage(auth_url, headers=request_headers, replace_headers=True).sucess: + logger.info("Autorización correcta, descargando página") + resp = downloadpage(url=response["url"], post=post, headers=headers, timeout=timeout, + follow_redirects=follow_redirects, + cookies=cookies, replace_headers=replace_headers, add_referer=add_referer) + response["sucess"] = resp.sucess + response["code"] = resp.code + response["error"] = resp.error + response["headers"] = resp.headers + response["data"] = resp.data + response["time"] = resp.time + response["url"] = resp.url + else: + logger.info("No se ha podido autorizar") + + return type('HTTPResponse', (), response) + + +class NoRedirectHandler(urllib2.HTTPRedirectHandler): + def http_error_302(self, req, fp, code, msg, headers): + infourl = urllib.addinfourl(fp, headers, req.get_full_url()) + infourl.status = code + infourl.code = code + return infourl + + http_error_300 = http_error_302 + http_error_301 = http_error_302 + http_error_303 = http_error_302 + http_error_307 = http_error_302 diff --git a/plugin.video.alfa/core/item.py b/plugin.video.alfa/core/item.py new file mode 100755 index 00000000..f3e89aeb --- /dev/null +++ b/plugin.video.alfa/core/item.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Item is the object we use for representing data +# -------------------------------------------------------------------------------- + +import base64 +import copy +import os +import urllib +from HTMLParser import HTMLParser + +from core import jsontools as json + + +class InfoLabels(dict): + def __str__(self): + return self.tostring(separador=',\r\t') + + def __setitem__(self, name, value): + if name in ["season", "episode"]: + # forzamos int() en season y episode + try: + super(InfoLabels, self).__setitem__(name, int(value)) + except: + pass + + elif name in ['IMDBNumber', 'imdb_id']: + # Por compatibilidad hemos de guardar el valor en los tres campos + super(InfoLabels, self).__setitem__('IMDBNumber', str(value)) + # super(InfoLabels, self).__setitem__('code', value) + super(InfoLabels, self).__setitem__('imdb_id', str(value)) + + elif name == "mediatype" and value not in ["list", "movie", "tvshow", "season", "episode"]: + super(InfoLabels, self).__setitem__('mediatype', 'list') + + elif name in ['tmdb_id', 'tvdb_id', 'noscrap_id']: + super(InfoLabels, self).__setitem__(name, str(value)) + else: + super(InfoLabels, self).__setitem__(name, value) + + # Python 2.4 + def __getitem__(self, 
key): + try: + return super(InfoLabels, self).__getitem__(key) + except: + return self.__missing__(key) + + def __missing__(self, key): + """ + Valores por defecto en caso de que la clave solicitada no exista. + El parametro 'default' en la funcion obj_infoLabels.get(key,default) tiene preferencia sobre los aqui definidos. + """ + if key in ['rating']: + # Ejemplo de clave q devuelve un str formateado como float por defecto + return '0.0' + + elif key == 'code': + code = [] + # Añadir imdb_id al listado de codigos + if 'imdb_id' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('imdb_id'): + code.append(super(InfoLabels, self).__getitem__('imdb_id')) + + # Completar con el resto de codigos + for scr in ['tmdb_id', 'tvdb_id', 'noscrap_id']: + if scr in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__(scr): + value = "%s%s" % (scr[:-2], super(InfoLabels, self).__getitem__(scr)) + code.append(value) + + # Opcion añadir un code del tipo aleatorio + if not code: + import time + value = time.strftime("%Y%m%d%H%M%S", time.gmtime()) + code.append(value) + super(InfoLabels, self).__setitem__('noscrap_id', value) + + return code + + elif key == 'mediatype': + # "list", "movie", "tvshow", "season", "episode" + if 'tvshowtitle' in super(InfoLabels, self).keys() \ + and super(InfoLabels, self).__getitem__('tvshowtitle') != "": + if 'episode' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('episode') != "": + return 'episode' + + if 'episodeName' in super(InfoLabels, self).keys() \ + and super(InfoLabels, self).__getitem__('episodeName') != "": + return 'episode' + + if 'season' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('season') != "": + return 'season' + else: + return 'tvshow' + + elif 'title' in super(InfoLabels, self).keys() and super(InfoLabels, self).__getitem__('title') != "": + return 'movie' + + else: + return 'list' + + else: + # El resto de claves devuelven cadenas vacias por defecto + return "" + + def tostring(self, separador=', '): + ls = [] + dic = dict(super(InfoLabels, self).items()) + + for i in sorted(dic.items()): + i_str = str(i)[1:-1] + if isinstance(i[0], str): + old = i[0] + "'," + new = i[0] + "':" + else: + old = str(i[0]) + "," + new = str(i[0]) + ":" + ls.append(i_str.replace(old, new, 1)) + + return "{%s}" % separador.join(ls) + + +class Item(object): + def __init__(self, **kwargs): + """ + Inicializacion del item + """ + + # Creamos el atributo infoLabels + self.__dict__["infoLabels"] = InfoLabels() + if "infoLabels" in kwargs: + if isinstance(kwargs["infoLabels"], dict): + self.__dict__["infoLabels"].update(kwargs["infoLabels"]) + del kwargs["infoLabels"] + + if "parentContent" in kwargs: + self.set_parent_content(kwargs["parentContent"]) + del kwargs["parentContent"] + + kw = copy.copy(kwargs) + for k in kw: + if k in ["contentTitle", "contentPlot", "contentSerieName", "show", "contentType", "contentEpisodeTitle", + "contentSeason", "contentEpisodeNumber", "contentThumbnail", "plot", "duration", "contentQuality", + "quality"]: + self.__setattr__(k, kw[k]) + del kwargs[k] + + self.__dict__.update(kwargs) + self.__dict__ = self.toutf8(self.__dict__) + + def __contains__(self, m): + """ + Comprueba si un atributo existe en el item + """ + return m in self.__dict__ + + def __setattr__(self, name, value): + """ + Función llamada al modificar cualquier atributo del item, modifica algunos atributos en función de los datos + modificados. 
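+
+        Illustrative example: assigning item.contentTitle = "Foo" also sets
+        item.infoLabels["title"] = "Foo" through the mappings below.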
+ """ + value = self.toutf8(value) + if name == "__dict__": + for key in value: + self.__setattr__(key, value[key]) + return + + # Descodificamos los HTML entities + if name in ["title", "plot", "fulltitle", "contentPlot", "contentTitle"]: + value = self.decode_html(value) + + # Al modificar cualquiera de estos atributos content... + if name in ["contentTitle", "contentPlot", "plot", "contentSerieName", "contentType", "contentEpisodeTitle", + "contentSeason", "contentEpisodeNumber", "contentThumbnail", "show", "contentQuality", "quality"]: + # ... marcamos hasContentDetails como "true"... + self.__dict__["hasContentDetails"] = True + # ...y actualizamos infoLables + if name == "contentTitle": + self.__dict__["infoLabels"]["title"] = value + elif name == "contentPlot" or name == "plot": + self.__dict__["infoLabels"]["plot"] = value + elif name == "contentSerieName" or name == "show": + self.__dict__["infoLabels"]["tvshowtitle"] = value + elif name == "contentType": + self.__dict__["infoLabels"]["mediatype"] = value + elif name == "contentEpisodeTitle": + self.__dict__["infoLabels"]["episodeName"] = value + elif name == "contentSeason": + self.__dict__["infoLabels"]["season"] = value + elif name == "contentEpisodeNumber": + self.__dict__["infoLabels"]["episode"] = value + elif name == "contentThumbnail": + self.__dict__["infoLabels"]["thumbnail"] = value + elif name == "contentQuality" or name == "quality": + self.__dict__["infoLabels"]["quality"] = value + + elif name == "duration": + # String q representa la duracion del video en segundos + self.__dict__["infoLabels"]["duration"] = str(value) + + elif name == "viewcontent" and value not in ["files", "movies", "tvshows", "seasons", "episodes"]: + super(Item, self).__setattr__("viewcontent", "files") + + # Al asignar un valor a infoLables + elif name == "infoLabels": + if isinstance(value, dict): + value_defaultdict = InfoLabels(value) + self.__dict__["infoLabels"] = value_defaultdict + + else: + super(Item, self).__setattr__(name, value) + + def __getattr__(self, name): + """ + Devuelve los valores por defecto en caso de que el atributo solicitado no exista en el item + """ + if name.startswith("__"): + return super(Item, self).__getattribute__(name) + + # valor por defecto para folder + if name == "folder": + return True + + # valor por defecto para contentChannel + elif name == "contentChannel": + return "list" + + # valor por defecto para viewcontent + elif name == "viewcontent": + # intentamos fijarlo segun el tipo de contenido... 
+ if self.__dict__["infoLabels"]["mediatype"] == 'movie': + viewcontent = 'movies' + elif self.__dict__["infoLabels"]["mediatype"] in ["tvshow", "season", "episode"]: + viewcontent = "episodes" + else: + viewcontent = "files" + + self.__dict__["viewcontent"] = viewcontent + return viewcontent + + # Valor por defecto para hasContentDetails + elif name == "hasContentDetails": + return False + + # valores guardados en infoLabels + elif name in ["contentTitle", "contentPlot", "contentSerieName", "show", "contentType", "contentEpisodeTitle", + "contentSeason", "contentEpisodeNumber", "contentThumbnail", "plot", "duration", + "contentQuality", "quality"]: + if name == "contentTitle": + return self.__dict__["infoLabels"]["title"] + elif name == "contentPlot" or name == "plot": + return self.__dict__["infoLabels"]["plot"] + elif name == "contentSerieName" or name == "show": + return self.__dict__["infoLabels"]["tvshowtitle"] + elif name == "contentType": + ret = self.__dict__["infoLabels"]["mediatype"] + if ret == 'list' and self.__dict__.get("fulltitle", None): # retrocompatibilidad + ret = 'movie' + self.__dict__["infoLabels"]["mediatype"] = ret + return ret + elif name == "contentEpisodeTitle": + return self.__dict__["infoLabels"]["episodeName"] + elif name == "contentSeason": + return self.__dict__["infoLabels"]["season"] + elif name == "contentEpisodeNumber": + return self.__dict__["infoLabels"]["episode"] + elif name == "contentThumbnail": + return self.__dict__["infoLabels"]["thumbnail"] + elif name == "contentQuality" or name == "quality": + return self.__dict__["infoLabels"]["quality"] + else: + return self.__dict__["infoLabels"][name] + + # valor por defecto para el resto de atributos + else: + return "" + + def __str__(self): + return '\r\t' + self.tostring('\r\t') + + def set_parent_content(self, parentContent): + """ + Rellena los campos contentDetails con la informacion del item "padre" + @param parentContent: item padre + @type parentContent: item + """ + # Comprueba que parentContent sea un Item + if not type(parentContent) == type(self): + return + # Copia todos los atributos que empiecen por "content" y esten declarados y los infoLabels + for attr in parentContent.__dict__: + if attr.startswith("content") or attr == "infoLabels": + self.__setattr__(attr, parentContent.__dict__[attr]) + + def tostring(self, separator=", "): + """ + Genera una cadena de texto con los datos del item para el log + Uso: logger.info(item.tostring()) + @param separator: cadena que se usará como separador + @type separator: str + '""" + dic = self.__dict__.copy() + + # Añadimos los campos content... si tienen algun valor + for key in ["contentTitle", "contentPlot", "contentSerieName", "contentEpisodeTitle", + "contentSeason", "contentEpisodeNumber", "contentThumbnail"]: + value = self.__getattr__(key) + if value: + dic[key] = value + + if 'mediatype' in self.__dict__["infoLabels"]: + dic["contentType"] = self.__dict__["infoLabels"]['mediatype'] + + ls = [] + for var in sorted(dic): + if isinstance(dic[var], str): + valor = "'%s'" % dic[var] + elif isinstance(dic[var], InfoLabels): + if separator == '\r\t': + valor = dic[var].tostring(',\r\t\t') + else: + valor = dic[var].tostring() + else: + valor = str(dic[var]) + + ls.append(var + "= " + valor) + + return separator.join(ls) + + def tourl(self): + """ + Genera una cadena de texto con los datos del item para crear una url, para volver generar el Item usar + item.fromurl(). 
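+
+        The result is base64(json(self.__dict__)), url-quoted, so it can travel
+        safely inside a plugin:// URL. Illustrative round trip:
+            url = item.tourl()
+            item2 = Item().fromurl(url)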
+ + Uso: url = item.tourl() + """ + dump = json.dump(self.__dict__) + # if empty dict + if not dump: + # set a str to avoid b64encode fails + dump = "" + return urllib.quote(base64.b64encode(dump)) + + def fromurl(self, url): + """ + Genera un item a partir de una cadena de texto. La cadena puede ser creada por la funcion tourl() o tener + el formato antiguo: plugin://plugin.video.alfa/?channel=... (+ otros parametros) + Uso: item.fromurl("cadena") + + @param url: url + @type url: str + """ + if "?" in url: + url = url.split("?")[1] + decoded = False + try: + str_item = base64.b64decode(urllib.unquote(url)) + json_item = json.load(str_item, object_hook=self.toutf8) + if json_item is not None and len(json_item) > 0: + self.__dict__.update(json_item) + decoded = True + except: + pass + + if not decoded: + url = urllib.unquote_plus(url) + dct = dict([[param.split("=")[0], param.split("=")[1]] for param in url.split("&") if "=" in param]) + self.__dict__.update(dct) + self.__dict__ = self.toutf8(self.__dict__) + + if 'infoLabels' in self.__dict__ and not isinstance(self.__dict__['infoLabels'], InfoLabels): + self.__dict__['infoLabels'] = InfoLabels(self.__dict__['infoLabels']) + + return self + + def tojson(self, path=""): + """ + Crea un JSON a partir del item, para guardar archivos de favoritos, lista de descargas, etc... + Si se especifica un path, te lo guarda en la ruta especificada, si no, devuelve la cadena json + Usos: item.tojson(path="ruta\archivo\json.json") + file.write(item.tojson()) + + @param path: ruta + @type path: str + """ + if path: + open(path, "wb").write(json.dump(self.__dict__)) + else: + return json.dump(self.__dict__) + + def fromjson(self, json_item=None, path=""): + """ + Genera un item a partir de un archivo JSON + Si se especifica un path, lee directamente el archivo, si no, lee la cadena de texto pasada. 
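+
+        Note: a dict stored under "infoLabels" is re-wrapped into an InfoLabels
+        instance after loading, e.g. (illustrative):
+            item = Item().fromjson('{"title": "Foo", "infoLabels": {"season": 1}}')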
+ Usos: item = Item().fromjson(path="ruta\archivo\json.json") + item = Item().fromjson("Cadena de texto json") + + @param json_item: item + @type json_item: json + @param path: ruta + @type path: str + """ + if path: + if os.path.exists(path): + json_item = open(path, "rb").read() + else: + json_item = {} + + if json_item is None: + json_item = {} + + item = json.load(json_item, object_hook=self.toutf8) + self.__dict__.update(item) + + if 'infoLabels' in self.__dict__ and not isinstance(self.__dict__['infoLabels'], InfoLabels): + self.__dict__['infoLabels'] = InfoLabels(self.__dict__['infoLabels']) + + return self + + def clone(self, **kwargs): + """ + Genera un nuevo item clonando el item actual + Usos: NuevoItem = item.clone() + NuevoItem = item.clone(title="Nuevo Titulo", action = "Nueva Accion") + """ + newitem = copy.deepcopy(self) + if "infoLabels" in kwargs: + kwargs["infoLabels"] = InfoLabels(kwargs["infoLabels"]) + for kw in kwargs: + newitem.__setattr__(kw, kwargs[kw]) + newitem.__dict__ = newitem.toutf8(newitem.__dict__) + + return newitem + + @staticmethod + def decode_html(value): + """ + Descodifica las HTML entities + @param value: valor a decodificar + @type value: str + """ + try: + unicode_title = unicode(value, "utf8", "ignore") + return HTMLParser().unescape(unicode_title).encode("utf8") + except: + return value + + def toutf8(self, *args): + """ + Pasa el item a utf8 + """ + if len(args) > 0: + value = args[0] + else: + value = self.__dict__ + + if type(value) == unicode: + return value.encode("utf8") + + elif type(value) == str: + return unicode(value, "utf8", "ignore").encode("utf8") + + elif type(value) == list: + for x, key in enumerate(value): + value[x] = self.toutf8(value[x]) + return value + + elif isinstance(value, dict): + newdct = {} + for key in value: + v = self.toutf8(value[key]) + if type(key) == unicode: + key = key.encode("utf8") + + newdct[key] = v + + if len(args) > 0: + if isinstance(value, InfoLabels): + return InfoLabels(newdct) + else: + return newdct + + else: + return value diff --git a/plugin.video.alfa/core/jsontools.py b/plugin.video.alfa/core/jsontools.py new file mode 100755 index 00000000..42a49913 --- /dev/null +++ b/plugin.video.alfa/core/jsontools.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# json_tools - JSON load and parse functions with library detection +# -------------------------------------------------------------------------------- + +import traceback + +import logger + +try: + import json +except: + logger.info("json incluido en el interprete **NO** disponible") + + try: + import simplejson as json + except: + logger.info("simplejson incluido en el interprete **NO** disponible") + try: + from lib import simplejson as json + except: + logger.info("simplejson en el directorio lib **NO** disponible") + logger.error("No se ha encontrado un parser de JSON valido") + json = None + else: + logger.info("Usando simplejson en el directorio lib") + else: + logger.info("Usando simplejson incluido en el interprete") +else: + logger.info("Usando json incluido en el interprete") + + +def load(*args, **kwargs): + if "object_hook" not in kwargs: + kwargs["object_hook"] = to_utf8 + + try: + value = json.loads(*args, **kwargs) + except: + logger.error("**NO** se ha podido cargar el JSON") + logger.error(traceback.format_exc()) + value = {} + + return value + + +def dump(*args, **kwargs): + if not kwargs: + kwargs = {"indent": 4, "skipkeys": True, "sort_keys": 
True, "ensure_ascii": False} + + try: + value = json.dumps(*args, **kwargs) + except: + logger.error("**NO** se ha podido cargar el JSON") + logger.error(traceback.format_exc()) + value = "" + return value + + +def to_utf8(dct): + if isinstance(dct, dict): + return dict((to_utf8(key), to_utf8(value)) for key, value in dct.iteritems()) + elif isinstance(dct, list): + return [to_utf8(element) for element in dct] + elif isinstance(dct, unicode): + return dct.encode('utf-8') + else: + return dct + + +def get_node_from_file(name_file, node, path=None): + """ + Obtiene el nodo de un fichero JSON + + @param name_file: Puede ser el nombre de un canal o server (sin incluir extension) + o bien el nombre de un archivo json (con extension) + @type name_file: str + @param node: nombre del nodo a obtener + @type node: str + @param path: Ruta base del archivo json. Por defecto la ruta de settings_channels. + @return: dict con el nodo a devolver + @rtype: dict + """ + logger.info() + from core import config + from core import filetools + + dict_node = {} + + if not name_file.endswith(".json"): + name_file += "_data.json" + + if not path: + path = filetools.join(config.get_data_path(), "settings_channels") + + fname = filetools.join(path, name_file) + + if filetools.isfile(fname): + data = filetools.read(fname) + dict_data = load(data) + + check_to_backup(data, fname, dict_data) + + if node in dict_data: + dict_node = dict_data[node] + + logger.debug("dict_node: %s" % dict_node) + + return dict_node + + +def check_to_backup(data, fname, dict_data): + """ + Comprueba que si dict_data(conversion del fichero JSON a dict) no es un diccionario, se genere un fichero con + data de nombre fname.bk. + + @param data: contenido del fichero fname + @type data: str + @param fname: nombre del fichero leido + @type fname: str + @param dict_data: nombre del diccionario + @type dict_data: dict + """ + logger.info() + + if not dict_data: + logger.error("Error al cargar el json del fichero %s" % fname) + + if data != "": + # se crea un nuevo fichero + from core import filetools + title = filetools.write("%s.bk" % fname, data) + if title != "": + logger.error("Ha habido un error al guardar el fichero: %s.bk" % fname) + else: + logger.debug("Se ha guardado una copia con el nombre: %s.bk" % fname) + else: + logger.debug("Está vacío el fichero: %s" % fname) + + +def update_node(dict_node, name_file, node, path=None): + """ + actualiza el json_data de un fichero con el diccionario pasado + + @param dict_node: diccionario con el nodo + @type dict_node: dict + @param name_file: Puede ser el nombre de un canal o server (sin incluir extension) + o bien el nombre de un archivo json (con extension) + @type name_file: str + @param node: nodo a actualizar + @param path: Ruta base del archivo json. Por defecto la ruta de settings_channels. 
+ @return result: Devuelve True si se ha escrito correctamente o False si ha dado un error + @rtype: bool + @return json_data + @rtype: dict + """ + logger.info() + + from core import config + from core import filetools + json_data = {} + result = False + + if not name_file.endswith(".json"): + name_file += "_data.json" + + if not path: + path = filetools.join(config.get_data_path(), "settings_channels") + + fname = filetools.join(path, name_file) + + try: + data = filetools.read(fname) + dict_data = load(data) + # es un dict + if dict_data: + if node in dict_data: + logger.debug(" existe el key %s" % node) + dict_data[node] = dict_node + else: + logger.debug(" NO existe el key %s" % node) + new_dict = {node: dict_node} + dict_data.update(new_dict) + else: + logger.debug(" NO es un dict") + dict_data = {node: dict_node} + json_data = dump(dict_data) + result = filetools.write(fname, json_data) + except: + logger.error("No se ha podido actualizar %s" % fname) + + return result, json_data diff --git a/plugin.video.alfa/core/logger.py b/plugin.video.alfa/core/logger.py new file mode 100755 index 00000000..58a9a65e --- /dev/null +++ b/plugin.video.alfa/core/logger.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Logger (kodi) +# -------------------------------------------------------------------------------- + +import inspect +import os + +import xbmc +from core import config + +loggeractive = (config.get_setting("debug") == True) + + +def log_enable(active): + global loggeractive + loggeractive = active + + +def encode_log(message=""): + # Unicode to utf8 + if type(message) == unicode: + message = message.encode("utf8") + + # All encodings to utf8 + elif type(message) == str: + message = unicode(message, "utf8", errors="replace").encode("utf8") + + # Objects to string + else: + message = str(message) + + return message + + +def get_caller(message=None): + module = inspect.getmodule(inspect.currentframe().f_back.f_back) + + module = module.__name__ + + function = inspect.currentframe().f_back.f_back.f_code.co_name + + if module == "__main__": + module = "alfa" + else: + module = "alfa." + module + if message: + if module not in message: + if function == "<module>": + return module + " " + message + else: + return module + " [" + function + "] " + message + else: + return message + else: + if function == "<module>": + return module + else: + return module + "." 
+ function + + +def info(texto=""): + if loggeractive: + xbmc.log(get_caller(encode_log(texto)), xbmc.LOGNOTICE) + + +def debug(texto=""): + if loggeractive: + texto = " [" + get_caller() + "] " + encode_log(texto) + + xbmc.log("######## DEBUG #########", xbmc.LOGNOTICE) + xbmc.log(texto, xbmc.LOGNOTICE) + + +def error(texto=""): + texto = " [" + get_caller() + "] " + encode_log(texto) + + xbmc.log("######## ERROR #########", xbmc.LOGERROR) + xbmc.log(texto, xbmc.LOGERROR) diff --git a/plugin.video.alfa/core/scraper.py b/plugin.video.alfa/core/scraper.py new file mode 100755 index 00000000..ca8219d1 --- /dev/null +++ b/plugin.video.alfa/core/scraper.py @@ -0,0 +1,275 @@ +# -*- coding: utf-8 -*- + +from core import config +from core import logger +from core.item import InfoLabels +from platformcode import platformtools + +# Este modulo es una interface para poder implementar diferentes scrapers +# contendra todos las funciones comunes + +dict_default = None +scraper = None + + +def find_and_set_infoLabels(item): + """ + función que se llama para buscar y setear los infolabels + :param item: + :return: boleano que indica si se ha podido encontrar el 'code' + """ + global scraper + scraper = None + # logger.debug("item:\n" + item.tostring('\n')) + + list_opciones_cuadro = ["Introducir otro nombre", "Completar información"] + # Si se añaden más scrapers hay q declararlos aqui-> "modulo_scraper": "Texto_en_cuadro" + scrapers_disponibles = {'tmdb': "Buscar en TheMovieDB.org", + 'tvdb': "Buscar en TheTvDB.com"} + + # Obtener el Scraper por defecto de la configuracion segun el tipo de contenido + if item.contentType == "movie": + scraper_actual = ['tmdb'][config.get_setting("scraper_movies", "videolibrary")] + tipo_contenido = "película" + title = item.contentTitle + # Completar lista de opciones para este tipo de contenido + list_opciones_cuadro.append(scrapers_disponibles['tmdb']) + + else: + scraper_actual = ['tmdb', 'tvdb'][config.get_setting("scraper_tvshows", "videolibrary")] + tipo_contenido = "serie" + title = item.contentSerieName + # Completar lista de opciones para este tipo de contenido + list_opciones_cuadro.append(scrapers_disponibles['tmdb']) + list_opciones_cuadro.append(scrapers_disponibles['tvdb']) + + # Importamos el scraper + try: + scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual]) + except ImportError: + exec "import core." 
+ scraper_actual + " as scraper" + except: + import traceback + logger.error(traceback.format_exc()) + + while scraper: + # Llamamos a la funcion find_and_set_infoLabels del scraper seleccionado + scraper_result = scraper.find_and_set_infoLabels(item) + + # Verificar si existe 'code' + if scraper_result and item.infoLabels['code']: + # code correcto + logger.info("Identificador encontrado: %s" % item.infoLabels['code']) + scraper.completar_codigos(item) + return True + elif scraper_result: + # Contenido encontrado pero no hay 'code' + msg = "Identificador no encontrado para: %s" % title + else: + # Contenido no encontrado + msg = "No se ha encontrado informacion para: %s" % title + + logger.info(msg) + # Mostrar cuadro con otras opciones: + if scrapers_disponibles[scraper_actual] in list_opciones_cuadro: + list_opciones_cuadro.remove(scrapers_disponibles[scraper_actual]) + index = platformtools.dialog_select(msg, list_opciones_cuadro) + + if index < 0: + logger.debug("Se ha pulsado 'cancelar' en la ventana '%s'" % msg) + return False + + elif index == 0: + # Pregunta el titulo + title = platformtools.dialog_input(title, "Introduzca el nombre de la %s a buscar" % tipo_contenido) + if title: + if item.contentType == "movie": + item.contentTitle = title + else: + item.contentSerieName = title + else: + logger.debug("he pulsado 'cancelar' en la ventana 'Introduzca el nombre correcto'") + return False + + elif index == 1: + # Hay q crear un cuadro de dialogo para introducir los datos + logger.info("Completar información") + if cuadro_completar(item): + # code correcto + logger.info("Identificador encontrado: %s" % str(item.infoLabels['code'])) + return True + # raise + + elif list_opciones_cuadro[index] in scrapers_disponibles.values(): + # Obtener el nombre del modulo del scraper + for k, v in scrapers_disponibles.items(): + if list_opciones_cuadro[index] == v: + if scrapers_disponibles[scraper_actual] not in list_opciones_cuadro: + list_opciones_cuadro.append(scrapers_disponibles[scraper_actual]) + # Importamos el scraper k + scraper_actual = k + try: + scraper = None + scraper = __import__('core.%s' % scraper_actual, fromlist=["core.%s" % scraper_actual]) + except ImportError: + exec "import core." 
+ scraper_actual + " as scraper_module" + break + + logger.error("Error al importar el modulo scraper %s" % scraper_actual) + + +def cuadro_completar(item): + logger.info() + + global dict_default + dict_default = {} + + COLOR = ["0xFF8A4B08", "0xFFF7BE81"] + # Creamos la lista de campos del infoLabel + controls = [("title", "text", "Titulo:"), + ("originaltitle", "text", "Titulo original"), + ("year", "text", "Año"), + ("identificadores", "label", "Identificadores:"), + ("tmdb_id", "text", " The Movie Database ID"), + ("url_tmdb", "text", " URL Tmdb", "+!eq(-1,'')"), + ("tvdb_id", "text", " The TVDB ID", "+eq(-7,'Serie')"), + ("url_tvdb", "text", " URL TVDB", "+!eq(-1,'')+eq(-8,'Serie')"), + ("imdb_id", "text", " IMDb ID"), + ("otro_id", "text", " Otro ID", "+eq(-1,'')"), + ("urls", "label", "Imágenes (urls):"), + ("fanart", "text", " Fondo"), + ("thumbnail", "text", " Miniatura")] + + if item.infoLabels["mediatype"] == "movie": + mediatype_default = 0 + else: + mediatype_default = 1 + + listado_controles = [{'id': "mediatype", + 'type': "list", + 'label': "Tipo de contenido", + 'color': COLOR[1], + 'default': mediatype_default, + 'enabled': True, + 'visible': True, + 'lvalues': ["Película", "Serie"] + }] + + for i, c in enumerate(controls): + color = COLOR[0] + dict_default[c[0]] = item.infoLabels.get(c[0], '') + + enabled = True + + if i > 0 and c[1] != 'label': + color = COLOR[1] + enabled = "!eq(-%s,'')" % i + if len(c) > 3: + enabled += c[3] + + # default para casos especiales + if c[0] == "url_tmdb" and item.infoLabels["tmdb_id"] and 'tmdb' in item.infoLabels["url_scraper"]: + dict_default[c[0]] = item.infoLabels["url_scraper"] + + elif c[0] == "url_tvdb" and item.infoLabels["tvdb_id"] and 'thetvdb.com' in item.infoLabels["url_scraper"]: + dict_default[c[0]] = item.infoLabels["url_scraper"] + + if not dict_default[c[0]] or dict_default[c[0]] == 'None' or dict_default[c[0]] == 0: + dict_default[c[0]] = '' + elif isinstance(dict_default[c[0]], (int, float, long)): + # Si es numerico lo convertimos en str + dict_default[c[0]] = str(dict_default[c[0]]) + + listado_controles.append({'id': c[0], + 'type': c[1], + 'label': c[2], + 'color': color, + 'default': dict_default[c[0]], + 'enabled': enabled, + 'visible': True}) + + # logger.debug(dict_default) + if platformtools.show_channel_settings(listado_controles, caption="Completar información", item=item, + callback="core.scraper.callback_cuadro_completar", + custom_button={"visible": False}): + return True + + else: + return False + + +def callback_cuadro_completar(item, dict_values): + # logger.debug(dict_values) + global dict_default + + if dict_values.get("title", None): + # Adaptar dict_values a infoLabels validos + dict_values['mediatype'] = ['movie', 'tvshow'][dict_values['mediatype']] + for k, v in dict_values.items(): + if k in dict_default and dict_default[k] == dict_values[k]: + del dict_values[k] + + if isinstance(item.infoLabels, InfoLabels): + infoLabels = item.infoLabels + else: + infoLabels = InfoLabels() + + infoLabels.update(dict_values) + item.infoLabels = infoLabels + + if item.infoLabels['code']: + return True + + return False + + +def get_nfo(item): + """ + Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, + + @param item: elemento que contiene los datos necesarios para generar la info + @type item: Item + @rtype: str + @return: + """ + logger.info() + if "infoLabels" in item and "noscrap_id" in item.infoLabels: + # Crea el fichero xml con los datos que se obtiene de item ya 
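+        # Illustrative output for a movie item (tag names assumed from Kodi's
+        # .nfo conventions, matching the branches below):
+        #   <?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
+        #   <movie><title>Foo</title><thumb>...</thumb><fanart>...</fanart></movie>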
que no hay ningún scraper activo + info_nfo = '<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>' + + if "season" in item.infoLabels and "episode" in item.infoLabels: + info_nfo += '<episodedetails><title>%s' % item.infoLabels['title'] + info_nfo += '%s' % item.infoLabels['tvshowtitle'] + info_nfo += '%s' % item.thumbnail + + info_nfo += '\n' + + elif item.infoLabels["mediatype"] == "tvshow": + info_nfo += '%s' % item.infoLabels['title'] + info_nfo += '%s' % item.thumbnail + info_nfo += '%s' % item.fanart + + info_nfo += '\n' + + else: + info_nfo += '%s' % item.infoLabels['title'] + info_nfo += '%s' % item.thumbnail + info_nfo += '%s' % item.fanart + + info_nfo += '\n' + + return info_nfo + else: + return scraper.get_nfo(item) + + +def sort_episode_list(episodelist): + scraper_actual = ['tmdb', 'tvdb'][config.get_setting("scraper_tvshows", "videolibrary")] + + if scraper_actual == "tmdb": + episodelist.sort(key=lambda e: (int(e.contentSeason), int(e.contentEpisodeNumber))) + + elif scraper_actual == "tvdb": + episodelist.sort(key=lambda e: (int(e.contentEpisodeNumber), int(e.contentSeason))) + + return episodelist diff --git a/plugin.video.alfa/core/scrapertools.py b/plugin.video.alfa/core/scrapertools.py new file mode 100755 index 00000000..d7640326 --- /dev/null +++ b/plugin.video.alfa/core/scrapertools.py @@ -0,0 +1,498 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Scraper tools for reading and processing web elements +# -------------------------------------------------------------------------------- + +import re +import time + +import logger +from core import httptools + + +def cache_page(url, post=None, headers=None, modo_cache=None, timeout=None): + return cachePage(url, post, headers, modo_cache, timeout=timeout) + + +def cachePage(url, post=None, headers=None, modoCache=None, timeout=None): + data = downloadpage(url, post=post, headers=headers, timeout=timeout) + return data + + +def downloadpage(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None): + response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects, + timeout=timeout) + + if header_to_get: + return response.headers.get(header_to_get) + else: + return response.data + + +def downloadpageWithResult(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None): + response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects, + timeout=timeout) + + if header_to_get: + return response.headers.get(header_to_get) + else: + return response.data, response.code + + +def downloadpageWithoutCookies(url): + response = httptools.downloadpage(url, cookies=False) + return response.data + + +def downloadpageGzip(url): + response = httptools.downloadpage(url, add_referer=True) + return response.data + + +def getLocationHeaderFromResponse(url): + response = httptools.downloadpage(url, only_headers=True) + return response.headers.get("location") + + +def get_header_from_response(url, header_to_get="", post=None, headers=None): + header_to_get = header_to_get.lower() + response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True) + return response.headers.get(header_to_get) + + +def get_headers_from_response(url, post=None, headers=None): + response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True) + return response.headers.items() + + +def read_body_and_headers(url, post=None, 
headers=None, follow_redirects=False, timeout=None): + response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects, + timeout=timeout) + return response.data, response.headers + + +def anti_cloudflare(url, host="", headers=None, post=None, location=False): + # anti_cloudfare ya integrado en httptools por defecto + response = httptools.downloadpage(url, post=post, headers=headers) + return response.data + + +def printMatches(matches): + i = 0 + for match in matches: + logger.info("%d %s" % (i, match)) + i = i + 1 + + +def get_match(data, patron, index=0): + matches = re.findall(patron, data, flags=re.DOTALL) + return matches[index] + + +def find_single_match(data, patron, index=0): + try: + matches = re.findall(patron, data, flags=re.DOTALL) + return matches[index] + except: + return "" + + +# Parse string and extracts multiple matches using regular expressions +def find_multiple_matches(text, pattern): + return re.findall(pattern, text, re.DOTALL) + + +def entityunescape(cadena): + return unescape(cadena) + + +def unescape(text): + """Removes HTML or XML character references + and entities from a text string. + keep &, >, < in the source code. + from Fredrik Lundh + http://effbot.org/zone/re-sub.htm#unescape-html + """ + + def fixup(m): + text = m.group(0) + if text[:2] == "&#": + # character reference + try: + if text[:3] == "&#x": + return unichr(int(text[3:-1], 16)).encode("utf-8") + else: + return unichr(int(text[2:-1])).encode("utf-8") + + except ValueError: + logger.error("error de valor") + pass + else: + # named entity + try: + ''' + if text[1:-1] == "amp": + text = "&amp;" + elif text[1:-1] == "gt": + text = "&gt;" + elif text[1:-1] == "lt": + text = "&lt;" + else: + print text[1:-1] + text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8") + ''' + import htmlentitydefs + text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8") + except KeyError: + logger.error("keyerror") + pass + except: + pass + return text # leave as is + + return re.sub("&#?\w+;", fixup, text) + + # Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8 + + +def decodeHtmlentities(string): + string = entitiesfix(string) + entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});") + + def substitute_entity(match): + from htmlentitydefs import name2codepoint as n2cp + ent = match.group(2) + if match.group(1) == "#": + return unichr(int(ent)).encode('utf-8') + else: + cp = n2cp.get(ent) + + if cp: + return unichr(cp).encode('utf-8') + else: + return match.group() + + return entity_re.subn(substitute_entity, string)[0] + + +def entitiesfix(string): + # Las entidades comienzan siempre con el símbolo & , y terminan con un punto y coma ( ; ). + string = string.replace("á", "á") + string = string.replace("é", "é") + string = string.replace("í", "í") + string = string.replace("ó", "ó") + string = string.replace("ú", "ú") + string = string.replace("Á", "Á") + string = string.replace("É", "É") + string = string.replace("Í", "Í") + string = string.replace("Ó", "Ó") + string = string.replace("Ú", "Ú") + string = string.replace("ü", "ü") + string = string.replace("Ü", "Ü") + string = string.replace("ñ", "ñ") + string = string.replace("¿", "¿") + string = string.replace("¡", "¡") + string = string.replace(";;", ";") + return string + + +def htmlclean(cadena): + cadena = re.compile("", re.DOTALL).sub("", cadena) + + cadena = cadena.replace("
          ", "") + cadena = cadena.replace("
          ", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("
        • ", "") + cadena = cadena.replace("
        • ", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", " ") + cadena = cadena.replace("
          ", " ") + cadena = cadena.replace("
          ", " ") + cadena = re.compile("]*>", re.DOTALL).sub(" ", cadena) + + cadena = re.compile("", re.DOTALL).sub("", cadena) + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("
  • ", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("
    ", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("

    ", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("", re.DOTALL).sub("", cadena) + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + + cadena = cadena.replace("\t", "") + cadena = entityunescape(cadena) + return cadena + + +def slugify(title): + # print title + + # Sustituye acentos y eñes + title = title.replace("Á", "a") + title = title.replace("É", "e") + title = title.replace("Í", "i") + title = title.replace("Ó", "o") + title = title.replace("Ú", "u") + title = title.replace("á", "a") + title = title.replace("é", "e") + title = title.replace("í", "i") + title = title.replace("ó", "o") + title = title.replace("ú", "u") + title = title.replace("À", "a") + title = title.replace("È", "e") + title = title.replace("Ì", "i") + title = title.replace("Ò", "o") + title = title.replace("Ù", "u") + title = title.replace("à", "a") + title = title.replace("è", "e") + title = title.replace("ì", "i") + title = title.replace("ò", "o") + title = title.replace("ù", "u") + title = title.replace("ç", "c") + title = title.replace("Ç", "C") + title = title.replace("Ñ", "n") + title = title.replace("ñ", "n") + title = title.replace("/", "-") + title = title.replace("&", "&") + + # Pasa a minúsculas + title = title.lower().strip() + + # Elimina caracteres no válidos + validchars = "abcdefghijklmnopqrstuvwxyz1234567890- " + title = ''.join(c for c in title if c in validchars) + + # Sustituye espacios en blanco duplicados y saltos de línea + title = re.compile("\s+", re.DOTALL).sub(" ", title) + + # Sustituye espacios en blanco por guiones + title = re.compile("\s", re.DOTALL).sub("-", title.strip()) + + # Sustituye espacios en blanco duplicados y saltos de línea + title = re.compile("\-+", re.DOTALL).sub("-", title) + + # Arregla casos especiales + if title.startswith("-"): + title = title[1:] + + if title == "": + title = "-" + str(time.time()) + + return title + + +def remove_htmltags(string): + return re.sub('<[^<]+?>', '', string) + + +def remove_show_from_title(title, show): + # print slugify(title)+" == "+slugify(show) + # Quita el nombre del programa del título + if slugify(title).startswith(slugify(show)): + + # Convierte a unicode primero, o el encoding se pierde + title = unicode(title, "utf-8", "replace") + show = unicode(show, "utf-8", "replace") + title = title[len(show):].strip() + + if title.startswith("-"): + title = title[1:].strip() + + if title == "": + title = str(time.time()) + + # Vuelve a utf-8 + title = title.encode("utf-8", "ignore") + show = show.encode("utf-8", "ignore") + + return title + + +def getRandom(str): + return get_md5(str) + + +def unseo(cadena): + if cadena.upper().startswith("VER GRATIS LA PELICULA "): + cadena = 
cadena[23:] + elif cadena.upper().startswith("VER GRATIS PELICULA "): + cadena = cadena[20:] + elif cadena.upper().startswith("VER ONLINE LA PELICULA "): + cadena = cadena[23:] + elif cadena.upper().startswith("VER GRATIS "): + cadena = cadena[11:] + elif cadena.upper().startswith("VER ONLINE "): + cadena = cadena[11:] + elif cadena.upper().startswith("DESCARGA DIRECTA "): + cadena = cadena[17:] + return cadena + + +# scrapertools.get_filename_from_url(media_url)[-4:] +def get_filename_from_url(url): + import urlparse + parsed_url = urlparse.urlparse(url) + try: + filename = parsed_url.path + except: + # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" + if len(parsed_url) >= 4: + filename = parsed_url[2] + else: + filename = "" + + if "/" in filename: + filename = filename.split("/")[-1] + + return filename + + +def get_domain_from_url(url): + import urlparse + parsed_url = urlparse.urlparse(url) + try: + filename = parsed_url.netloc + except: + # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" + if len(parsed_url) >= 4: + filename = parsed_url[1] + else: + filename = "" + + return filename + + +def get_season_and_episode(title): + """ + Retorna el numero de temporada y de episodio en formato "1x01" obtenido del titulo de un episodio + Ejemplos de diferentes valores para title y su valor devuelto: + "serie 101x1.strm", "s101e1.avi", "t101e1.avi" -> '101x01' + "Name TvShow 1x6.avi" -> '1x06' + "Temp 3 episodio 2.avi" -> '3x02' + "Alcantara season 13 episodie 12.avi" -> '13x12' + "Temp1 capitulo 14" -> '1x14' + "Temporada 1: El origen Episodio 9" -> '' (entre el numero de temporada y los episodios no puede haber otro texto) + "Episodio 25: titulo episodio" -> '' (no existe el numero de temporada) + "Serie X Temporada 1" -> '' (no existe el numero del episodio) + @type title: str + @param title: titulo del episodio de una serie + @rtype: str + @return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado + """ + filename = "" + + patrons = ["(\d+)x(\d+)", "(?:s|t)(\d+)e(\d+)", + "(?:season|temp\w*)\s*(\d+)\s*(?:capitulo|epi\w*)\s*(\d+)"] + + for patron in patrons: + try: + matches = re.compile(patron, re.I).search(title) + if matches: + filename = matches.group(1) + "x" + matches.group(2).zfill(2) + break + except: + pass + + logger.info("'" + title + "' -> '" + filename + "'") + + return filename + + +def get_sha1(cadena): + try: + import hashlib + devuelve = hashlib.sha1(cadena).hexdigest() + except: + import sha + import binascii + devuelve = binascii.hexlify(sha.new(cadena).digest()) + + return devuelve + + +def get_md5(cadena): + try: + import hashlib + devuelve = hashlib.md5(cadena).hexdigest() + except: + import md5 + import binascii + devuelve = binascii.hexlify(md5.new(cadena).digest()) + + return devuelve diff --git a/plugin.video.alfa/core/scrapertoolsV2.py b/plugin.video.alfa/core/scrapertoolsV2.py new file mode 100755 index 00000000..9a03736a --- /dev/null +++ b/plugin.video.alfa/core/scrapertoolsV2.py @@ -0,0 +1,343 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Scraper tools v2 for reading and processing web elements +# -------------------------------------------------------------------------------- + +import re +import time +import urlparse + +import logger +from core.entities import html5 + + +def printMatches(matches): + i = 0 + for match in matches: + logger.info("%d %s" % (i, match)) + i = i 
+ 1 + + +def get_match(data, patron, index=0): + return find_single_match(data, patron, index=0) + + +def find_single_match(data, patron, index=0): + try: + matches = re.findall(patron, data, flags=re.DOTALL) + return matches[index] + except: + return "" + + +# Parse string and extracts multiple matches using regular expressions +def find_multiple_matches(text, pattern): + return re.findall(pattern, text, re.DOTALL) + + +# Convierte los codigos html "ñ" y lo reemplaza por "ñ" caracter unicode utf-8 +def decodeHtmlentities(data): + entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)") + + def substitute_entity(match): + ent = match.group(2) + match.group(3) + res = "" + while not ent in html5 and not ent.endswith(";") and match.group(1) != "#": + # Excepción para cuando '&' se usa como argumento en la urls contenidas en los datos + try: + res = ent[-1] + res + ent = ent[:-1] + except: + break + + if match.group(1) == "#": + ent = unichr(int(ent.replace(";", ""))) + return ent.encode('utf-8') + else: + cp = html5.get(ent) + if cp: + return cp.decode("unicode-escape").encode('utf-8') + res + else: + return match.group() + + return entity_re.subn(substitute_entity, data)[0] + + +def htmlclean(cadena): + cadena = re.compile("", re.DOTALL).sub("", cadena) + + cadena = cadena.replace("
    ", "") + cadena = cadena.replace("
    ", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("
  • ", "") + cadena = cadena.replace("
  • ", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + cadena = cadena.replace("", " ") + cadena = cadena.replace("
    ", " ") + cadena = cadena.replace("
    ", " ") + cadena = re.compile("]*>", re.DOTALL).sub(" ", cadena) + + cadena = re.compile("", re.DOTALL).sub("", cadena) + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("
    ", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("

    ", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("", re.DOTALL).sub("", cadena) + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + cadena = cadena.replace("", "") + + cadena = re.compile("]*>", re.DOTALL).sub("", cadena) + + cadena = cadena.replace("\t", "") + # cadena = entityunescape(cadena) + return cadena + + +def slugify(title): + # print title + + # Sustituye acentos y eñes + title = title.replace("Á", "a") + title = title.replace("É", "e") + title = title.replace("Í", "i") + title = title.replace("Ó", "o") + title = title.replace("Ú", "u") + title = title.replace("á", "a") + title = title.replace("é", "e") + title = title.replace("í", "i") + title = title.replace("ó", "o") + title = title.replace("ú", "u") + title = title.replace("À", "a") + title = title.replace("È", "e") + title = title.replace("Ì", "i") + title = title.replace("Ò", "o") + title = title.replace("Ù", "u") + title = title.replace("à", "a") + title = title.replace("è", "e") + title = title.replace("ì", "i") + title = title.replace("ò", "o") + title = title.replace("ù", "u") + title = title.replace("ç", "c") + title = title.replace("Ç", "C") + title = title.replace("Ñ", "n") + title = title.replace("ñ", "n") + title = title.replace("/", "-") + title = title.replace("&", "&") + + # Pasa a minúsculas + title = title.lower().strip() + + # Elimina caracteres no válidos + validchars = "abcdefghijklmnopqrstuvwxyz1234567890- " + title = ''.join(c for c in title if c in validchars) + + # Sustituye espacios en blanco duplicados y saltos de línea + title = re.compile("\s+", re.DOTALL).sub(" ", title) + + # Sustituye espacios en blanco por guiones + title = re.compile("\s", re.DOTALL).sub("-", title.strip()) + + # Sustituye espacios en blanco duplicados y saltos de línea + title = re.compile("\-+", re.DOTALL).sub("-", title) + + # Arregla casos especiales + if title.startswith("-"): + title = title[1:] + + if title == "": + title = "-" + str(time.time()) + + return title + + +def remove_htmltags(string): + return re.sub('<[^<]+?>', '', string) + + +def remove_show_from_title(title, show): + # print slugify(title)+" == "+slugify(show) + # Quita el nombre del programa del título + if slugify(title).startswith(slugify(show)): + + # Convierte a unicode primero, o el encoding se pierde + title = unicode(title, "utf-8", "replace") + show = unicode(show, "utf-8", "replace") + title = title[len(show):].strip() + + if title.startswith("-"): + title = title[1:].strip() + + if title == "": + title = str(time.time()) + + # Vuelve a utf-8 + title = title.encode("utf-8", "ignore") + show = show.encode("utf-8", "ignore") + + return title + + +# scrapertools.get_filename_from_url(media_url)[-4:] +def get_filename_from_url(url): + parsed_url = urlparse.urlparse(url) + try: + 
filename = parsed_url.path + except: + # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" + if len(parsed_url) >= 4: + filename = parsed_url[2] + else: + filename = "" + + if "/" in filename: + filename = filename.split("/")[-1] + + return filename + + +def get_domain_from_url(url): + parsed_url = urlparse.urlparse(url) + try: + filename = parsed_url.netloc + except: + # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" + if len(parsed_url) >= 4: + filename = parsed_url[1] + else: + filename = "" + + return filename + + +def get_season_and_episode(title): + """ + Retorna el numero de temporada y de episodio en formato "1x01" obtenido del titulo de un episodio + Ejemplos de diferentes valores para title y su valor devuelto: + "serie 101x1.strm", "s101e1.avi", "t101e1.avi" -> '101x01' + "Name TvShow 1x6.avi" -> '1x06' + "Temp 3 episodio 2.avi" -> '3x02' + "Alcantara season 13 episodie 12.avi" -> '13x12' + "Temp1 capitulo 14" -> '1x14' + "Temporada 1: El origen Episodio 9" -> '' (entre el numero de temporada y los episodios no puede haber otro texto) + "Episodio 25: titulo episodio" -> '' (no existe el numero de temporada) + "Serie X Temporada 1" -> '' (no existe el numero del episodio) + @type title: str + @param title: titulo del episodio de una serie + @rtype: str + @return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado + """ + filename = "" + + patrons = ["(\d+)x(\d+)", "(?:s|t)(\d+)e(\d+)", + "(?:season|temp\w*)\s*(\d+)\s*(?:capitulo|epi\w*)\s*(\d+)"] + + for patron in patrons: + try: + matches = re.compile(patron, re.I).search(title) + if matches: + filename = matches.group(1) + "x" + matches.group(2).zfill(2) + break + except: + pass + + logger.info("'" + title + "' -> '" + filename + "'") + + return filename + + +def get_sha1(cadena): + try: + import hashlib + devuelve = hashlib.sha1(cadena).hexdigest() + except: + import sha + import binascii + devuelve = binascii.hexlify(sha.new(cadena).digest()) + + return devuelve + + +def get_md5(cadena): + try: + import hashlib + devuelve = hashlib.md5(cadena).hexdigest() + except: + import md5 + import binascii + devuelve = binascii.hexlify(md5.new(cadena).digest()) + + return devuelve diff --git a/plugin.video.alfa/core/servertools.py b/plugin.video.alfa/core/servertools.py new file mode 100755 index 00000000..8ee44147 --- /dev/null +++ b/plugin.video.alfa/core/servertools.py @@ -0,0 +1,761 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Server management +# -------------------------------------------------------------------------------- + +import datetime +import os +import re +import time +import urlparse + +from core import config +from core import httptools +from core import jsontools +from core import logger +from core.item import Item +from platformcode import platformtools + +dict_servers_parameters = {} + + +def find_video_items(item=None, data=None): + """ + Función genérica para buscar vídeos en una página, devolviendo un itemlist con los items listos para usar. + - Si se pasa un Item como argumento, a los items resultantes mantienen los parametros del item pasado + - Si no se pasa un Item, se crea uno nuevo, pero no contendra ningun parametro mas que los propios del servidor. 
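+
+    A minimal usage sketch (the URL is a placeholder, not a real channel page):
+
+        item = Item(title="Ejemplo", url="http://example.com/pelicula")
+        for videoitem in find_video_items(item):
+            logger.info("%s -> %s" % (videoitem.server, videoitem.url))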
+ + @param item: Item al cual se quieren buscar vídeos, este debe contener la url válida + @type item: Item + @param data: Cadena con el contendio de la página ya descargado (si no se pasa item) + @type data: str + + @return: devuelve el itemlist con los resultados + @rtype: list + """ + logger.info() + itemlist = [] + + # Descarga la página + if data is None: + data = httptools.downloadpage(item.url).data + + # Crea un item si no hay item + if item is None: + item = Item() + # Pasa los campos thumbnail y title a contentThumbnail y contentTitle + else: + if not item.contentThumbnail: + item.contentThumbnail = item.thumbnail + if not item.contentTitle: + item.contentTitle = item.title + + # Busca los enlaces a los videos + for label, url, server, thumbnail in findvideos(data): + title = "Enlace encontrado en %s" % label + itemlist.append( + item.clone(title=title, action="play", url=url, thumbnail=thumbnail, server=server, folder=False)) + + return itemlist + + +def get_servers_itemlist(itemlist, fnc=None, sort=False): + """ + Obtiene el servidor para cada uno de los items, en funcion de su url. + - Asigna el servidor, la url modificada, el thumbnail (si el item no contiene contentThumbnail se asigna el del thumbnail) + - Si se pasa una funcion por el argumento fnc, esta se ejecuta pasando el item como argumento, + el resultado de esa funcion se asigna al titulo del item + - En esta funcion podemos modificar cualquier cosa del item + - Esta funcion siempre tiene que devolver el item.title como resultado + - Si no se encuentra servidor para una url, se asigna "directo" + + @param itemlist: listado de items + @type itemlist: list + @param fnc: función para ejecutar con cada item (para asignar el titulo) + @type fnc: function + @param sort: indica si el listado resultante se ha de ordenar en funcion de la lista de servidores favoritos + @type sort: bool + """ + server_stats = {} + # Recorre los servidores + for serverid in get_servers_list().keys(): + server_parameters = get_server_parameters(serverid) + + # Recorre los patrones + for pattern in server_parameters.get("find_videos", {}).get("patterns", []): + logger.info(pattern["pattern"]) + # Recorre los resultados + for match in re.compile(pattern["pattern"], re.DOTALL).finditer( + "\n".join([item.url.split('|')[0] for item in itemlist if not item.server])): + url = pattern["url"] + for x in range(len(match.groups())): + url = url.replace("\\%s" % (x + 1), match.groups()[x]) + + server_stats[serverid] = "found" + for item in itemlist: + if match.group() in item.url: + if not item.contentThumbnail: + item.contentThumbnail = item.thumbnail + item.thumbnail = server_parameters.get("thumbnail", "") + item.server = serverid + if '|' in item.url: + item.url = url + '|' + item.url.split('|')[1] + else: + item.url = url + + save_server_stats(server_stats, "find_videos") + + # Eliminamos los servidores desactivados + itemlist = filter(lambda i: not i.server or is_server_enabled(i.server), itemlist) + + for item in itemlist: + # Asignamos "directo" en caso de que el server no se encuentre en pelisalcarta + if not item.server and item.url: + item.server = "directo" + + if fnc: + item.title = fnc(item) + + # Filtrar si es necesario + itemlist = filter_servers(itemlist) + + # Ordenar segun favoriteslist si es necesario + if sort: + itemlist = sort_servers(itemlist) + + return itemlist + + +def findvideos(data, skip=False): + """ + Recorre la lista de servidores disponibles y ejecuta la funcion findvideosbyserver para cada uno de ellos + :param data: 
Texto donde buscar los enlaces + :param skip: Indica un limite para dejar de recorrer la lista de servidores. Puede ser un booleano en cuyo caso + seria False para recorrer toda la lista (valor por defecto) o True para detenerse tras el primer servidor que + retorne algun enlace. Tambien puede ser un entero mayor de 1, que representaria el numero maximo de enlaces a buscar. + :return: + """ + logger.info() + devuelve = [] + skip = int(skip) + servers_list = get_servers_list().keys() + + # Ordenar segun favoriteslist si es necesario + servers_list = sort_servers(servers_list) + is_filter_servers = False + + # Ejecuta el findvideos en cada servidor activo + for serverid in servers_list: + if not is_server_enabled(serverid): + continue + if config.get_setting("black_list", server=serverid): + is_filter_servers = True + continue + + devuelve.extend(findvideosbyserver(data, serverid)) + if skip and len(devuelve) >= skip: + devuelve = devuelve[:skip] + break + + if not devuelve and is_filter_servers: + platformtools.dialog_ok("Filtrar servidores (Lista Negra)", + "No hay enlaces disponibles que cumplan los requisitos de su Lista Negra.", + "Pruebe de nuevo modificando el fíltro en 'Configuracíon Servidores") + + return devuelve + + +def findvideosbyserver(data, serverid): + serverid = get_server_name(serverid) + if not serverid: + return [] + + server_parameters = get_server_parameters(serverid) + devuelve = [] + + if "find_videos" in server_parameters: + # Recorre los patrones + for pattern in server_parameters["find_videos"].get("patterns", []): + msg = "%s\npattern: %s" % (serverid, pattern["pattern"]) + # Recorre los resultados + for match in re.compile(pattern["pattern"], re.DOTALL).finditer(data): + url = pattern["url"] + # Crea la url con los datos + for x in range(len(match.groups())): + url = url.replace("\\%s" % (x + 1), match.groups()[x]) + msg += "\nurl encontrada: %s" % url + value = server_parameters["name"], url, serverid, server_parameters.get("thumbnail", "") + if value not in devuelve and url not in server_parameters["find_videos"].get("ignore_urls", []): + devuelve.append(value) + logger.info(msg) + + # Guardar estadisticas + if devuelve: + save_server_stats({serverid: "found"}, "find_videos") + + return devuelve + + +def guess_server_thumbnail(serverid): + server = get_server_name(serverid) + server_parameters = get_server_parameters(server) + return server_parameters.get('thumbnail', "") + + +def get_server_from_url(url): + encontrado = findvideos(url, True) + if len(encontrado) > 0: + devuelve = encontrado[0][2] + else: + devuelve = "directo" + + return devuelve + + +def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialogo=False): + """ + Función para obtener la url real del vídeo + @param server: Servidor donde está alojado el vídeo + @type server: str + @param url: url del vídeo + @type url: str + @param video_password: Password para el vídeo + @type video_password: str + @param muestra_dialogo: Muestra el diálogo de progreso + @type muestra_dialogo: bool + + @return: devuelve la url del video + @rtype: list + """ + logger.info("Server: %s, Url: %s" % (server, url)) + + server = server.lower() + + video_urls = [] + video_exists = True + error_messages = [] + opciones = [] + + # Si el vídeo es "directo" o "local", no hay que buscar más + if server == "directo" or server == "local": + logger.info("Server: %s, la url es la buena" % server) + video_urls.append(["%s [%s]" % (urlparse.urlparse(url)[2][-4:], server), url]) + + # Averigua la URL del 
vídeo + else: + if server: + server_parameters = get_server_parameters(server) + else: + server_parameters = {} + + if server_parameters: + # Muestra un diágo de progreso + if muestra_dialogo: + progreso = platformtools.dialog_progress("alfa", + "Conectando con %s" % server_parameters["name"]) + + # Cuenta las opciones disponibles, para calcular el porcentaje + + orden = [ + ["free"] + [server] + [premium for premium in server_parameters["premium"] if not premium == server], + [server] + [premium for premium in server_parameters["premium"] if not premium == server] + ["free"], + [premium for premium in server_parameters["premium"] if not premium == server] + [server] + ["free"] + ] + + if server_parameters["free"] == True: + opciones.append("free") + opciones.extend( + [premium for premium in server_parameters["premium"] if config.get_setting("premium", server=premium)]) + + priority = int(config.get_setting("resolve_priority")) + opciones = sorted(opciones, key=lambda x: orden[priority].index(x)) + + logger.info("Opciones disponibles: %s | %s" % (len(opciones), opciones)) + else: + logger.error("No existe conector para el servidor %s" % server) + error_messages.append("No existe conector para el servidor %s" % server) + muestra_dialogo = False + + # Importa el server + try: + server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server]) + logger.info("Servidor importado: %s" % server_module) + except: + server_module = None + logger.error("No se ha podido importar el servidor: %s" % server) + import traceback + logger.error(traceback.format_exc()) + + # Si tiene una función para ver si el vídeo existe, lo comprueba ahora + if hasattr(server_module, 'test_video_exists'): + logger.info("Invocando a %s.test_video_exists" % server) + try: + video_exists, message = server_module.test_video_exists(page_url=url) + + if not video_exists: + error_messages.append(message) + logger.info("test_video_exists dice que el video no existe") + else: + logger.info("test_video_exists dice que el video SI existe") + except: + logger.error("No se ha podido comprobar si el video existe") + import traceback + logger.error(traceback.format_exc()) + + # Si el video existe y el modo free está disponible, obtenemos la url + if video_exists: + for opcion in opciones: + # Opcion free y premium propio usa el mismo server + if opcion == "free" or opcion == server: + serverid = server_module + server_name = server_parameters["name"] + + # Resto de opciones premium usa un debrider + else: + serverid = __import__('servers.debriders.%s' % opcion, None, None, + ["servers.debriders.%s" % opcion]) + server_name = get_server_parameters(opcion)["name"] + + # Muestra el progreso + if muestra_dialogo: + progreso.update((100 / len(opciones)) * opciones.index(opcion), "Conectando con %s" % server_name) + + # Modo free + if opcion == "free": + try: + logger.info("Invocando a %s.get_video_url" % server) + response = serverid.get_video_url(page_url=url, video_password=video_password) + if response: + save_server_stats({server: "sucess"}, "resolve") + video_urls.extend(response) + except: + save_server_stats({server: "error"}, "resolve") + logger.error("Error al obrener la url en modo free") + error_messages.append("Se ha producido un error en %s" % server_name) + import traceback + logger.error(traceback.format_exc()) + + # Modo premium + else: + try: + logger.info("Invocando a %s.get_video_url" % opcion) + response = serverid.get_video_url(page_url=url, premium=True, + user=config.get_setting("user", 
server=opcion), + password=config.get_setting("password", server=opcion), + video_password=video_password) + if response and response[0][1]: + if opcion == server: + save_server_stats({server: "sucess"}, "resolve") + video_urls.extend(response) + elif response and response[0][0]: + error_messages.append(response[0][0]) + else: + error_messages.append("Se ha producido un error en %s" % server_name) + except: + if opcion == server: + save_server_stats({server: "error"}, "resolve") + logger.error("Error en el servidor: %s" % opcion) + error_messages.append("Se ha producido un error en %s" % server_name) + import traceback + logger.error(traceback.format_exc()) + + # Si ya tenemos URLS, dejamos de buscar + if video_urls and config.get_setting("resolve_stop") == True: + break + + # Cerramos el progreso + if muestra_dialogo: + progreso.update(100, "Proceso finalizado") + progreso.close() + + # Si no hay opciones disponibles mostramos el aviso de las cuentas premium + if video_exists and not opciones and server_parameters.get("premium"): + listapremium = [get_server_parameters(premium)["name"] for premium in server_parameters["premium"]] + error_messages.append( + "Para ver un vídeo en %s necesitas
    una cuenta en: %s" % (server, " o ".join(listapremium))) + + # Si no tenemos urls ni mensaje de error, ponemos uno generico + elif not video_urls and not error_messages: + error_messages.append("Se ha producido un error en %s" % get_server_parameters(server)["name"]) + + return video_urls, len(video_urls) > 0, "
    ".join(error_messages) + + +def get_server_name(serverid): + """ + Función obtener el nombre del servidor real a partir de una cadena. + @param serverid: Cadena donde mirar + @type serverid: str + + @return: Nombre del servidor + @rtype: str + """ + serverid = serverid.lower().split(".")[0] + + # Obtenemos el listado de servers + server_list = get_servers_list().keys() + + # Si el nombre está en la lista + if serverid in server_list: + return serverid + + # Recorre todos los servers buscando el nombre + for server in server_list: + params = get_server_parameters(server) + # Si la nombre esta en el listado de ids + if serverid in params["id"]: + return server + # Si el nombre es mas de una palabra, comprueba si algun id esta dentro del nombre: + elif len(serverid.split()) > 1: + for id in params["id"]: + if id in serverid: + return server + + # Si no se encuentra nada se devuelve una cadena vacia + return "" + + +def is_server_enabled(server): + """ + Función comprobar si un servidor está segun la configuración establecida + @param server: Nombre del servidor + @type server: str + + @return: resultado de la comprobación + @rtype: bool + """ + + server = get_server_name(server) + + # El server no existe + if not server: + return False + + server_parameters = get_server_parameters(server) + if server_parameters["active"] == True: + if not config.get_setting("hidepremium"): + return True + elif server_parameters["free"] == True: + return True + elif [premium for premium in server_parameters["premium"] if config.get_setting("premium", server=premium)]: + return True + + return False + + +def get_server_parameters(server): + """ + Obtiene los datos del servidor + @param server: Nombre del servidor + @type server: str + + @return: datos del servidor + @rtype: dict + """ + # logger.info("server %s" % server) + global dict_servers_parameters + server = server.split('.')[0] + if not server: + return {} + + if server not in dict_servers_parameters: + try: + # Servers + if os.path.isfile(os.path.join(config.get_runtime_path(), "servers", server + ".json")): + path = os.path.join(config.get_runtime_path(), "servers", server + ".json") + + # Debriders + elif os.path.isfile(os.path.join(config.get_runtime_path(), "servers", "debriders", server + ".json")): + path = os.path.join(config.get_runtime_path(), "servers", "debriders", server + ".json") + + import filetools + data = filetools.read(path) + dict_server = jsontools.load(data) + + # Imagenes: se admiten url y archivos locales dentro de "resources/images" + if dict_server.get("thumbnail") and "://" not in dict_server["thumbnail"]: + dict_server["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "media", + "servers", dict_server["thumbnail"]) + for k in ['premium', 'id']: + dict_server[k] = dict_server.get(k, list()) + + if type(dict_server[k]) == str: + dict_server[k] = [dict_server[k]] + + # if not dict_server.has_key(k) or dict_server[k] == "": + # dict_server[k] = [] + # elif type(dict_server[k]) == dict: + # dict_server[k] = dict_server[k]["value"] + # if type(dict_server[k]) == str: + # dict_server[k] = [dict_server[k]] + + if "find_videos" in dict_server: + dict_server['find_videos']["patterns"] = dict_server['find_videos'].get("patterns", list()) + dict_server['find_videos']["ignore_urls"] = dict_server['find_videos'].get("ignore_urls", list()) + + if "settings" in dict_server: + dict_server['has_settings'] = True + else: + dict_server['has_settings'] = False + + dict_servers_parameters[server] = dict_server + + except: + 
mensaje = "Error al cargar el servidor: %s\n" % server + import traceback + logger.error(mensaje + traceback.format_exc()) + return {} + + return dict_servers_parameters[server] + + +def get_server_json(server_name): + # logger.info("server_name=" + server_name) + import filetools + try: + server_path = filetools.join(config.get_runtime_path(), "servers", server_name + ".json") + if not filetools.exists(server_path): + server_path = filetools.join(config.get_runtime_path(), "servers", "debriders", server_name + ".json") + + # logger.info("server_path=" + server_path) + server_json = jsontools.load(filetools.read(server_path)) + # logger.info("server_json= %s" % server_json) + + except Exception, ex: + template = "An exception of type %s occured. Arguments:\n%r" + message = template % (type(ex).__name__, ex.args) + logger.error(" %s" % message) + server_json = None + + return server_json + + +def get_server_controls_settings(server_name): + dict_settings = {} + + list_controls = get_server_json(server_name).get('settings', []) + import copy + list_controls = copy.deepcopy(list_controls) + + # Conversion de str a bool, etc... + for c in list_controls: + if 'id' not in c or 'type' not in c or 'default' not in c: + # Si algun control de la lista no tiene id, type o default lo ignoramos + continue + + # new dict with key(id) and value(default) from settings + dict_settings[c['id']] = c['default'] + + return list_controls, dict_settings + + +def get_server_setting(name, server, default=None): + """ + Retorna el valor de configuracion del parametro solicitado. + + Devuelve el valor del parametro 'name' en la configuracion propia del servidor 'server'. + + Busca en la ruta \addon_data\plugin.video.addon\settings_servers el archivo server_data.json y lee + el valor del parametro 'name'. Si el archivo server_data.json no existe busca en la carpeta servers el archivo + server.json y crea un archivo server_data.json antes de retornar el valor solicitado. Si el parametro 'name' + tampoco existe en el el archivo server.json se devuelve el parametro default. 
+ + + @param name: nombre del parametro + @type name: str + @param server: nombre del servidor + @type server: str + @param default: valor devuelto en caso de que no exista el parametro name + @type default: cualquiera + + @return: El valor del parametro 'name' + @rtype: El tipo del valor del parametro + + """ + # Creamos la carpeta si no existe + if not os.path.exists(os.path.join(config.get_data_path(), "settings_servers")): + os.mkdir(os.path.join(config.get_data_path(), "settings_servers")) + + file_settings = os.path.join(config.get_data_path(), "settings_servers", server + "_data.json") + dict_settings = {} + dict_file = {} + if os.path.exists(file_settings): + # Obtenemos configuracion guardada de ../settings/channel_data.json + try: + dict_file = jsontools.load(open(file_settings, "rb").read()) + if isinstance(dict_file, dict) and 'settings' in dict_file: + dict_settings = dict_file['settings'] + except EnvironmentError: + logger.info("ERROR al leer el archivo: %s" % file_settings) + + if not dict_settings or name not in dict_settings: + # Obtenemos controles del archivo ../servers/server.json + try: + list_controls, default_settings = get_server_controls_settings(server) + except: + default_settings = {} + if name in default_settings: # Si el parametro existe en el server.json creamos el server_data.json + default_settings.update(dict_settings) + dict_settings = default_settings + dict_file['settings'] = dict_settings + # Creamos el archivo ../settings/channel_data.json + json_data = jsontools.dump(dict_file) + try: + open(file_settings, "wb").write(json_data) + except EnvironmentError: + logger.info("ERROR al salvar el archivo: %s" % file_settings) + + # Devolvemos el valor del parametro local 'name' si existe, si no se devuelve default + return dict_settings.get(name, default) + + +def set_server_setting(name, value, server): + # Creamos la carpeta si no existe + if not os.path.exists(os.path.join(config.get_data_path(), "settings_servers")): + os.mkdir(os.path.join(config.get_data_path(), "settings_servers")) + + file_settings = os.path.join(config.get_data_path(), "settings_servers", server + "_data.json") + dict_settings = {} + + dict_file = None + + if os.path.exists(file_settings): + # Obtenemos configuracion guardada de ../settings/channel_data.json + try: + dict_file = jsontools.load(open(file_settings, "r").read()) + dict_settings = dict_file.get('settings', {}) + except EnvironmentError: + logger.info("ERROR al leer el archivo: %s" % file_settings) + + dict_settings[name] = value + + # comprobamos si existe dict_file y es un diccionario, sino lo creamos + if dict_file is None or not dict_file: + dict_file = {} + + dict_file['settings'] = dict_settings + + # Creamos el archivo ../settings/channel_data.json + try: + json_data = jsontools.dump(dict_file) + open(file_settings, "w").write(json_data) + except EnvironmentError: + logger.info("ERROR al salvar el archivo: %s" % file_settings) + return None + + return value + + +def get_servers_list(): + """ + Obtiene un diccionario con todos los servidores disponibles + + @return: Diccionario cuyas claves son los nombre de los servidores (nombre del json) + y como valor un diccionario con los parametros del servidor. 
+ @rtype: dict + """ + server_list = {} + for server in os.listdir(os.path.join(config.get_runtime_path(), "servers")): + if server.endswith(".json") and not server == "version.json": + server_parameters = get_server_parameters(server) + if server_parameters["active"] == True: + server_list[server.split(".")[0]] = server_parameters + + return server_list + + +def get_debriders_list(): + """ + Obtiene un diccionario con todos los debriders disponibles + + @return: Diccionario cuyas claves son los nombre de los debriders (nombre del json) + y como valor un diccionario con los parametros del servidor. + @rtype: dict + """ + server_list = {} + for server in os.listdir(os.path.join(config.get_runtime_path(), "servers", "debriders")): + if server.endswith(".json"): + server_parameters = get_server_parameters(server) + if server_parameters["active"] == True: + logger.info(server_parameters) + server_list[server.split(".")[0]] = server_parameters + return server_list + + +def sort_servers(servers_list): + """ + Si esta activada la opcion "Ordenar servidores" en la configuracion de servidores y existe un listado de servidores + favoritos en la configuracion lo utiliza para ordenar la lista servers_list + :param servers_list: Listado de servidores para ordenar. Los elementos de la lista servers_list pueden ser strings + u objetos Item. En cuyo caso es necesario q tengan un atributo item.server del tipo str. + :return: Lista del mismo tipo de objetos que servers_list ordenada en funcion de los servidores favoritos. + """ + if servers_list and config.get_setting('favorites_servers'): + if isinstance(servers_list[0], Item): + servers_list = sorted(servers_list, + key=lambda x: config.get_setting("favorites_servers_list", server=x.server) or 100) + else: + servers_list = sorted(servers_list, + key=lambda x: config.get_setting("favorites_servers_list", server=x) or 100) + return servers_list + + +def filter_servers(servers_list): + """ + Si esta activada la opcion "Filtrar por servidores" en la configuracion de servidores, elimina de la lista + de entrada los servidores incluidos en la Lista Negra. + :param servers_list: Listado de servidores para filtrar. Los elementos de la lista servers_list pueden ser strings + u objetos Item. En cuyo caso es necesario q tengan un atributo item.server del tipo str. + :return: Lista del mismo tipo de objetos que servers_list filtrada en funcion de la Lista Negra. 
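+
+    Sketch of the expected behaviour (the server names are hypothetical):
+        visibles = filter_servers(["openload", "powvideo", "directo"])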
+ """ + if servers_list and config.get_setting('filter_servers'): + if isinstance(servers_list[0], Item): + servers_list_filter = filter(lambda x: not config.get_setting("black_list", server=x.server), servers_list) + else: + servers_list_filter = filter(lambda x: not config.get_setting("black_list", server=x), servers_list) + + # Si no hay enlaces despues de filtrarlos + if servers_list_filter or not platformtools.dialog_yesno("Filtrar servidores (Lista Negra)", + "Todos los enlaces disponibles pertenecen a servidores incluidos en su Lista Negra.", + "¿Desea mostrar estos enlaces?"): + servers_list = servers_list_filter + + return servers_list + + +def save_server_stats(stats, type="find_videos"): + if not config.get_setting("server_stats"): + return + + stats_file = os.path.join(config.get_data_path(), "server_stats.json") + today = datetime.datetime.now().strftime("%Y%m%d") + + # Leemos el archivo + try: + server_stats = jsontools.load(open(stats_file, "rb").read()) + except: + server_stats = {"created": time.time(), "data": {}} + + # Actualizamos los datos + for server in stats: + if not server in server_stats["data"]: + server_stats["data"][server] = {} + + if not today in server_stats["data"][server]: + server_stats["data"][server][today] = {"find_videos": {"found": 0}, "resolve": {"sucess": 0, "error": 0}} + + server_stats["data"][server][today][type][stats[server]] += 1 + + # Guardamos el archivo + open(stats_file, "wb").write(jsontools.dump(server_stats)) + + # Enviamos al servidor + return + if time.time() - server_stats["created"] > 86400: # 86400: #1 Dia + from core import httptools + if httptools.downloadpage("url servidor", headers={'Content-Type': 'application/json'}, + post=jsontools.dump(server_stats)).sucess: + os.remove(stats_file) + logger.info("Datos enviados correctamente") + else: + logger.info("No se han podido enviar los datos") diff --git a/plugin.video.alfa/core/tmdb.py b/plugin.video.alfa/core/tmdb.py new file mode 100755 index 00000000..877a3cfc --- /dev/null +++ b/plugin.video.alfa/core/tmdb.py @@ -0,0 +1,1442 @@ +# -*- coding: utf-8 -*- + +import copy +import re +import time + +from core import jsontools +from core import logger +from core import scrapertools +from core.item import InfoLabels + +# ----------------------------------------------------------------------------------------------------------- +# Conjunto de funciones relacionadas con las infoLabels. +# version 1.0: +# Version inicial +# +# Incluyen: +# set_infoLabels(source, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos extras de una o +# varias series, capitulos o peliculas. +# set_infoLabels_item(item, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos extras de una +# serie, capitulo o pelicula. +# set_infoLabels_itemlist(item_list, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos +# extras de una lista de series, capitulos o peliculas. +# infoLabels_tostring(item): Retorna un str con la lista ordenada con los infoLabels del item +# +# Uso: +# tmdb.set_infoLabels(item, seekTmdb = True) +# +# Obtener datos basicos de una pelicula: +# Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.fulltitle +# o en item.contentTitle y el año en item.infoLabels['year']. +# +# Obtener datos basicos de una serie: +# Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.show o en +# item.contentSerieName. 
+# +# Obtener mas datos de una pelicula o serie: +# Despues de obtener los datos basicos en item.infoLabels['tmdb'] tendremos el codigo de la serie o pelicula. +# Tambien podriamos directamente fijar este codigo, si se conoce, o utilizar los codigo correspondientes de: +# IMDB (en item.infoLabels['IMDBNumber'] o item.infoLabels['code'] o item.infoLabels['imdb_id']), TVDB +# (solo series, en item.infoLabels['tvdb_id']), +# Freebase (solo series, en item.infoLabels['freebase_mid']),TVRage (solo series, en +# item.infoLabels['tvrage_id']) +# +# Obtener datos de una temporada: +# Antes de llamar al metodo set_infoLabels el titulo de la serie debe estar en item.show o en +# item.contentSerieName, +# el codigo TMDB de la serie debe estar en item.infoLabels['tmdb'] (puede fijarse automaticamente mediante +# la consulta de datos basica) +# y el numero de temporada debe estar en item.infoLabels['season']. +# +# Obtener datos de un episodio: +# Antes de llamar al metodo set_infoLabels el titulo de la serie debe estar en item.show o en +# item.contentSerieName, +# el codigo TMDB de la serie debe estar en item.infoLabels['tmdb'] (puede fijarse automaticamente mediante la +# consulta de datos basica), +# el numero de temporada debe estar en item.infoLabels['season'] y el numero de episodio debe estar en +# item.infoLabels['episode']. +# +# +# -------------------------------------------------------------------------------------------------------------- + +otmdb_global = None + + +def set_infoLabels(source, seekTmdb=True, idioma_busqueda='es'): + """ + Dependiendo del tipo de dato de source obtiene y fija (item.infoLabels) los datos extras de una o varias series, + capitulos o peliculas. + + @param source: variable que contiene la información para establecer infoLabels + @type source: list, item + @param seekTmdb: si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario + obtiene los datos del propio Item. + @type seekTmdb: bool + @param idioma_busqueda: fija el valor de idioma en caso de busqueda en www.themoviedb.org + @type idioma_busqueda: str + @return: un numero o lista de numeros con el resultado de las llamadas a set_infoLabels_item + @rtype: int, list + """ + start_time = time.time() + if type(source) == list: + ret = set_infoLabels_itemlist(source, seekTmdb, idioma_busqueda) + logger.debug("Se han obtenido los datos de %i enlaces en %f segundos" % (len(source), time.time() - start_time)) + else: + ret = set_infoLabels_item(source, seekTmdb, idioma_busqueda) + logger.debug("Se han obtenido los datos del enlace en %f segundos" % (time.time() - start_time)) + return ret + + +def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='es'): + """ + De manera concurrente, obtiene los datos de los items incluidos en la lista item_list. + + La API tiene un limite de 40 peticiones por IP cada 10'' y por eso la lista no deberia tener mas de 30 items + para asegurar un buen funcionamiento de esta funcion. + + :param item_list: listado de objetos Item que representan peliculas, series o capitulos. El atributo + infoLabels de cada objeto Item sera modificado incluyendo los datos extras localizados. + :type item_list: list + :param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario + obtiene los datos del propio Item si existen. + :type seekTmdb: bool + :param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org. 
+ :type idioma_busqueda: str + + :return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo + infoLabels de cada Item. Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y + negativo en caso contrario. + :rtype: list + """ + import threading + + semaforo = threading.Semaphore(20) + lock = threading.Lock() + r_list = list() + i = 0 + l_hilo = list() + + def sub_thread(item, _i, _seekTmdb): + semaforo.acquire() + ret = set_infoLabels_item(item, _seekTmdb, idioma_busqueda, lock) + # logger.debug(str(ret) + "item: " + item.tostring()) + semaforo.release() + r_list.append((_i, item, ret)) + + for item in item_list: + t = threading.Thread(target=sub_thread, args=(item, i, seekTmdb)) + t.start() + i += 1 + l_hilo.append(t) + + # esperar q todos los hilos terminen + for x in l_hilo: + x.join() + + # Ordenar lista de resultados por orden de llamada para mantener el mismo orden q item_list + r_list.sort(key=lambda i: i[0]) + + # Reconstruir y devolver la lista solo con los resultados de las llamadas individuales + return [ii[2] for ii in r_list] + + +def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None): + # ----------------------------------------------------------------------------------------------------------- + # Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula. + # + # Parametros: + # item: (Item) Objeto Item que representa un pelicula, serie o capitulo. El atributo infoLabels sera + # modificado incluyendo los datos extras localizados. + # (opcional) seekTmdb: (bool) Si es True hace una busqueda en www.themoviedb.org para obtener los datos, + # en caso contrario obtiene los datos del propio Item si existen. + # (opcional) idioma_busqueda: (str) Codigo del idioma segun ISO 639-1, en caso de busqueda en + # www.themoviedb.org. + # Retorna: + # Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el atributo + # item.infoLabels. + # Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario. + # --------------------------------------------------------------------------------------------------------- + global otmdb_global + + def __leer_datos(otmdb_aux): + item.infoLabels = otmdb_aux.get_infoLabels(item.infoLabels) + if item.infoLabels['thumbnail']: + item.thumbnail = item.infoLabels['thumbnail'] + if item.infoLabels['fanart']: + item.fanart = item.infoLabels['fanart'] + + if seekTmdb: + # Comprobamos q tipo de contenido es... 
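+        # Anything that is not a movie (series, seasons and episodes alike) is
+        # looked up on themoviedb.org with the 'tv' search type.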
+ if item.contentType == 'movie': + tipo_busqueda = 'movie' + else: + tipo_busqueda = 'tv' + + if item.infoLabels['season']: + try: + numtemporada = int(item.infoLabels['season']) + except ValueError: + logger.debug("El numero de temporada no es valido") + return -1 * len(item.infoLabels) + + if lock: + lock.acquire() + + if not otmdb_global or (item.infoLabels['tmdb_id'] and + str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']) \ + or (otmdb_global.texto_buscado and + otmdb_global.texto_buscado != item.infoLabels['tvshowtitle']): + if item.infoLabels['tmdb_id']: + otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda, + idioma_busqueda=idioma_busqueda) + else: + otmdb_global = Tmdb(texto_buscado=item.infoLabels['tvshowtitle'], tipo=tipo_busqueda, + idioma_busqueda=idioma_busqueda, year=item.infoLabels['year']) + + __leer_datos(otmdb_global) + + temporada = otmdb_global.get_temporada(numtemporada) + + if lock: + lock.release() + + if item.infoLabels['episode']: + try: + episode = int(item.infoLabels['episode']) + except ValueError: + logger.debug("El número de episodio (%s) no es valido" % repr(item.infoLabels['episode'])) + return -1 * len(item.infoLabels) + + # Tenemos numero de temporada y numero de episodio validos... + # ... buscar datos episodio + item.infoLabels['mediatype'] = 'episode' + episodio = otmdb_global.get_episodio(numtemporada, episode) + + if episodio: + # Actualizar datos + __leer_datos(otmdb_global) + item.infoLabels['title'] = episodio['episodio_titulo'] + if episodio['episodio_sinopsis']: + item.infoLabels['plot'] = episodio['episodio_sinopsis'] + if episodio['episodio_imagen']: + item.infoLabels['poster_path'] = episodio['episodio_imagen'] + item.thumbnail = item.infoLabels['poster_path'] + if episodio['episodio_air_date']: + item.infoLabels['aired'] = episodio['episodio_air_date'] + if episodio['episodio_vote_average']: + item.infoLabels['rating'] = episodio['episodio_vote_average'] + item.infoLabels['votes'] = episodio['episodio_vote_count'] + + return len(item.infoLabels) + + + else: + # Tenemos numero de temporada valido pero no numero de episodio... + # ... buscar datos temporada + item.infoLabels['mediatype'] = 'season' + temporada = otmdb_global.get_temporada(numtemporada) + + if temporada: + # Actualizar datos + __leer_datos(otmdb_global) + item.infoLabels['title'] = temporada['name'] + if temporada['overview']: + item.infoLabels['plot'] = temporada['overview'] + if temporada['air_date']: + date = temporada['air_date'].split('-') + item.infoLabels['aired'] = date[2] + "/" + date[1] + "/" + date[0] + if temporada['poster_path']: + item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path'] + item.thumbnail = item.infoLabels['poster_path'] + return len(item.infoLabels) + + # Buscar... + else: + otmdb = copy.copy(otmdb_global) + # if otmdb is None: # Se elimina por q sino falla al añadir series por falta de imdb, pero por contra provoca mas llamadas + # Busquedas por ID... 
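+            # The lookups below run from most to least specific: tmdb_id, then
+            # imdb_id, and for series also tvdb_id, freebase_mid, freebase_id
+            # and tvrage_id.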
+ if item.infoLabels['tmdb_id']: + # ...Busqueda por tmdb_id + otmdb = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda, + idioma_busqueda=idioma_busqueda) + + elif item.infoLabels['imdb_id']: + # ...Busqueda por imdb code + otmdb = Tmdb(external_id=item.infoLabels['imdb_id'], external_source="imdb_id", + tipo=tipo_busqueda, + idioma_busqueda=idioma_busqueda) + + elif tipo_busqueda == 'tv': # buscar con otros codigos + if item.infoLabels['tvdb_id']: + # ...Busqueda por tvdb_id + otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", + tipo=tipo_busqueda, + idioma_busqueda=idioma_busqueda) + elif item.infoLabels['freebase_mid']: + # ...Busqueda por freebase_mid + otmdb = Tmdb(external_id=item.infoLabels['freebase_mid'], external_source="freebase_mid", + tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda) + elif item.infoLabels['freebase_id']: + # ...Busqueda por freebase_id + otmdb = Tmdb(external_id=item.infoLabels['freebase_id'], external_source="freebase_id", + tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda) + elif item.infoLabels['tvrage_id']: + # ...Busqueda por tvrage_id + otmdb = Tmdb(external_id=item.infoLabels['tvrage_id'], external_source="tvrage_id", + tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda) + + if otmdb is None: + # No se ha podido buscar por ID... + # hacerlo por titulo + if tipo_busqueda == 'tv': + # Busqueda de serie por titulo y filtrando sus resultados si es necesario + otmdb = Tmdb(texto_buscado=item.infoLabels['tvshowtitle'], tipo=tipo_busqueda, + idioma_busqueda=idioma_busqueda, filtro=item.infoLabels.get('filtro', {}), + year=item.infoLabels['year']) + else: + # Busqueda de pelicula por titulo... + if item.infoLabels['year'] or item.infoLabels['filtro']: + # ...y año o filtro + if item.contentTitle: + titulo_buscado = item.contentTitle + else: + titulo_buscado = item.fulltitle + + otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda, + idioma_busqueda=idioma_busqueda, + filtro=item.infoLabels.get('filtro', {}), + year=item.infoLabels['year']) + + if otmdb.get_id() and not lock: + # Si la busqueda ha dado resultado y no se esta buscando una lista de items, + # realizar otra busqueda para ampliar la informacion + otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda, + idioma_busqueda=idioma_busqueda) + + if otmdb is not None and otmdb.get_id(): + # La busqueda ha encontrado un resultado valido + __leer_datos(otmdb) + return len(item.infoLabels) + + # La busqueda en tmdb esta desactivada o no ha dado resultado + # item.contentType = item.infoLabels['mediatype'] + return -1 * len(item.infoLabels) + + +def find_and_set_infoLabels(item): + logger.info() + + global otmdb_global + tmdb_result = None + + if item.contentType == "movie": + tipo_busqueda = "movie" + tipo_contenido = "pelicula" + title = item.contentTitle + else: + tipo_busqueda = "tv" + tipo_contenido = "serie" + title = item.contentSerieName + + # Si el titulo incluye el (año) se lo quitamos + year = scrapertools.find_single_match(title, "^.+?\s*(\(\d{4}\))$") + if year: + title = title.replace(year, "").strip() + item.infoLabels['year'] = year[1:-1] + + if not item.infoLabels.get("tmdb_id"): + if not item.infoLabels.get("imdb_id"): + otmdb_global = Tmdb(texto_buscado=title, tipo=tipo_busqueda, year=item.infoLabels['year']) + else: + otmdb_global = Tmdb(external_id=item.infoLabels.get("imdb_id"), external_source="imdb_id", + tipo=tipo_busqueda) + elif not otmdb_global or str(otmdb_global.result.get("id")) != item.infoLabels['tmdb_id']: + 
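+        # The module-level Tmdb instance is rebuilt only when it points at a
+        # different tmdb_id than the item being annotated.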
otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo_busqueda, idioma_busqueda="es") + + results = otmdb_global.get_list_resultados() + + if len(results) > 1: + from platformcode import platformtools + tmdb_result = platformtools.show_video_info(results, item=item, + caption="[%s]: Selecciona la %s correcta" % (title, tipo_contenido)) + elif len(results) > 0: + tmdb_result = results[0] + + if isinstance(item.infoLabels, InfoLabels): + infoLabels = item.infoLabels + else: + infoLabels = InfoLabels() + + if tmdb_result: + infoLabels['tmdb_id'] = tmdb_result['id'] + # todo mirar si se puede eliminar y obtener solo desde get_nfo() + infoLabels['url_scraper'] = ["https://www.themoviedb.org/%s/%s" % (tipo_busqueda, infoLabels['tmdb_id'])] + if infoLabels['tvdb_id']: + infoLabels['url_scraper'].append("http://thetvdb.com/index.php?tab=series&id=%s" % infoLabels['tvdb_id']) + item.infoLabels = infoLabels + set_infoLabels_item(item) + + return True + + else: + item.infoLabels = infoLabels + return False + + +def get_nfo(item): + """ + Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, + para tmdb funciona solo pasandole la url + @param item: elemento que contiene los datos necesarios para generar la info + @type item: Item + @rtype: str + @return: + """ + if "season" in item.infoLabels and "episode" in item.infoLabels: + info_nfo = "https://www.themoviedb.org/tv/%s/season/%s/episode/%s\n" % \ + (item.infoLabels['tmdb_id'], item.contentSeason, item.contentEpisodeNumber) + else: + info_nfo = ', '.join(item.infoLabels['url_scraper']) + "\n" + + return info_nfo + + +def completar_codigos(item): + """ + Si es necesario comprueba si existe el identificador de tvdb y sino existe trata de buscarlo + """ + if item.contentType != "movie" and not item.infoLabels['tvdb_id']: + # Lanzar busqueda por imdb_id en tvdb + from core.tvdb import Tvdb + ob = Tvdb(imdb_id=item.infoLabels['imdb_id']) + item.infoLabels['tvdb_id'] = ob.get_id() + if item.infoLabels['tvdb_id']: + url_scraper = "http://thetvdb.com/index.php?tab=series&id=%s" % item.infoLabels['tvdb_id'] + if url_scraper not in item.infoLabels['url_scraper']: + item.infoLabels['url_scraper'].append(url_scraper) + + +# Clase auxiliar +class ResultDictDefault(dict): + # Python 2.4 + def __getitem__(self, key): + try: + return super(ResultDictDefault, self).__getitem__(key) + except: + return self.__missing__(key) + + def __missing__(self, key): + ''' + valores por defecto en caso de que la clave solicitada no exista + ''' + if key in ['genre_ids', 'genre', 'genres']: + return list() + elif key == 'images_posters': + posters = dict() + if 'images' in super(ResultDictDefault, self).keys() and \ + 'posters' in super(ResultDictDefault, self).__getitem__('images'): + posters = super(ResultDictDefault, self).__getitem__('images')['posters'] + super(ResultDictDefault, self).__setattr__("images_posters", posters) + + return posters + + elif key == "images_backdrops": + backdrops = dict() + if 'images' in super(ResultDictDefault, self).keys() and \ + 'backdrops' in super(ResultDictDefault, self).__getitem__('images'): + backdrops = super(ResultDictDefault, self).__getitem__('images')['backdrops'] + super(ResultDictDefault, self).__setattr__("images_backdrops", backdrops) + + return backdrops + + elif key == "images_profiles": + profiles = dict() + if 'images' in super(ResultDictDefault, self).keys() and \ + 'profiles' in super(ResultDictDefault, self).__getitem__('images'): + profiles = super(ResultDictDefault, 
self).__getitem__('images')['profiles'] + super(ResultDictDefault, self).__setattr__("images_profiles", profiles) + + return profiles + + else: + # El resto de claves devuelven cadenas vacias por defecto + return "" + + def __str__(self): + return self.tostring(separador=',\n') + + def tostring(self, separador=',\n'): + ls = [] + for i in super(ResultDictDefault, self).items(): + i_str = str(i)[1:-1] + if isinstance(i[0], str): + old = i[0] + "'," + new = i[0] + "':" + else: + old = str(i[0]) + "," + new = str(i[0]) + ":" + ls.append(i_str.replace(old, new, 1)) + + return "{%s}" % separador.join(ls) + + +# --------------------------------------------------------------------------------------------------------------- +# class Tmdb: +# Scraper para el addon basado en el Api de https://www.themoviedb.org/ +# version 1.4: +# - Documentada limitacion de uso de la API (ver mas abajo). +# - Añadido metodo get_temporada() +# version 1.3: +# - Corregido error al devolver None el path_poster y el backdrop_path +# - Corregido error que hacia que en el listado de generos se fueran acumulando de una llamada a otra +# - Añadido metodo get_generos() +# - Añadido parametros opcional idioma_alternativo al metodo get_sinopsis() +# +# +# Uso: +# Metodos constructores: +# Tmdb(texto_buscado, tipo) +# Parametros: +# texto_buscado:(str) Texto o parte del texto a buscar +# tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. Por defecto "movie" +# (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 +# (opcional) include_adult: (bool) Se incluyen contenidos para adultos en la busqueda o no. Por defecto +# 'False' +# (opcional) year: (str) Año de lanzamiento. +# (opcional) page: (int) Cuando hay muchos resultados para una busqueda estos se organizan por paginas. +# Podemos cargar la pagina que deseemos aunque por defecto siempre es la primera. +# Return: +# Esta llamada devuelve un objeto Tmdb que contiene la primera pagina del resultado de buscar 'texto_buscado' +# en la web themoviedb.org. Cuantos mas parametros opcionales se incluyan mas precisa sera la busqueda. +# Ademas el objeto esta inicializado con el primer resultado de la primera pagina de resultados. +# Tmdb(id_Tmdb,tipo) +# Parametros: +# id_Tmdb: (str) Codigo identificador de una determinada pelicula o serie en themoviedb.org +# tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. Por defecto "movie" +# (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 +# Return: +# Esta llamada devuelve un objeto Tmdb que contiene el resultado de buscar una pelicula o serie con el +# identificador id_Tmd +# en la web themoviedb.org. +# Tmdb(external_id, external_source, tipo) +# Parametros: +# external_id: (str) Codigo identificador de una determinada pelicula o serie en la web referenciada por +# 'external_source'. +# external_source: (Para series:"imdb_id","freebase_mid","freebase_id","tvdb_id","tvrage_id"; Para +# peliculas:"imdb_id") +# tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. Por defecto "movie" +# (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 +# Return: +# Esta llamada devuelve un objeto Tmdb que contiene el resultado de buscar una pelicula o serie con el +# identificador 'external_id' de +# la web referenciada por 'external_source' en la web themoviedb.org. +# +# Metodos principales: +# get_id(): Retorna un str con el identificador Tmdb de la pelicula o serie cargada o una cadena vacia si no hubiese +# nada cargado. 
+# get_sinopsis(idioma_alternativo): Retorna un str con la sinopsis de la serie o pelicula cargada. +# get_poster (tipo_respuesta,size): Obtiene el poster o un listado de posters. +# get_backdrop (tipo_respuesta,size): Obtiene una imagen de fondo o un listado de imagenes de fondo. +# get_temporada(temporada): Obtiene un diccionario con datos especificos de la temporada. +# get_episodio (temporada, capitulo): Obtiene un diccionario con datos especificos del episodio. +# get_generos(): Retorna un str con la lista de generos a los que pertenece la pelicula o serie. +# +# +# Otros metodos: +# load_resultado(resultado, page): Cuando la busqueda devuelve varios resultados podemos seleccionar que resultado +# concreto y de que pagina cargar los datos. +# +# Limitaciones: +# El uso de la API impone un limite de 20 conexiones simultaneas (concurrencia) o 30 peticiones en 10 segundos por IP +# Informacion sobre la api : http://docs.themoviedb.apiary.io +# ------------------------------------------------------------------------------------------------------------------- + + +class Tmdb(object): + # Atributo de clase + dic_generos = {} + ''' + dic_generos={"id_idioma1": {"tv": {"id1": "name1", + "id2": "name2" + }, + "movie": {"id1": "name1", + "id2": "name2" + } + } + } + ''' + dic_country = {"AD": "Andorra", "AE": "Emiratos Árabes Unidos", "AF": "Afganistán", "AG": "Antigua y Barbuda", + "AI": "Anguila", "AL": "Albania", "AM": "Armenia", "AN": "Antillas Neerlandesas", "AO": "Angola", + "AQ": "Antártida", "AR": "Argentina", "AS": "Samoa Americana", "AT": "Austria", "AU": "Australia", + "AW": "Aruba", "AX": "Islas de Åland", "AZ": "Azerbayán", "BA": "Bosnia y Herzegovina", + "BD": "Bangladesh", "BE": "Bélgica", "BF": "Burkina Faso", "BG": "Bulgaria", "BI": "Burundi", + "BJ": "Benín", "BL": "San Bartolomé", "BM": "Islas Bermudas", "BN": "Brunéi", "BO": "Bolivia", + "BR": "Brasil", "BS": "Bahamas", "BT": "Bhután", "BV": "Isla Bouvet", "BW": "Botsuana", + "BY": "Bielorrusia", "BZ": "Belice", "CA": "Canadá", "CC": "Islas Cocos (Keeling)", "CD": "Congo", + "CF": "República Centroafricana", "CG": "Congo", "CH": "Suiza", "CI": "Costa de Marfil", + "CK": "Islas Cook", "CL": "Chile", "CM": "Camerún", "CN": "China", "CO": "Colombia", + "CR": "Costa Rica", "CU": "Cuba", "CV": "Cabo Verde", "CX": "Isla de Navidad", "CY": "Chipre", + "CZ": "República Checa", "DE": "Alemania", "DJ": "Yibuti", "DK": "Dinamarca", "DZ": "Algeria", + "EC": "Ecuador", "EE": "Estonia", "EG": "Egipto", "EH": "Sahara Occidental", "ER": "Eritrea", + "ES": "España", "ET": "Etiopía", "FI": "Finlandia", "FJ": "Fiyi", "FK": "Islas Malvinas", + "FM": "Micronesia", "FO": "Islas Feroe", "FR": "Francia", "GA": "Gabón", "GB": "Gran Bretaña", + "GD": "Granada", "GE": "Georgia", "GF": "Guayana Francesa", "GG": "Guernsey", "GH": "Ghana", + "GI": "Gibraltar", "GL": "Groenlandia", "GM": "Gambia", "GN": "Guinea", "GP": "Guadalupe", + "GQ": "Guinea Ecuatorial", "GR": "Grecia", "GS": "Islas Georgias del Sur y Sandwich del Sur", + "GT": "Guatemala", "GW": "Guinea-Bissau", "GY": "Guyana", "HK": "Hong kong", + "HM": "Islas Heard y McDonald", "HN": "Honduras", "HR": "Croacia", "HT": "Haití", "HU": "Hungría", + "ID": "Indonesia", "IE": "Irlanda", "IM": "Isla de Man", "IN": "India", + "IO": "Territorio Británico del Océano Índico", "IQ": "Irak", "IR": "Irán", "IS": "Islandia", + "IT": "Italia", "JE": "Jersey", "JM": "Jamaica", "JO": "Jordania", "JP": "Japón", "KG": "Kirgizstán", + "KH": "Camboya", "KM": "Comoras", "KP": "Corea del Norte", "KR": "Corea del Sur", 
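+                   # (ISO 3166-1 alpha-2 code -> Spanish country name; get_infoLabels()
+                   # below reads it via Tmdb.dic_country.get(code, code), so an unknown
+                   # code falls back to the code itself)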
"KW": "Kuwait", + "KY": "Islas Caimán", "KZ": "Kazajistán", "LA": "Laos", "LB": "Líbano", "LC": "Santa Lucía", + "LI": "Liechtenstein", "LK": "Sri lanka", "LR": "Liberia", "LS": "Lesoto", "LT": "Lituania", + "LU": "Luxemburgo", "LV": "Letonia", "LY": "Libia", "MA": "Marruecos", "MC": "Mónaco", + "MD": "Moldavia", "ME": "Montenegro", "MF": "San Martín (Francia)", "MG": "Madagascar", + "MH": "Islas Marshall", "MK": "Macedônia", "ML": "Mali", "MM": "Birmania", "MN": "Mongolia", + "MO": "Macao", "MP": "Islas Marianas del Norte", "MQ": "Martinica", "MR": "Mauritania", + "MS": "Montserrat", "MT": "Malta", "MU": "Mauricio", "MV": "Islas Maldivas", "MW": "Malawi", + "MX": "México", "MY": "Malasia", "NA": "Namibia", "NE": "Niger", "NG": "Nigeria", "NI": "Nicaragua", + "NL": "Países Bajos", "NO": "Noruega", "NP": "Nepal", "NR": "Nauru", "NU": "Niue", + "NZ": "Nueva Zelanda", "OM": "Omán", "PA": "Panamá", "PE": "Perú", "PF": "Polinesia Francesa", + "PH": "Filipinas", "PK": "Pakistán", "PL": "Polonia", "PM": "San Pedro y Miquelón", + "PN": "Islas Pitcairn", "PR": "Puerto Rico", "PS": "Palestina", "PT": "Portugal", "PW": "Palau", + "PY": "Paraguay", "QA": "Qatar", "RE": "Reunión", "RO": "Rumanía", "RS": "Serbia", "RU": "Rusia", + "RW": "Ruanda", "SA": "Arabia Saudita", "SB": "Islas Salomón", "SC": "Seychelles", "SD": "Sudán", + "SE": "Suecia", "SG": "Singapur", "SH": "Santa Elena", "SI": "Eslovenia", + "SJ": "Svalbard y Jan Mayen", + "SK": "Eslovaquia", "SL": "Sierra Leona", "SM": "San Marino", "SN": "Senegal", "SO": "Somalia", + "SV": "El Salvador", "SY": "Siria", "SZ": "Swazilandia", "TC": "Islas Turcas y Caicos", "TD": "Chad", + "TF": "Territorios Australes y Antárticas Franceses", "TG": "Togo", "TH": "Tailandia", + "TJ": "Tadjikistán", "TK": "Tokelau", "TL": "Timor Oriental", "TM": "Turkmenistán", "TN": "Tunez", + "TO": "Tonga", "TR": "Turquía", "TT": "Trinidad y Tobago", "TV": "Tuvalu", "TW": "Taiwán", + "TZ": "Tanzania", "UA": "Ucrania", "UG": "Uganda", + "UM": "Islas Ultramarinas Menores de Estados Unidos", + "UY": "Uruguay", "UZ": "Uzbekistán", "VA": "Ciudad del Vaticano", + "VC": "San Vicente y las Granadinas", + "VE": "Venezuela", "VG": "Islas Vírgenes Británicas", "VI": "Islas Vírgenes de los Estados Unidos", + "VN": "Vietnam", "VU": "Vanuatu", "WF": "Wallis y Futuna", "WS": "Samoa", "YE": "Yemen", + "YT": "Mayotte", "ZA": "Sudáfrica", "ZM": "Zambia", "ZW": "Zimbabue", "BB": "Barbados", + "BH": "Bahrein", + "DM": "Dominica", "DO": "República Dominicana", "GU": "Guam", "IL": "Israel", "KE": "Kenia", + "KI": "Kiribati", "KN": "San Cristóbal y Nieves", "MZ": "Mozambique", "NC": "Nueva Caledonia", + "NF": "Isla Norfolk", "PG": "Papúa Nueva Guinea", "SR": "Surinám", "ST": "Santo Tomé y Príncipe", + "US": "EEUU"} + + def __init__(self, **kwargs): + self.page = kwargs.get('page', 1) + self.index_results = 0 + self.results = [] + self.result = ResultDictDefault() + self.total_pages = 0 + self.total_results = 0 + + self.temporada = {} + self.texto_buscado = kwargs.get('texto_buscado', '') + + self.busqueda_id = kwargs.get('id_Tmdb', '') + self.busqueda_texto = re.sub('\[\\\?(B|I|COLOR)\s?[^\]]*\]', '', self.texto_buscado) + self.busqueda_tipo = kwargs.get('tipo', '') + self.busqueda_idioma = kwargs.get('idioma_busqueda', 'es') + self.busqueda_include_adult = kwargs.get('include_adult', False) + self.busqueda_year = kwargs.get('year', '') + self.busqueda_filtro = kwargs.get('filtro', {}) + self.discover = kwargs.get('discover', {}) + + # Reellenar diccionario de generos si es necesario + if 
(self.busqueda_tipo == 'movie' or self.busqueda_tipo == "tv") and \ + (self.busqueda_idioma not in Tmdb.dic_generos or + self.busqueda_tipo not in Tmdb.dic_generos[self.busqueda_idioma]): + self.rellenar_dic_generos(self.busqueda_tipo, self.busqueda_idioma) + + if not self.busqueda_tipo: + self.busqueda_tipo = 'movie' + + if self.busqueda_id: + # Busqueda por identificador tmdb + self.__by_id() + + elif self.busqueda_texto: + # Busqueda por texto + self.__search(page=self.page) + + elif 'external_source' in kwargs and 'external_id' in kwargs: + # Busqueda por identificador externo segun el tipo. + # TV Series: imdb_id, freebase_mid, freebase_id, tvdb_id, tvrage_id + # Movies: imdb_id + if (self.busqueda_tipo == 'movie' and kwargs.get('external_source') == "imdb_id") or \ + (self.busqueda_tipo == 'tv' and kwargs.get('external_source') in ( + "imdb_id", "freebase_mid", "freebase_id", "tvdb_id", "tvrage_id")): + self.busqueda_id = kwargs.get('external_id') + self.__by_id(source=kwargs.get('external_source')) + + elif self.discover: + self.__discover() + + else: + logger.debug("Creado objeto vacio") + + @classmethod + def rellenar_dic_generos(cls, tipo='movie', idioma='es'): + resultado = {} + + # Si se busca en idioma catalán, se cambia a español para el diccionario de géneros + if idioma == "ca": + idioma = "es" + + # Rellenar diccionario de generos del tipo e idioma pasados como parametros + if idioma not in cls.dic_generos: + cls.dic_generos[idioma] = {} + + if tipo not in cls.dic_generos[idioma]: + cls.dic_generos[idioma][tipo] = {} + url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=6889f6089877fd092454d00edb44a84d&language=%s' + % (tipo, idioma)) + try: + logger.info("[Tmdb.py] Rellenando dicionario de generos") + resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) + lista_generos = resultado["genres"] + + for i in lista_generos: + cls.dic_generos[idioma][tipo][str(i["id"])] = i["name"] + except: + pass + + if "status_code" in resultado: + msg = "Error de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"]) + logger.error(msg) + + def __by_id(self, source='tmdb'): + resultado = {} + buscando = "" + + if self.busqueda_id: + if source == "tmdb": + # http://api.themoviedb.org/3/movie/1924?api_key=6889f6089877fd092454d00edb44a84d&language=es + # &append_to_response=images,videos,external_ids,credits&include_image_language=es,null + # http://api.themoviedb.org/3/tv/1407?api_key=6889f6089877fd092454d00edb44a84d&language=es + # &append_to_response=images,videos,external_ids,credits&include_image_language=es,null + url = ('http://api.themoviedb.org/3/%s/%s?api_key=6889f6089877fd092454d00edb44a84d&language=%s' + '&append_to_response=images,videos,external_ids,credits&include_image_language=%s,null' % + (self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma, self.busqueda_idioma)) + buscando = "id_Tmdb: %s" % self.busqueda_id + else: + # http://api.themoviedb.org/3/find/%s?external_source=imdb_id&api_key=6889f6089877fd092454d00edb44a84d + url = ('http://api.themoviedb.org/3/find/%s?external_source=%s&api_key=6889f6089877fd092454d00edb44a84d' + '&language=%s' % (self.busqueda_id, source, self.busqueda_idioma)) + buscando = "%s: %s" % (source.capitalize(), self.busqueda_id) + + logger.info("[Tmdb.py] Buscando %s:\n%s" % (buscando, url)) + + try: + resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) + if source != "tmdb": + if self.busqueda_tipo == "movie": + resultado = resultado["movie_results"][0] + else: + resultado = 
resultado["tv_results"][0] + except: + resultado = {} + + if resultado and not "status_code" in resultado: + self.results = [resultado] + self.total_results = 1 + self.total_pages = 1 + self.result = ResultDictDefault(resultado) + else: + # No hay resultados de la busqueda + msg = "La busqueda de %s no dio resultados." % buscando + if "status_code" in resultado: + msg += "\nError de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"]) + logger.debug(msg) + + def __search(self, index_results=0, page=1): + resultado = {} + self.result = ResultDictDefault() + results = [] + total_results = 0 + total_pages = 0 + buscando = "" + + if self.busqueda_texto: + # http://api.themoviedb.org/3/search/movie?api_key=6889f6089877fd092454d00edb44a84d&query=superman&language=es + # &include_adult=false&page=1 + url = ('http://api.themoviedb.org/3/search/%s?api_key=6889f6089877fd092454d00edb44a84d&query=%s&language=%s' + '&include_adult=%s&page=%s' % (self.busqueda_tipo, self.busqueda_texto.replace(' ', '%20'), + self.busqueda_idioma, self.busqueda_include_adult, page)) + + if self.busqueda_year: + url += '&year=%s' % (self.busqueda_year) + + buscando = self.busqueda_texto.capitalize() + logger.info("[Tmdb.py] Buscando %s en pagina %s:\n%s" % (buscando, page, url)) + + try: + resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) + total_results = resultado["total_results"] + total_pages = resultado["total_pages"] + except: + total_results = 0 + + if total_results > 0: + results = resultado["results"] + + if self.busqueda_filtro and results: + # TODO documentar esta parte + for key, value in dict(self.busqueda_filtro).items(): + for r in results[:]: + if key not in r or r[key] != value: + results.remove(r) + total_results -= 1 + + if results: + if index_results >= len(results): + # Se ha solicitado un numero de resultado mayor de los obtenidos + logger.error( + "La busqueda de '%s' dio %s resultados para la pagina %s\nImposible mostrar el resultado numero %s" + % (buscando, len(results), page, index_results)) + return 0 + + # Retornamos el numero de resultados de esta pagina + self.results = results + self.total_results = total_results + self.total_pages = total_pages + self.result = ResultDictDefault(self.results[index_results]) + return len(self.results) + + else: + # No hay resultados de la busqueda + msg = "La busqueda de '%s' no dio resultados para la pagina %s" % (buscando, page) + if "status_code" in resultado: + msg += "\nError de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"]) + logger.error(msg) + return 0 + + def __discover(self, index_results=0): + resultado = {} + self.result = ResultDictDefault() + results = [] + total_results = 0 + total_pages = 0 + + # Ejemplo self.discover: {'url': 'discover/movie', 'with_cast': '1'} + # url: Método de la api a ejecutar + # resto de claves: Parámetros de la búsqueda concatenados a la url + type_search = self.discover.get('url', '') + if type_search: + params = [] + for key, value in self.discover.items(): + if key != "url": + params.append(key + "=" + str(value)) + # http://api.themoviedb.org/3/discover/movie?api_key=6889f6089877fd092454d00edb44a84d&query=superman&language=es + url = ('http://api.themoviedb.org/3/%s?api_key=6889f6089877fd092454d00edb44a84d&%s' + % (type_search, "&".join(params))) + + logger.info("[Tmdb.py] Buscando %s:\n%s" % (type_search, url)) + + try: + resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) + total_results = resultado["total_results"] + 
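+            # (illustrative trace, values hypothetical: self.discover =
+            #  {'url': 'discover/movie', 'with_genres': '28', 'sort_by': 'popularity.desc'}
+            #  was turned above into
+            #  .../3/discover/movie?api_key=...&with_genres=28&sort_by=popularity.desc)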
total_pages = resultado["total_pages"] + except: + if resultado and not "status_code" in resultado: + total_results = -1 + total_pages = 1 + else: + total_results = 0 + + if total_results > 0: + results = resultado["results"] + if self.busqueda_filtro and results: + # TODO documentar esta parte + for key, value in dict(self.busqueda_filtro).items(): + for r in results[:]: + if key not in r or r[key] != value: + results.remove(r) + total_results -= 1 + elif total_results == -1: + results = resultado + + if index_results >= len(results): + logger.error( + "La busqueda de '%s' no dio %s resultados" % (type_search, index_results)) + return 0 + + # Retornamos el numero de resultados de esta pagina + if results: + self.results = results + self.total_results = total_results + self.total_pages = total_pages + if total_results > 0: + self.result = ResultDictDefault(self.results[index_results]) + else: + self.result = results + return len(self.results) + else: + # No hay resultados de la busqueda + logger.error("La busqueda de '%s' no dio resultados" % type_search) + return 0 + + def load_resultado(self, index_results=0, page=1): + # Si no hay resultados, solo hay uno o + # si el numero de resultados de esta pagina es menor al indice buscado salir + self.result = ResultDictDefault() + num_result_page = len(self.results) + + if page > self.total_pages: + return False + + if page != self.page: + num_result_page = self.__search(index_results, page) + + if num_result_page == 0 or num_result_page <= index_results: + return False + + self.page = page + self.index_results = index_results + self.result = ResultDictDefault(self.results[index_results]) + return True + + def get_list_resultados(self, num_result=20): + # logger.info("self %s" % str(self)) + # TODO documentar + res = [] + + if num_result <= 0: + num_result = self.total_results + num_result = min([num_result, self.total_results]) + + cr = 0 + for p in range(1, self.total_pages + 1): + for r in range(0, len(self.results)): + try: + if self.load_resultado(r, p): + result = self.result.copy() + + result['thumbnail'] = self.get_poster(size="w300") + result['fanart'] = self.get_backdrop() + res.append(result) + cr += 1 + if cr >= num_result: + return res + except: + continue + + return res + + def get_generos(self, origen=None): + """ + :param origen: Diccionario origen de donde se obtiene los infoLabels, por omision self.result + :type origen: Dict + :return: Devuelve la lista de generos a los que pertenece la pelicula o serie. + :rtype: str + """ + genre_list = [] + + if not origen: + origen = self.result + + if "genre_ids" in origen: + # Buscar lista de generos por IDs + for i in origen.get("genre_ids"): + try: + genre_list.append(Tmdb.dic_generos[self.busqueda_idioma][self.busqueda_tipo][str(i)]) + except: + pass + + elif "genre" in origen or "genres" in origen: + # Buscar lista de generos (lista de objetos {id,nombre}) + v = origen["genre"] + v.extend(origen["genres"]) + for i in v: + genre_list.append(i['name']) + + return ', '.join(genre_list) + + def search_by_id(self, id, source='tmdb', tipo='movie'): + self.busqueda_id = id + self.busqueda_tipo = tipo + self.__by_id(source=source) + + def get_id(self): + """ + + :return: Devuelve el identificador Tmdb de la pelicula o serie cargada o una cadena vacia en caso de que no + hubiese nada cargado. Se puede utilizar este metodo para saber si una busqueda ha dado resultado o no. 
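+        Illustrative sketch (the imdb id is only an example):
+
+            otmdb = Tmdb()
+            otmdb.search_by_id("tt0903747", source="imdb_id", tipo="tv")
+            if otmdb.get_id():
+                # the external lookup found the show
+                ...
+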
+ :rtype: str + """ + return str(self.result.get('id', "")) + + def get_sinopsis(self, idioma_alternativo=""): + """ + + :param idioma_alternativo: codigo del idioma, segun ISO 639-1, en el caso de que en el idioma fijado para la + busqueda no exista sinopsis. + Por defecto, se utiliza el idioma original. Si se utiliza None como idioma_alternativo, solo se buscara en + el idioma fijado. + :type idioma_alternativo: str + :return: Devuelve la sinopsis de una pelicula o serie + :rtype: str + """ + resultado = {} + ret = "" + + if 'id' in self.result: + ret = self.result.get('overview') + if ret == "" and str(idioma_alternativo).lower() != 'none': + # Vamos a lanzar una busqueda por id y releer de nuevo la sinopsis + self.busqueda_id = str(self.result["id"]) + if idioma_alternativo: + self.busqueda_idioma = idioma_alternativo + else: + self.busqueda_idioma = self.result['original_language'] + + url = ('http://api.themoviedb.org/3/%s/%s?api_key=6889f6089877fd092454d00edb44a84d&language=%s' % + (self.busqueda_tipo, self.busqueda_id, self.busqueda_idioma)) + try: + resultado = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) + except: + pass + + if 'overview' in resultado: + self.result['overview'] = resultado['overview'] + ret = self.result['overview'] + + if "status_code" in resultado: + msg = "Error de tmdb: %s %s" % (resultado["status_code"], resultado["status_message"]) + logger.debug(msg) + + return ret + + def get_poster(self, tipo_respuesta="str", size="original"): + """ + + @param tipo_respuesta: Tipo de dato devuelto por este metodo. Por defecto "str" + @type tipo_respuesta: list, str + @param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original") + Indica la anchura(w) o altura(h) de la imagen a descargar. Por defecto "original" + @return: Si el tipo_respuesta es "list" devuelve un listado con todas las urls de las imagenes tipo poster del + tamaño especificado. + Si el tipo_respuesta es "str" devuelve la url de la imagen tipo poster, mas valorada, del tamaño + especificado. + Si el tamaño especificado no existe se retornan las imagenes al tamaño original. + @rtype: list, str + """ + ret = [] + if size not in ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280"): + size = "original" + + if self.result["poster_path"] is None or self.result["poster_path"] == "": + poster_path = "" + else: + poster_path = 'http://image.tmdb.org/t/p/' + size + self.result["poster_path"] + + if tipo_respuesta == 'str': + return poster_path + elif not self.result["id"]: + return [] + + if len(self.result['images_posters']) == 0: + # Vamos a lanzar una busqueda por id y releer de nuevo + self.busqueda_id = str(self.result["id"]) + self.__by_id() + + if len(self.result['images_posters']) > 0: + for i in self.result['images_posters']: + imagen_path = i['file_path'] + if size != "original": + # No podemos pedir tamaños mayores que el original + if size[1] == 'w' and int(imagen_path['width']) < int(size[1:]): + size = "original" + elif size[1] == 'h' and int(imagen_path['height']) < int(size[1:]): + size = "original" + ret.append('http://image.tmdb.org/t/p/' + size + imagen_path) + else: + ret.append(poster_path) + + return ret + + def get_backdrop(self, tipo_respuesta="str", size="original"): + """ + Devuelve las imagenes de tipo backdrop + @param tipo_respuesta: Tipo de dato devuelto por este metodo. 
Por defecto "str" + @type tipo_respuesta: list, str + @param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original") + Indica la anchura(w) o altura(h) de la imagen a descargar. Por defecto "original" + @type size: str + @return: Si el tipo_respuesta es "list" devuelve un listado con todas las urls de las imagenes tipo backdrop del + tamaño especificado. + Si el tipo_respuesta es "str" devuelve la url de la imagen tipo backdrop, mas valorada, del tamaño especificado. + Si el tamaño especificado no existe se retornan las imagenes al tamaño original. + @rtype: list, str + """ + ret = [] + if size not in ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280"): + size = "original" + + if self.result["backdrop_path"] is None or self.result["backdrop_path"] == "": + backdrop_path = "" + else: + backdrop_path = 'http://image.tmdb.org/t/p/' + size + self.result["backdrop_path"] + + if tipo_respuesta == 'str': + return backdrop_path + elif self.result["id"] == "": + return [] + + if len(self.result['images_backdrops']) == 0: + # Vamos a lanzar una busqueda por id y releer de nuevo todo + self.busqueda_id = str(self.result["id"]) + self.__by_id() + + if len(self.result['images_backdrops']) > 0: + for i in self.result['images_backdrops']: + imagen_path = i['file_path'] + if size != "original": + # No podemos pedir tamaños mayores que el original + if size[1] == 'w' and int(imagen_path['width']) < int(size[1:]): + size = "original" + elif size[1] == 'h' and int(imagen_path['height']) < int(size[1:]): + size = "original" + ret.append('http://image.tmdb.org/t/p/' + size + imagen_path) + else: + ret.append(backdrop_path) + + return ret + + def get_temporada(self, numtemporada=1): + # -------------------------------------------------------------------------------------------------------------------------------------------- + # Parametros: + # numtemporada: (int) Numero de temporada. Por defecto 1. + # Return: (dic) + # Devuelve un dicionario con datos sobre la temporada. + # Puede obtener mas informacion sobre los datos devueltos en: + # http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumber/get + # http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumbercredits/get + # -------------------------------------------------------------------------------------------------------------------------------------------- + if not self.result["id"] or self.busqueda_tipo != "tv": + return {} + + numtemporada = int(numtemporada) + if numtemporada < 0: + numtemporada = 1 + + if not self.temporada.get(numtemporada, {}): + # Si no hay datos sobre la temporada solicitada, consultar en la web + + # http://api.themoviedb.org/3/tv/1407/season/1?api_key=6889f6089877fd092454d00edb44a84d&language=es& + # append_to_response=credits + url = "http://api.themoviedb.org/3/tv/%s/season/%s?api_key=6889f6089877fd092454d00edb44a84d&language=%s" \ + "&append_to_response=credits" % (self.result["id"], numtemporada, self.busqueda_idioma) + + buscando = "id_Tmdb: " + str(self.result["id"]) + " temporada: " + str(numtemporada) + "\nURL: " + url + logger.info("[Tmdb.py] Buscando " + buscando) + try: + self.temporada[numtemporada] = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) + except: + self.temporada[numtemporada] = {"status_code": 15, "status_message": "Failed"} + + if "status_code" in self.temporada[numtemporada]: + # Se ha producido un error + msg = "La busqueda de " + buscando + " no dio resultados." 
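+                # (self.temporada caches one dict per season number, so repeated calls
+                #  for the same season only hit the web once; a failed fetch is cached
+                #  as {"episodes": {}} just below)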
+ msg += "\nError de tmdb: %s %s" % ( + self.temporada[numtemporada]["status_code"], self.temporada[numtemporada]["status_message"]) + logger.debug(msg) + self.temporada[numtemporada] = {"episodes": {}} + + return self.temporada[numtemporada] + + def get_episodio(self, numtemporada=1, capitulo=1): + # -------------------------------------------------------------------------------------------------------------------------------------------- + # Parametros: + # numtemporada(opcional): (int) Numero de temporada. Por defecto 1. + # capitulo: (int) Numero de capitulo. Por defecto 1. + # Return: (dic) + # Devuelve un dicionario con los siguientes elementos: + # "temporada_nombre", "temporada_sinopsis", "temporada_poster", "temporada_num_episodios"(int), + # "temporada_air_date", "episodio_vote_count", "episodio_vote_average", + # "episodio_titulo", "episodio_sinopsis", "episodio_imagen", "episodio_air_date", + # "episodio_crew" y "episodio_guest_stars", + # Con capitulo == -1 el diccionario solo tendra los elementos referentes a la temporada + # -------------------------------------------------------------------------------------------------------------------------------------------- + if not self.result["id"] or self.busqueda_tipo != "tv": + return {} + + try: + capitulo = int(capitulo) + numtemporada = int(numtemporada) + except ValueError: + logger.debug("El número de episodio o temporada no es valido") + return {} + + temporada = self.get_temporada(numtemporada) + if not temporada: + # Se ha producido un error + return {} + + if len(temporada["episodes"]) == 0 or len(temporada["episodes"]) < capitulo: + # Se ha producido un error + logger.error("Episodio %d de la temporada %d no encontrado." % (capitulo, numtemporada)) + return {} + + ret_dic = dict() + # Obtener datos para esta temporada + ret_dic["temporada_nombre"] = temporada["name"] + ret_dic["temporada_sinopsis"] = temporada["overview"] + ret_dic["temporada_num_episodios"] = len(temporada["episodes"]) + if temporada["air_date"]: + date = temporada["air_date"].split("-") + ret_dic["temporada_air_date"] = date[2] + "/" + date[1] + "/" + date[0] + else: + ret_dic["temporada_air_date"] = "" + if temporada["poster_path"]: + ret_dic["temporada_poster"] = 'http://image.tmdb.org/t/p/original' + temporada["poster_path"] + else: + ret_dic["temporada_poster"] = "" + dic_aux = temporada.get('credits', {}) + ret_dic["temporada_cast"] = dic_aux.get('cast', []) + ret_dic["temporada_crew"] = dic_aux.get('crew', []) + if capitulo == -1: + # Si solo buscamos datos de la temporada, + # incluir el equipo tecnico que ha intervenido en algun capitulo + dic_aux = dict((i['id'], i) for i in ret_dic["temporada_crew"]) + for e in temporada["episodes"]: + for crew in e['crew']: + if crew['id'] not in dic_aux.keys(): + dic_aux[crew['id']] = crew + ret_dic["temporada_crew"] = dic_aux.values() + + # Obtener datos del capitulo si procede + if capitulo != -1: + episodio = temporada["episodes"][capitulo - 1] + ret_dic["episodio_titulo"] = episodio["name"] + ret_dic["episodio_sinopsis"] = episodio["overview"] + if episodio["air_date"]: + date = episodio["air_date"].split("-") + ret_dic["episodio_air_date"] = date[2] + "/" + date[1] + "/" + date[0] + else: + ret_dic["episodio_air_date"] = "" + ret_dic["episodio_crew"] = episodio["crew"] + ret_dic["episodio_guest_stars"] = episodio["guest_stars"] + ret_dic["episodio_vote_count"] = episodio["vote_count"] + ret_dic["episodio_vote_average"] = episodio["vote_average"] + if episodio["still_path"]: + ret_dic["episodio_imagen"] 
= 'http://image.tmdb.org/t/p/original' + episodio["still_path"] + else: + ret_dic["episodio_imagen"] = "" + + return ret_dic + + def get_videos(self): + """ + :return: Devuelve una lista ordenada (idioma/resolucion/tipo) de objetos Dict en la que cada uno de + sus elementos corresponde con un trailer, teaser o clip de youtube. + :rtype: list of Dict + """ + ret = [] + if self.result['id']: + if self.result['videos']: + self.result["videos"] = self.result["videos"]['results'] + else: + # Primera búsqueda de videos en el idioma de busqueda + url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=6889f6089877fd092454d00edb44a84d&language=%s" \ + % (self.busqueda_tipo, self.result['id'], self.busqueda_idioma) + try: + dict_videos = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) + except: + pass + + if dict_videos['results']: + dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size'])) + self.result["videos"] = dict_videos['results'] + + # Si el idioma de busqueda no es ingles, hacer una segunda búsqueda de videos en inglés + if self.busqueda_idioma != 'en': + url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=6889f6089877fd092454d00edb44a84d" \ + % (self.busqueda_tipo, self.result['id']) + try: + dict_videos = jsontools.load(scrapertools.downloadpageWithoutCookies(url)) + except: + pass + + if dict_videos['results']: + dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size'])) + self.result["videos"].extend(dict_videos['results']) + + if "status_code" in dict_videos: + msg = "Error de tmdb: %s %s" % (dict_videos["status_code"], dict_videos["status_message"]) + logger.debug(msg) + + # Si las busqueda han obtenido resultados devolver un listado de objetos + for i in self.result['videos']: + if i['site'] == "YouTube": + ret.append({'name': i['name'], + 'url': "https://www.youtube.com/watch?v=%s" % i['key'], + 'size': str(i['size']), + 'type': i['type'], + 'language': i['iso_639_1']}) + + return ret + + def get_infoLabels(self, infoLabels=None, origen=None): + """ + :param infoLabels: Informacion extra de la pelicula, serie, temporada o capitulo. + :type infoLabels: Dict + :param origen: Diccionario origen de donde se obtiene los infoLabels, por omision self.result + :type origen: Dict + :return: Devuelve la informacion extra obtenida del objeto actual. Si se paso el parametro infoLables, el valor + devuelto sera el leido como parametro debidamente actualizado. 
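+        A usage sketch (the id is an example; the keys listed are among those the
+        loop below fills in):
+
+            otmdb = Tmdb(id_Tmdb="1396", tipo="tv")
+            item.infoLabels = otmdb.get_infoLabels(item.infoLabels)
+            # item.infoLabels now carries 'title', 'plot', 'rating', 'genre',
+            # 'fanart', 'tmdb_id', ...
+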
+ :rtype: Dict + """ + + if infoLabels: + ret_infoLabels = InfoLabels(infoLabels) + else: + ret_infoLabels = InfoLabels() + + # Iniciar listados + l_country = [i.strip() for i in ret_infoLabels['country'].split(',') if ret_infoLabels['country']] + l_director = [i.strip() for i in ret_infoLabels['director'].split(',') if ret_infoLabels['director']] + l_writer = [i.strip() for i in ret_infoLabels['writer'].split(',') if ret_infoLabels['writer']] + l_castandrole = ret_infoLabels.get('castandrole', []) + + if not origen: + origen = self.result + + if 'credits' in origen.keys(): + dic_origen_credits = origen['credits'] + origen['credits_cast'] = dic_origen_credits.get('cast', []) + origen['credits_crew'] = dic_origen_credits.get('crew', []) + del origen['credits'] + + items = origen.items() + + # Informacion Temporada/episodio + if ret_infoLabels['season'] and self.temporada.get(ret_infoLabels['season']): + # Si hay datos cargados de la temporada indicada + episodio = -1 + if ret_infoLabels['episode']: episodio = ret_infoLabels['episode'] + + items.extend(self.get_episodio(ret_infoLabels['season'], episodio).items()) + + # logger.info("ret_infoLabels" % ret_infoLabels) + + for k, v in items: + if not v: + continue + elif type(v) == str: + v = re.sub(r"\n|\r|\t", "", v) + # fix + if v == "None": + continue + + if k == 'overview': + if origen: + ret_infoLabels['plot'] = v + else: + ret_infoLabels['plot'] = self.get_sinopsis() + + elif k == 'runtime': + ret_infoLabels['duration'] = int(v) * 60 + + elif k == 'release_date': + ret_infoLabels['year'] = int(v[:4]) + ret_infoLabels['release_date'] = v.split("-")[2] + "/" + v.split("-")[1] + "/" + v.split("-")[0] + + elif k == 'first_air_date': + ret_infoLabels['year'] = int(v[:4]) + ret_infoLabels['aired'] = v.split("-")[2] + "/" + v.split("-")[1] + "/" + v.split("-")[0] + ret_infoLabels['premiered'] = ret_infoLabels['aired'] + + elif k == 'original_title' or k == 'original_name': + ret_infoLabels['originaltitle'] = v + + elif k == 'vote_average': + ret_infoLabels['rating'] = float(v) + + elif k == 'vote_count': + ret_infoLabels['votes'] = v + + elif k == 'poster_path': + ret_infoLabels['thumbnail'] = 'http://image.tmdb.org/t/p/original' + v + + elif k == 'backdrop_path': + ret_infoLabels['fanart'] = 'http://image.tmdb.org/t/p/original' + v + + elif k == 'id': + ret_infoLabels['tmdb_id'] = v + + elif k == 'imdb_id': + ret_infoLabels['imdb_id'] = v + + elif k == 'external_ids': + if 'tvdb_id' in v: ret_infoLabels['tvdb_id'] = v['tvdb_id'] + if 'imdb_id' in v: ret_infoLabels['imdb_id'] = v['imdb_id'] + + elif k in ['genres', "genre_ids", "genre"]: + ret_infoLabels['genre'] = self.get_generos(origen) + + elif k == 'name' or k == 'title': + ret_infoLabels['title'] = v + + elif k == 'production_companies': + ret_infoLabels['studio'] = ", ".join(i['name'] for i in v) + + elif k == 'credits_cast' or k == 'temporada_cast' or k == 'episodio_guest_stars': + dic_aux = dict((name, character) for (name, character) in l_castandrole) + l_castandrole.extend([(p['name'], p['character']) for p in v if p['name'] not in dic_aux.keys()]) + + elif k == 'videos': + if not isinstance(v, list): + v = v.get('result', []) + for i in v: + if i.get("site", "") == "YouTube": + ret_infoLabels['trailer'] = "https://www.youtube.com/watch?v=" + v[0]["key"] + break + + elif k == 'production_countries' or k == 'origin_country': + if isinstance(v, str): + l_country = list(set(l_country + v.split(','))) + + elif isinstance(v, list) and len(v) > 0: + if isinstance(v[0], str): + l_country = 
list(set(l_country + v)) + elif isinstance(v[0], dict): + # {'iso_3166_1': 'FR', 'name':'France'} + for i in v: + if i.has_key('iso_3166_1'): + pais = Tmdb.dic_country.get(i['iso_3166_1'], i['iso_3166_1']) + l_country = list(set(l_country + [pais])) + + elif k == 'credits_crew' or k == 'episodio_crew' or k == 'temporada_crew': + for crew in v: + if crew['job'].lower() == 'director': + l_director = list(set(l_director + [crew['name']])) + + elif crew['job'].lower() in ('screenplay', 'writer'): + l_writer = list(set(l_writer + [crew['name']])) + + elif k == 'created_by': + for crew in v: + l_writer = list(set(l_writer + [crew['name']])) + + + elif isinstance(v, str) or isinstance(v, int) or isinstance(v, float): + ret_infoLabels[k] = v + + else: + # logger.debug("Atributos no añadidos: " + k +'= '+ str(v)) + pass + + # Ordenar las listas y convertirlas en str si es necesario + if l_castandrole: + ret_infoLabels['castandrole'] = sorted(l_castandrole, key=lambda tup: tup[0]) + if l_country: + ret_infoLabels['country'] = ', '.join(sorted(l_country)) + if l_director: + ret_infoLabels['director'] = ', '.join(sorted(l_director)) + if l_writer: + ret_infoLabels['writer'] = ', '.join(sorted(l_writer)) + + return ret_infoLabels diff --git a/plugin.video.alfa/core/tvdb.py b/plugin.video.alfa/core/tvdb.py new file mode 100755 index 00000000..d1b31893 --- /dev/null +++ b/plugin.video.alfa/core/tvdb.py @@ -0,0 +1,1135 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# tvdb +# ------------------------------------------------------------ +# Scraper para el site thetvdb.com usando API v2.1 +# Utilizado para obtener datos de series para la videoteca +# del addon y también Kodi. +# ------------------------------------------------------------ + +import re +import urllib2 + +from core import config +from core import jsontools +from core import logger +from core import scrapertools +from core.item import InfoLabels +from platformcode import platformtools + +HOST = "https://api.thetvdb.com" +HOST_IMAGE = "http://thetvdb.com/banners/" + +# comprobación tras el cambio de tipos en config.get_setting +if config.get_setting("tvdb_token") is not None: + TOKEN = config.get_setting("tvdb_token") +else: + TOKEN = "" + +DEFAULT_LANG = "es" +DEFAULT_HEADERS = { + 'Content-Type': 'application/json', + 'Accept': 'application/json, application/vnd.thetvdb.v2.1.1', + 'Accept-Language': DEFAULT_LANG, + 'Authorization': 'Bearer ' + TOKEN, +} + +# Traducciones - Inicio +DICT_STATUS = {'Continuing': 'En emisión', 'Ended': 'Finalizada'} +DICT_GENRE = { + 'Action': 'Acción', + 'Adventure': 'Aventura', + 'Animation': 'Animación', + 'Children': 'Niños', + 'Comedy': 'Comedia', + 'Crime': 'Crimen', + 'Documentary': 'Documental', + # 'Drama': 'Drama', + 'Family': 'Familiar', + 'Fantasy': 'Fantasía', + 'Food': 'Comida', + 'Game Show': 'Concurso', + 'Home and Garden': 'Hogar y Jardín', + # 'Horror': 'Horror', 'Mini-Series': 'Mini-Series', + 'Mystery': 'Misterio', + 'News': 'Noticias', + # 'Reality': 'Telerrealidad', + 'Romance': 'Romántico', + 'Science-Fiction': 'Ciencia-Ficción', + 'Soap': 'Telenovela', + # 'Special Interest': 'Special Interest', + 'Sport': 'Deporte', + # 'Suspense': 'Suspense', + 'Talk Show': 'Programa de Entrevistas', + # 'Thriller': 'Thriller', + 'Travel': 'Viaje', + # 'Western': 'Western' +} +DICT_MPAA = {'TV-Y': 'Público pre-infantil: niños menores de 6 años', 'TV-Y7': 'Público infantil: desde 7 años', + 'TV-G': 'Público general: sin supervisión familiar', 'TV-PG': 'Guía 
paterna: Supervisión paternal', + 'TV-14': 'Mayores de 14 años', 'TV-MA': 'Mayores de 17 años'} +# Traducciones - Fin + +otvdb_global = None + + +def find_and_set_infoLabels(item): + logger.info() + # logger.info("item es %s" % item) + + p_dialog = None + if not item.contentSeason: + p_dialog = platformtools.dialog_progress_bg("Buscando información de la serie", "Espere por favor...") + + global otvdb_global + tvdb_result = None + + title = item.contentSerieName + # Si el titulo incluye el (año) se lo quitamos + year = scrapertools.find_single_match(title, "^.+?\s*(\(\d{4}\))$") + if year: + title = title.replace(year, "").strip() + item.infoLabels['year'] = year[1:-1] + + if not item.infoLabels.get("tvdb_id"): + if not item.infoLabels.get("imdb_id"): + otvdb_global = Tvdb(search=title, year=item.infoLabels['year']) + else: + otvdb_global = Tvdb(imdb_id=item.infoLabels.get("imdb_id")) + + elif not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']: + otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id']) # , tipo=tipo_busqueda, idioma_busqueda="es") + + if not item.contentSeason: + p_dialog.update(50, "Buscando información de la serie", "Obteniendo resultados...") + results, info_load = otvdb_global.get_list_results() + logger.debug("results es %s" % results) + + if not item.contentSeason: + p_dialog.update(100, "Buscando información de la serie", "Encontrados %s posibles coincidencias" % len(results)) + p_dialog.close() + + if len(results) > 1: + tvdb_result = platformtools.show_video_info(results, item=item, scraper=Tvdb, + caption="[%s]: Selecciona la serie correcta" % title) + elif len(results) > 0: + tvdb_result = results[0] + + # todo revisar + if isinstance(item.infoLabels, InfoLabels): + logger.debug("es instancia de infoLabels") + infoLabels = item.infoLabels + else: + logger.debug("NO ES instancia de infoLabels") + infoLabels = InfoLabels() + + if tvdb_result: + infoLabels['tvdb_id'] = tvdb_result['id'] + infoLabels['url_scraper'] = ["http://thetvdb.com/index.php?tab=series&id=%s" % infoLabels['tvdb_id']] + if not info_load: + if otvdb_global.get_id() != infoLabels['tvdb_id']: + otvdb_global = Tvdb(tvdb_id=infoLabels['tvdb_id']) + otvdb_global.get_images(infoLabels['tvdb_id'], image="poster") + otvdb_global.get_images(infoLabels['tvdb_id'], image="fanart") + otvdb_global.get_tvshow_cast(infoLabels['tvdb_id']) + + item.infoLabels = infoLabels + set_infoLabels_item(item) + + return True + + else: + item.infoLabels = infoLabels + return False + + +def set_infoLabels_item(item): + """ + Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula. + @param item: Objeto que representa un pelicula, serie o capitulo. El atributo infoLabels sera modificado + incluyendo los datos extras localizados. 
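+    A minimal call sketch (values are illustrative; Item is assumed to come from core.item):
+
+        item = Item(contentSerieName="Lost", contentType="tvshow")
+        item.infoLabels = InfoLabels({'tvdb_id': '73739'})
+        set_infoLabels_item(item)
+        # item.infoLabels, item.thumbnail and item.fanart are now populated
+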
+ @type item: Item + + + """ + global otvdb_global + + def __leer_datos(otvdb_aux): + item.infoLabels = otvdb_aux.get_infoLabels(item.infoLabels) + if 'infoLabels' in item and 'thumbnail' in item.infoLabels: + item.thumbnail = item.infoLabels['thumbnail'] + if 'infoLabels' in item and 'fanart' in item.infoLabels['fanart']: + item.fanart = item.infoLabels['fanart'] + + if 'infoLabels' in item and 'season' in item.infoLabels: + try: + int_season = int(item.infoLabels['season']) + except ValueError: + logger.debug("El numero de temporada no es valido") + item.contentType = item.infoLabels['mediatype'] + return -1 * len(item.infoLabels) + + if not otvdb_global or \ + (item.infoLabels['tvdb_id'] and otvdb_global.get_id() != item.infoLabels['tvdb_id']) \ + or (otvdb_global.search_name and otvdb_global.search_name != item.infoLabels['tvshowtitle']): + if item.infoLabels['tvdb_id']: + otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id']) + else: + otvdb_global = Tvdb(search=item.infoLabels['tvshowtitle']) + + __leer_datos(otvdb_global) + + if item.infoLabels['episode']: + try: + int_episode = int(item.infoLabels['episode']) + except ValueError: + logger.debug("El número de episodio (%s) no es valido" % repr(item.infoLabels['episode'])) + item.contentType = item.infoLabels['mediatype'] + return -1 * len(item.infoLabels) + + # Tenemos numero de temporada y numero de episodio validos... + # ... buscar datos episodio + item.infoLabels['mediatype'] = 'episode' + + lang = DEFAULT_LANG + if otvdb_global.lang: + lang = otvdb_global.lang + + page = 1 + _id = None + while not _id: + list_episodes = otvdb_global.list_episodes.get(page) + if not list_episodes: + list_episodes = otvdb_global.get_list_episodes(otvdb_global.get_id(), page) + import threading + semaforo = threading.Semaphore(20) + l_hilo = list() + + for e in list_episodes["data"]: + t = threading.Thread(target=otvdb_global.get_episode_by_id, args=(e["id"], lang, semaforo)) + t.start() + l_hilo.append(t) + + # esperar q todos los hilos terminen + for x in l_hilo: + x.join() + + for e in list_episodes['data']: + if e['airedSeason'] == int_season and e['airedEpisodeNumber'] == int_episode: + _id = e['id'] + break + + _next = list_episodes['links']['next'] + if type(_next) == int: + page = _next + else: + break + + data_episode = otvdb_global.get_info_episode(otvdb_global.get_id(), int_season, int_episode, lang, _id) + + # todo repasar valores que hay que insertar en infoLabels + if data_episode: + item.infoLabels['title'] = data_episode['episodeName'] + # fix en casos que el campo desde la api era null--> None + if data_episode["overview"] is not None: + item.infoLabels['plot'] = data_episode["overview"] + + item.thumbnail = HOST_IMAGE + data_episode.get('filename', "") + + item.infoLabels["rating"] = data_episode.get("siteRating", "") + item.infoLabels['director'] = ', '.join(sorted(data_episode.get('directors', []))) + item.infoLabels['writer'] = ', '.join(sorted(data_episode.get("writers", []))) + + if data_episode["firstAired"]: + item.infoLabels['premiered'] = data_episode["firstAired"].split("-")[2] + "/" + \ + data_episode["firstAired"].split("-")[1] + "/" + \ + data_episode["firstAired"].split("-")[0] + item.infoLabels['aired'] = item.infoLabels['premiered'] + + guest_stars = data_episode.get("guestStars", []) + l_castandrole = item.infoLabels.get("castandrole", []) + l_castandrole.extend([(p, '') for p in guest_stars]) + item.infoLabels['castandrole'] = l_castandrole + + # datos para nfo + item.season_id = data_episode["airedSeasonID"] + 
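+            # (both ids are consumed later by get_nfo(), which builds the
+            #  thetvdb.com episode url written into the .nfo file)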
item.episode_id = data_episode["id"] + + return len(item.infoLabels) + + else: + # Tenemos numero de temporada valido pero no numero de episodio... + # ... buscar datos temporada + item.infoLabels['mediatype'] = 'season' + data_season = otvdb_global.get_images(otvdb_global.get_id(), "season", int_season) + + if data_season and 'image_season_%s' % int_season in data_season: + item.thumbnail = HOST_IMAGE + data_season['image_season_%s' % int_season][0]['fileName'] + return len(item.infoLabels) + + # Buscar... + else: + # Busquedas por ID... + if (not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']) and item.infoLabels['tvdb_id']: + otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id']) + + elif not otvdb_global and item.infoLabels['imdb_id']: + otvdb_global = Tvdb(imdb_id=item.infoLabels['imdb_id']) + + elif not otvdb_global and item.infoLabels['zap2it_id']: + otvdb_global = Tvdb(zap2it_id=item.infoLabels['zap2it_id']) + + # No se ha podido buscar por ID... se hace por título + if otvdb_global is None: + otvdb_global = Tvdb(search=item.infoLabels['tvshowtitle']) + + if otvdb_global and otvdb_global.get_id(): + __leer_datos(otvdb_global) + # La busqueda ha encontrado un resultado valido + return len(item.infoLabels) + + +def get_nfo(item): + """ + Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, + + @param item: elemento que contiene los datos necesarios para generar la info + @type item: Item + @rtype: str + @return: + """ + + if "season" in item.infoLabels and "episode" in item.infoLabels: + info_nfo = "http://thetvdb.com/?tab=episode&seriesid=%s&seasonid=%s&id=%s\n" \ + % (item.infoLabels['tvdb_id'], item.season_id, item.episode_id) + else: + info_nfo = ', '.join(item.infoLabels['url_scraper']) + "\n" + + return info_nfo + + +def completar_codigos(item): + """ + Si es necesario comprueba si existe el identificador de tmdb y sino existe trata de buscarlo + @param item: tipo item + @type item: Item + """ + if not item.infoLabels['tmdb_id']: + listsources = [(item.infoLabels['tvdb_id'], "tvdb_id")] + if item.infoLabels['imdb_id']: + listsources.append((item.infoLabels['imdb_id'], "imdb_id")) + + from core.tmdb import Tmdb + ob = Tmdb() + + for external_id, external_source in listsources: + ob.search_by_id(id=external_id, source=external_source, tipo='tv') + + item.infoLabels['tmdb_id'] = ob.get_id() + if item.infoLabels['tmdb_id']: + url_scraper = "https://www.themoviedb.org/tv/%s" % item.infoLabels['tmdb_id'] + item.infoLabels['url_scraper'].append(url_scraper) + break + + +class Tvdb: + def __init__(self, **kwargs): + + self.__check_token() + + self.result = {} + self.list_results = [] + self.lang = "" + self.search_name = kwargs['search'] = \ + re.sub('\[\\\?(B|I|COLOR)\s?[^\]]*\]', '', kwargs.get('search', '')) + self.list_episodes = {} + self.episodes = {} + + if kwargs.get('tvdb_id', ''): + # Busqueda por identificador tvdb + self.__get_by_id(kwargs.get('tvdb_id', '')) + if not self.list_results and config.get_setting("tvdb_retry_eng", "videolibrary"): + from platformcode import platformtools + platformtools.dialog_notification("No se ha encontrado en idioma '%s'" % DEFAULT_LANG, + "Se busca en idioma 'en'") + self.__get_by_id(kwargs.get('tvdb_id', ''), "en") + self.lang = "en" + + elif self.search_name: + # Busqueda por texto + self.__search(kwargs.get('search', ''), kwargs.get('imdb_id', ''), kwargs.get('zap2it_id', '')) + if not self.list_results and config.get_setting("tvdb_retry_eng", "videolibrary"): + from 
platformcode import platformtools + platformtools.dialog_notification("No se ha encontrado en idioma '%s'" % DEFAULT_LANG, + "Se busca en idioma 'en'") + self.__search(kwargs.get('search', ''), kwargs.get('imdb_id', ''), kwargs.get('zap2it_id', ''), "en") + self.lang = "en" + + if not self.result: + # No hay resultados de la busqueda + if kwargs.get('tvdb_id', ''): + buscando = kwargs.get('tvdb_id', '') + else: + buscando = kwargs.get('search', '') + msg = "La busqueda de %s no dio resultados." % buscando + logger.debug(msg) + + @classmethod + def __check_token(cls): + # logger.info() + if TOKEN == "": + cls.__login() + else: + # si la fecha no se corresponde con la actual llamamos a refresh_token, ya que el token expira en 24 horas + from time import gmtime, strftime + current_date = strftime("%Y-%m-%d", gmtime()) + + if config.get_setting("tvdb_token_date", "") != current_date: + # si se ha renovado el token grabamos la nueva fecha + if cls.__refresh_token(): + config.set_setting("tvdb_token_date", current_date) + + @staticmethod + def __login(): + # logger.info() + global TOKEN + + apikey = "106B699FDC04301C" + + url = HOST + "/login" + params = {"apikey": apikey} + + try: + req = urllib2.Request(url, data=jsontools.dump(params), headers=DEFAULT_HEADERS) + response = urllib2.urlopen(req) + html = response.read() + response.close() + + except Exception, ex: + message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) + logger.error("error en: %s" % message) + + else: + dict_html = jsontools.load(html) + # logger.debug("dict_html %s" % dict_html) + + if "token" in dict_html: + token = dict_html["token"] + DEFAULT_HEADERS["Authorization"] = "Bearer " + token + + TOKEN = config.set_setting("tvdb_token", token) + + @classmethod + def __refresh_token(cls): + # logger.info() + global TOKEN + is_success = False + + url = HOST + "/refresh_token" + + try: + req = urllib2.Request(url, headers=DEFAULT_HEADERS) + response = urllib2.urlopen(req) + html = response.read() + response.close() + + except urllib2.HTTPError, err: + logger.error("err.code es %s" % err.code) + # si hay error 401 es que el token se ha pasado de tiempo y tenemos que volver a llamar a login + if err.code == 401: + cls.__login() + else: + raise + + except Exception, ex: + message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) + logger.error("error en: %s" % message) + + else: + dict_html = jsontools.load(html) + # logger.error("tokencito %s" % dict_html) + if "token" in dict_html: + token = dict_html["token"] + DEFAULT_HEADERS["Authorization"] = "Bearer " + token + TOKEN = config.set_setting("tvdb_token", token) + is_success = True + + return is_success + + def get_info_episode(self, _id, season=1, episode=1, lang=DEFAULT_LANG, id_episode=None): + """ + Devuelve los datos de un episodio. + @param _id: identificador de la serie + @type _id: str + @param season: numero de temporada [por defecto = 1] + @type season: int + @param episode: numero de episodio [por defecto = 1] + @type episode: int + @param lang: codigo de idioma para buscar + @type lang: str + @param id_episode: codigo del episodio. 
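+        Illustrative request this method ends up issuing (series id and numbers
+        are examples):
+
+            GET https://api.thetvdb.com/series/73739/episodes/query?airedSeason=1&airedEpisode=2
+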
+ @type id_episode: int + @rtype: dict + @return: + "data": { + "id": 0, + "airedSeason": 0, + "airedEpisodeNumber": 0, + "episodeName": "string", + "firstAired": "string", + "guestStars": [ + "string" + ], + "director": "string", # deprecated + "directors": [ + "string" + ], + "writers": [ + "string" + ], + "overview": "string", + "productionCode": "string", + "showUrl": "string", + "lastUpdated": 0, + "dvdDiscid": "string", + "dvdSeason": 0, + "dvdEpisodeNumber": 0, + "dvdChapter": 0, + "absoluteNumber": 0, + "filename": "string", + "seriesId": "string", + "lastUpdatedBy": "string", + "airsAfterSeason": 0, + "airsBeforeSeason": 0, + "airsBeforeEpisode": 0, + "thumbAuthor": 0, + "thumbAdded": "string", + "thumbWidth": "string", + "thumbHeight": "string", + "imdbId": "string", + "siteRating": 0, + "siteRatingCount": 0 + }, + "errors": { + "invalidFilters": [ + "string" + ], + "invalidLanguage": "string", + "invalidQueryParams": [ + "string" + ] + } + """ + logger.info() + if id_episode and self.episodes.get(id_episode): + return self.episodes.get(id_episode) + + params = {"airedSeason": "%s" % season, "airedEpisode": "%s" % episode} + + try: + import urllib + params = urllib.urlencode(params) + + url = HOST + "/series/%s/episodes/query?%s" % (_id, params) + DEFAULT_HEADERS["Accept-Language"] = lang + logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) + + req = urllib2.Request(url, headers=DEFAULT_HEADERS) + response = urllib2.urlopen(req) + html = response.read() + response.close() + + except Exception, ex: + message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) + logger.error("error en: %s" % message) + + else: + dict_html = jsontools.load(html) + + if "data" in dict_html and "id" in dict_html["data"][0]: + self.get_episode_by_id(dict_html["data"][0]["id"], lang) + return dict_html["data"] + + def get_list_episodes(self, _id, page=1): + """ + Devuelve el listado de episodios de una serie. + @param _id: identificador de la serie + @type _id: str + @param page: numero de pagina a buscar [por defecto = 1] + @type page: int + @rtype: dict + @return: + { + "links": { + "first": 0, + "last": 0, + "next": 0, + "previous": 0 + }, + "data": [ + { + "absoluteNumber": 0, + "airedEpisodeNumber": 0, + "airedSeason": 0, + "dvdEpisodeNumber": 0, + "dvdSeason": 0, + "episodeName": "string", + "id": 0, + "overview": "string", + "firstAired": "string", + "lastUpdated": 0 + } + ], + "errors": { + "invalidFilters": [ + "string" + ], + "invalidLanguage": "string", + "invalidQueryParams": [ + "string" + ] + } + } + """ + logger.info() + + try: + url = HOST + "/series/%s/episodes?page=%s" % (_id, page) + logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) + + req = urllib2.Request(url, headers=DEFAULT_HEADERS) + response = urllib2.urlopen(req) + html = response.read() + response.close() + + except Exception, ex: + message = "An exception of type %s occured. 
Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) + logger.error("error en: %s" % message) + + else: + self.list_episodes[page] = jsontools.load(html) + + # logger.info("dict_html %s" % self.list_episodes) + + return self.list_episodes[page] + + def get_episode_by_id(self, _id, lang=DEFAULT_LANG, semaforo=None): + """ + Obtiene los datos de un episodio + @param _id: identificador del episodio + @type _id: str + @param lang: código de idioma + @param semaforo: semaforo para multihilos + @type semaforo: threading.Semaphore + @type lang: str + @rtype: dict + @return: + { + "data": { + "id": 0, + "airedSeason": 0, + "airedEpisodeNumber": 0, + "episodeName": "string", + "firstAired": "string", + "guestStars": [ + "string" + ], + "director": "string", + "directors": [ + "string" + ], + "writers": [ + "string" + ], + "overview": "string", + "productionCode": "string", + "showUrl": "string", + "lastUpdated": 0, + "dvdDiscid": "string", + "dvdSeason": 0, + "dvdEpisodeNumber": 0, + "dvdChapter": 0, + "absoluteNumber": 0, + "filename": "string", + "seriesId": "string", + "lastUpdatedBy": "string", + "airsAfterSeason": 0, + "airsBeforeSeason": 0, + "airsBeforeEpisode": 0, + "thumbAuthor": 0, + "thumbAdded": "string", + "thumbWidth": "string", + "thumbHeight": "string", + "imdbId": "string", + "siteRating": 0, + "siteRatingCount": 0 + }, + "errors": { + "invalidFilters": [ + "string" + ], + "invalidLanguage": "string", + "invalidQueryParams": [ + "string" + ] + } + } + """ + if semaforo: + semaforo.acquire() + logger.info() + + url = HOST + "/episodes/%s" % _id + + try: + DEFAULT_HEADERS["Accept-Language"] = lang + logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) + req = urllib2.Request(url, headers=DEFAULT_HEADERS) + response = urllib2.urlopen(req) + html = response.read() + response.close() + + except Exception, ex: + if type(ex) == urllib2.HTTPError: + logger.debug("code es %s " % ex.code) + + message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) + logger.error("error en: %s" % message) + + else: + dict_html = jsontools.load(html) + dict_html = dict_html.pop("data") + + logger.info("dict_html %s" % dict_html) + self.episodes[_id] = dict_html + + if semaforo: + semaforo.release() + + def __search(self, name, imdb_id, zap2it_id, lang=DEFAULT_LANG): + """ + Busca una serie a través de una serie de parámetros. + @param name: nombre a buscar + @type name: str + @param imdb_id: codigo identificativo de imdb + @type imdb_id: str + @param zap2it_id: codigo identificativo de zap2it + @type zap2it_id: str + @param lang: código de idioma + @type lang: str + + data:{ + "aliases": [ + "string" + ], + "banner": "string", + "firstAired": "string", + "id": 0, + "network": "string", + "overview": "string", + "seriesName": "string", + "status": "string" + } + """ + logger.info() + + try: + + params = {} + if name: + params["name"] = name + elif imdb_id: + params["imdbId"] = imdb_id + elif zap2it_id: + params["zap2itId"] = zap2it_id + + import urllib + params = urllib.urlencode(params) + + DEFAULT_HEADERS["Accept-Language"] = lang + url = HOST + "/search/series?%s" % params + logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) + + req = urllib2.Request(url, headers=DEFAULT_HEADERS) + response = urllib2.urlopen(req) + html = response.read() + response.close() + + except Exception, ex: + if type(ex) == urllib2.HTTPError: + logger.debug("code es %s " % ex.code) + + message = "An exception of type %s occured. 
Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) + logger.error("error en: %s" % message) + + else: + dict_html = jsontools.load(html) + + if "errors" in dict_html and "invalidLanguage" in dict_html["errors"]: + # no hay información en idioma por defecto + return + + else: + resultado = dict_html["data"] + + # todo revisar + if len(resultado) > 1: + index = 0 + else: + index = 0 + + logger.debug("resultado %s" % resultado) + self.list_results = resultado + self.result = resultado[index] + + def __get_by_id(self, _id, lang=DEFAULT_LANG, from_get_list=False): + """ + Obtiene los datos de una serie por identificador. + @param _id: código de la serie + @type _id: str + @param lang: código de idioma + @type lang: str + @rtype: dict + @return: + { + "data": { + "id": 0, + "seriesName": "string", + "aliases": [ + "string" + ], + "banner": "string", + "seriesId": 0, + "status": "string", + "firstAired": "string", + "network": "string", + "networkId": "string", + "runtime": "string", + "genre": [ + "string" + ], + "overview": "string", + "lastUpdated": 0, + "airsDayOfWeek": "string", + "airsTime": "string", + "rating": "string", + "imdbId": "string", + "zap2itId": "string", + "added": "string", + "siteRating": 0, + "siteRatingCount": 0 + }, + "errors": { + "invalidFilters": [ + "string" + ], + "invalidLanguage": "string", + "invalidQueryParams": [ + "string" + ] + } + } + """ + logger.info() + resultado = {} + + url = HOST + "/series/%s" % _id + + try: + DEFAULT_HEADERS["Accept-Language"] = lang + req = urllib2.Request(url, headers=DEFAULT_HEADERS) + logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) + + response = urllib2.urlopen(req) + html = response.read() + response.close() + + except Exception, ex: + if type(ex) == urllib2.HTTPError: + logger.debug("code es %s " % ex.code) + + message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) + logger.error("error en: %s" % message) + + else: + dict_html = jsontools.load(html) + + if "errors" in dict_html and "invalidLanguage" in dict_html["errors"]: + return {} + else: + resultado1 = dict_html["data"] + if not resultado1 and from_get_list: + return self.__get_by_id(_id, "en") + + logger.debug("resultado %s" % dict_html) + resultado2 = {"image_poster": [{'keyType': 'poster', 'fileName': 'posters/%s-1.jpg' % _id}]} + resultado3 = {"image_fanart": [{'keyType': 'fanart', 'fileName': 'fanart/original/%s-1.jpg' % _id}]} + + resultado = resultado1.copy() + resultado.update(resultado2) + resultado.update(resultado3) + + logger.debug("resultado total %s" % resultado) + self.list_results = [resultado] + self.result = resultado + + return resultado + + def get_images(self, _id, image="poster", season=1, lang="en"): + """ + Obtiene un tipo de imagen para una serie para un idioma. + @param _id: identificador de la serie + @type _id: str + @param image: codigo de busqueda, ["poster" (por defecto), "fanart", "season"] + @type image: str + @type season: numero de temporada + @param lang: código de idioma para el que se busca + @type lang: str + @return: diccionario con el tipo de imagenes elegidas. 
+ @rtype: dict + + """ + logger.info() + + if self.result.get('image_season_%s' % season): + return self.result['image_season_%s' % season] + + params = {} + if image == "poster": + params["keyType"] = "poster" + elif image == "fanart": + params["keyType"] = "fanart" + params["subKey"] = "graphical" + elif image == "season": + params["keyType"] = "season" + params["subKey"] = "%s" % season + image += "_%s" % season + + try: + + import urllib + params = urllib.urlencode(params) + DEFAULT_HEADERS["Accept-Language"] = lang + url = HOST + "/series/%s/images/query?%s" % (_id, params) + logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) + + req = urllib2.Request(url, headers=DEFAULT_HEADERS) + response = urllib2.urlopen(req) + html = response.read() + response.close() + + except Exception, ex: + message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) + logger.error("error en: %s" % message) + + return {} + + else: + dict_html = jsontools.load(html) + + dict_html["image_" + image] = dict_html.pop("data") + self.result.update(dict_html) + + return dict_html + + def get_tvshow_cast(self, _id, lang=DEFAULT_LANG): + """ + obtiene el casting de una serie + @param _id: codigo de la serie + @type _id: str + @param lang: codigo idioma para buscar + @type lang: str + @return: diccionario con los actores + @rtype: dict + """ + logger.info() + + url = HOST + "/series/%s/actors" % _id + DEFAULT_HEADERS["Accept-Language"] = lang + logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) + + req = urllib2.Request(url, headers=DEFAULT_HEADERS) + response = urllib2.urlopen(req) + html = response.read() + response.close() + + dict_html = jsontools.load(html) + + dict_html["cast"] = dict_html.pop("data") + self.result.update(dict_html) + + def get_id(self): + """ + @return: Devuelve el identificador Tvdb de la serie cargada o una cadena vacia en caso de que no + hubiese nada cargado. Se puede utilizar este metodo para saber si una busqueda ha dado resultado o no. + @rtype: str + """ + return str(self.result.get('id', "")) + + def get_list_results(self): + """ + Devuelve los resultados encontramos para una serie. + @rtype: list + @return: lista de resultados + """ + logger.info() + list_results = [] + + # TODO revisar condicion + # si tenemos un resultado y tiene seriesName, ya tenemos la info de la serie, no hace falta volver a buscar + if len(self.list_results) == 1 and "seriesName" in self.result: + list_results.append(self.result) + info_load = True + else: + import threading + semaforo = threading.Semaphore(20) + l_hilo = list() + r_list = list() + + def sub_thread(_id, i): + semaforo.acquire() + ret = self.__get_by_id(_id, DEFAULT_LANG, True) + semaforo.release() + r_list.append((ret, i)) + + for index, e in enumerate(self.list_results): + t = threading.Thread(target=sub_thread, args=(e["id"], index)) + t.start() + l_hilo.append(t) + + for x in l_hilo: + x.join() + + r_list.sort(key=lambda i: i[1]) + list_results = [ii[0] for ii in r_list] + info_load = False + return list_results, info_load + + def get_infoLabels(self, infoLabels=None, origen=None): + """ + @param infoLabels: Informacion extra de la pelicula, serie, temporada o capitulo. + @type infoLabels: dict + @param origen: Diccionario origen de donde se obtiene los infoLabels, por omision self.result + @type origen: dict + @return: Devuelve la informacion extra obtenida del objeto actual. 
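(Illustrative aside, not part of the original module: get_list_results above
throttles its per-id lookups with threading.Semaphore(20) and restores the
original order by sorting on the index each thread carries. The pattern in
isolation, with a try/finally added so a slot is freed even if the fetch
raises; fetch_all and fetch_one are hypothetical names used only for this
sketch:

    import threading

    def fetch_all(ids, fetch_one, max_concurrent=20):
        semaforo = threading.Semaphore(max_concurrent)
        pairs = []

        def worker(_id, i):
            semaforo.acquire()
            try:
                pairs.append((fetch_one(_id), i))
            finally:
                semaforo.release()

        threads = [threading.Thread(target=worker, args=(_id, i))
                   for i, _id in enumerate(ids)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        pairs.sort(key=lambda pair: pair[1])
        return [pair[0] for pair in pairs]
)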
Si se paso el parametro infoLabels, el valor
+        devuelto sera el leido como parametro debidamente actualizado.
+        @rtype: dict
+        """
+
+        # TODO: review
+        if infoLabels:
+            # logger.debug("es instancia de infoLabels")
+            ret_infoLabels = InfoLabels(infoLabels)
+        else:
+            # logger.debug("NO ES instancia de infoLabels")
+            ret_infoLabels = InfoLabels()
+        # fix
+        ret_infoLabels['mediatype'] = 'tvshow'
+
+        # Initialise the lists
+        l_castandrole = ret_infoLabels.get('castandrole', [])
+
+        # logger.debug("self.result %s" % self.result)
+
+        if not origen:
+            origen = self.result
+
+        # TODO: review
+        # if 'credits' in origen.keys():
+        #     dic_origen_credits = origen['credits']
+        #     origen['credits_cast'] = dic_origen_credits.get('cast', [])
+        #     origen['credits_crew'] = dic_origen_credits.get('crew', [])
+        #     del origen['credits']
+
+        items = origen.items()
+
+        for k, v in items:
+            if not v:
+                continue
+
+            if k == 'overview':
+                ret_infoLabels['plot'] = v
+
+            elif k == 'runtime':
+                ret_infoLabels['duration'] = int(v) * 60
+
+            elif k == 'firstAired':
+                ret_infoLabels['year'] = int(v[:4])
+                ret_infoLabels['premiered'] = v.split("-")[2] + "/" + v.split("-")[1] + "/" + v.split("-")[0]
+
+            # TODO: review
+            # elif k == 'original_title' or k == 'original_name':
+            #     ret_infoLabels['originaltitle'] = v
+
+            elif k == 'siteRating':
+                ret_infoLabels['rating'] = float(v)
+
+            elif k == 'siteRatingCount':
+                ret_infoLabels['votes'] = v
+
+            elif k == 'status':
+                # translate the status values of a series
+                ret_infoLabels['status'] = DICT_STATUS.get(v, v)
+
+            # putting the network in "studio" is debatable, but it is what the generic scraper does
+            elif k == 'network':
+                ret_infoLabels['studio'] = v
+
+            elif k == 'image_poster':
+                # take the first image in the list
+                ret_infoLabels['thumbnail'] = HOST_IMAGE + v[0]['fileName']
+
+            elif k == 'image_fanart':
+                # take the first image in the list
+                ret_infoLabels['fanart'] = HOST_IMAGE + v[0]['fileName']
+
+            # # the background image is not available
+            # elif k == 'banner':
+            #     ret_infoLabels['fanart'] = HOST_IMAGE + v
+
+            elif k == 'id':
+                ret_infoLabels['tvdb_id'] = v
+
+            elif k == 'imdbId':
+                ret_infoLabels['imdb_id'] = v
+                # not shown
+                # ret_infoLabels['code'] = v
+
+            elif k == 'rating':
+                # translate the age classification (content rating system)
+                ret_infoLabels['mpaa'] = DICT_MPAA.get(v, v)
+
+            elif k == 'genre':
+                # translate the genres and join them into a single string
+                ret_infoLabels['genre'] = ", ".join(DICT_GENRE.get(i, i) for i in v)
+
+            elif k == 'seriesName':  # or k == 'name' or k == 'title':
+                # if len(origen.get('aliases', [])) > 0:
+                #     ret_infoLabels['title'] = v + " " + origen.get('aliases', [''])[0]
+                # else:
+                #     ret_infoLabels['title'] = v
+                # logger.info("el titulo es %s " % ret_infoLabels['title'])
+                ret_infoLabels['title'] = v
+
+            elif k == 'cast':
+                dic_aux = dict((name, character) for (name, character) in l_castandrole)
+                l_castandrole.extend([(p['name'], p['role']) for p in v if p['name'] not in dic_aux.keys()])
+
+            else:
+                logger.debug("Atributos no añadidos: %s=%s" % (k, v))
+
+        # Sort the lists and convert them to str if necessary
+        if l_castandrole:
+            ret_infoLabels['castandrole'] = l_castandrole
+
+        logger.debug("ret_infoLabels %s" % ret_infoLabels)
+
+        return ret_infoLabels
diff --git a/plugin.video.alfa/core/update_servers.py b/plugin.video.alfa/core/update_servers.py
new file mode 100755
index 00000000..043ae528
--- /dev/null
+++ 
b/plugin.video.alfa/core/update_servers.py @@ -0,0 +1,170 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# update_servers.py +# -------------------------------------------------------------------------------- + +import os +import urlparse + +from core import config +from core import scrapertools +from core import servertools + +remote_url = "" +local_folder = os.path.join(config.get_runtime_path(), "servers") + + +### Procedures +def update_servers(): + update_servers_files( + read_remote_servers_list( + dict(read_local_servers_list()) + ) + ) + + +def update_servers_files(update_servers_list): + # ---------------------------- + from platformcode import platformtools + progress = platformtools.dialog_progress_bg("Update servers list") + # ---------------------------- + + for index, server in enumerate(update_servers_list): + # ---------------------------- + percentage = index * 100 / len(update_servers_list) + # ---------------------------- + + data = scrapertools.cache_page(remote_url + server[0] + ".py") + + f = open(os.path.join(local_folder, server[0] + ".py"), 'w') + f.write(data) + f.close() + + # ---------------------------- + progress.update(percentage, ' Update server: "' + server[0] + '"', 'MD5: "' + server[1] + '"') + # ---------------------------- + + # ---------------------------- + progress.close() + # ---------------------------- + + +### Functions +## init +def read_remote_servers_list(local_servers): + data = scrapertools.cache_page(remote_url + "servertools.py") + + f = open(os.path.join(local_folder, "servertools.py"), 'w') + f.write(data) + f.close() + + all_servers = sorted( + servertools.FREE_SERVERS + \ + servertools.PREMIUM_SERVERS + \ + servertools.FILENIUM_SERVERS + \ + servertools.REALDEBRID_SERVERS + \ + servertools.ALLDEBRID_SERVERS + ) + + servers = [] + for server_id in all_servers: + if server_id not in servers: + servers.append(server_id) + + # ---------------------------- + from platformcode import platformtools + progress = platformtools.dialog_progress_bg("Remote servers list") + # ---------------------------- + + remote_servers = [] + update_servers_list = [] + for index, server in enumerate(servers): + # ---------------------------- + percentage = index * 100 / len(servers) + # ---------------------------- + server_file = urlparse.urljoin(remote_url, server + ".py") + + data = scrapertools.cache_page(server_file) + if data != "Not Found": + md5_remote_server = md5_remote(data) + remote_servers.append([server, md5_remote_server]) + + md5_local_server = local_servers.get(server) + if md5_local_server: + if md5_local_server != md5_remote_server: + update_servers_list.append([server, md5_remote_server, md5_local_server, "Update"]) + else: + update_servers_list.append([server, md5_remote_server, "New", "Update"]) + + # ---------------------------- + progress.update(percentage, ' Remote server: "' + server + '"', 'MD5: "' + md5_remote_server + '"') + # ---------------------------- + + # ---------------------------- + progress.close() + # ---------------------------- + + return update_servers_list + + +def read_local_servers_list(): + all_servers = sorted( + servertools.FREE_SERVERS + \ + servertools.PREMIUM_SERVERS + \ + servertools.FILENIUM_SERVERS + \ + servertools.REALDEBRID_SERVERS + \ + servertools.ALLDEBRID_SERVERS + ) + + servers = [] + for server_id in all_servers: + if server_id not in servers: + servers.append(server_id) + + # ---------------------------- + from platformcode import 
platformtools + progress = platformtools.dialog_progress_bg("Local servers list") + # ---------------------------- + + local_servers = [] + for index, server in enumerate(servers): + # ---------------------------- + percentage = index * 100 / len(servers) + # ---------------------------- + server_file = os.path.join(config.get_runtime_path(), "servers", server + ".py") + if os.path.exists(server_file): + md5_local_server = md5_local(server_file) + local_servers.append([server, md5_local_server]) + # ---------------------------- + progress.update(percentage, ' Local server: "' + server + '"', 'MD5: "' + md5_local_server + '"') + # ---------------------------- + + # ---------------------------- + progress.close() + # ---------------------------- + + return local_servers + + +def md5_local(file_server): + import hashlib + hash = hashlib.md5() + with open(file_server) as f: + for chunk in iter(lambda: f.read(4096), ""): + hash.update(chunk) + + return hash.hexdigest() + + +def md5_remote(data_server): + import hashlib + hash = hashlib.md5() + hash.update(data_server) + + return hash.hexdigest() + + +### Run +update_servers() +# from threading import Thread +# Thread( target=update_servers ).start() diff --git a/plugin.video.alfa/core/updater.py b/plugin.video.alfa/core/updater.py new file mode 100755 index 00000000..99f45e0c --- /dev/null +++ b/plugin.video.alfa/core/updater.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +# -------------------------------------------------------------------------------- +# Updater process +# -------------------------------------------------------------------------------- + +import os +import time + +import config +import logger +import scrapertools +import versiontools + + +# Método antiguo, muestra un popup con la versión +def checkforupdates(): + logger.info() + + # Valores por defecto + numero_version_publicada = 0 + tag_version_publicada = "" + + # Lee la versión remota + from core import api + latest_packages = api.plugins_get_latest_packages() + for latest_package in latest_packages["body"]: + if latest_package["package"] == "plugin": + numero_version_publicada = latest_package["version"] + tag_version_publicada = latest_package["tag"] + break + + logger.info("version remota=" + str(numero_version_publicada)) + + # Lee la versión local + numero_version_local = versiontools.get_current_plugin_version() + logger.info("version local=" + str(numero_version_local)) + + hayqueactualizar = numero_version_publicada > numero_version_local + logger.info("-> hayqueactualizar=" + repr(hayqueactualizar)) + + # Si hay actualización disponible, devuelve la Nueva versión para que cada plataforma se encargue de mostrar los avisos + if hayqueactualizar: + return tag_version_publicada + else: + return None + + +# Método nuevo, devuelve el nº de actualizaciones disponibles además de indicar si hay nueva versión del plugin +def get_available_updates(): + logger.info() + + # Cuantas actualizaciones hay? 
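+    # (The count covers the three package types: "plugin" also yields the tag
+    # of the newest release, while "channels" and "servers" are compared
+    # against their local version.xml files through versiontools.)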
+    number_of_updates = 0
+    new_published_version_tag = ""
+
+    # Read the remote version
+    from core import api
+    latest_packages = api.plugins_get_latest_packages()
+
+    for latest_package in latest_packages["body"]:
+
+        if latest_package["package"] == "plugin":
+            if latest_package["version"] > versiontools.get_current_plugin_version():
+                number_of_updates += 1
+                new_published_version_tag = latest_package["tag"]
+
+        elif latest_package["package"] == "channels":
+            if latest_package["version"] > versiontools.get_current_channels_version():
+                number_of_updates += 1
+
+        elif latest_package["package"] == "servers":
+            if latest_package["version"] > versiontools.get_current_servers_version():
+                number_of_updates += 1
+
+    return new_published_version_tag, number_of_updates
+
+
+def update(item):
+    logger.info()
+
+    # Default values
+    published_version_url = ""
+    published_version_filename = ""
+
+    # Read the remote version
+    from core import api
+    latest_packages = api.plugins_get_latest_packages()
+    for latest_package in latest_packages["body"]:
+        if latest_package["package"] == "plugin":
+            published_version_url = latest_package["url"]
+            published_version_filename = latest_package["filename"]
+            published_version_number = latest_package["version"]
+            break
+
+    # The URL comes from the API; the package is downloaded into "userdata"
+    remotefilename = published_version_url
+    localfilename = os.path.join(config.get_data_path(), published_version_filename)
+
+    download_and_install(remotefilename, localfilename)
+
+
+def download_and_install(remote_file_name, local_file_name):
+    logger.info("from " + remote_file_name + " to " + local_file_name)
+
+    if os.path.exists(local_file_name):
+        os.remove(local_file_name)
+
+    # Download the file
+    inicio = time.clock()
+    from core import downloadtools
+    downloadtools.downloadfile(remote_file_name, local_file_name, continuar=False)
+    fin = time.clock()
+    logger.info("Descargado en %d segundos " % (fin - inicio + 1))
+
+    logger.info("descomprime fichero...")
+    import ziptools
+    unzipper = ziptools.ziptools()
+
+    # Unzip it into "addons" (one level above the plugin)
+    installation_target = os.path.join(config.get_runtime_path(), "..")
+    logger.info("installation_target=%s" % installation_target)
+
+    unzipper.extract(local_file_name, installation_target)
+
+    # Delete the downloaded zip
+    logger.info("borra fichero...")
+    os.remove(local_file_name)
+    logger.info("...fichero borrado")
+
+
+def update_channel(channel_name):
+    logger.info(channel_name)
+
+    import channeltools
+    remote_channel_url, remote_version_url = channeltools.get_channel_remote_url(channel_name)
+    local_channel_path, local_version_path, local_compiled_path = channeltools.get_channel_local_path(channel_name)
+
+    # Remote version
+    try:
+        data = scrapertools.cachePage(remote_version_url)
+        logger.info("remote_data=" + data)
+        remote_version = int(scrapertools.find_single_match(data, '<version>([^<]+)</version>'))
+        addon_condition = int(scrapertools.find_single_match(data, "<addon_version>([^<]*)</addon_version>")
+                              .replace(".", "").ljust(len(str(versiontools.get_current_plugin_version())), '0'))
+    except:
+        remote_version = 0
+        addon_condition = 0
+
+    logger.info("remote_version=%d" % remote_version)
+
+    # Local version
+    if os.path.exists(local_version_path):
+        infile = open(local_version_path)
+        from core import jsontools
+        data = jsontools.load(infile.read())
+        infile.close()
+
+        local_version = data.get('version', 0)
+    else:
+        local_version = 0
+
+    logger.info("local_version=%d" % local_version)
+
+    # Check whether it has changed
+    updated = (remote_version > local_version) and (versiontools.get_current_plugin_version() >= addon_condition)
+
+    if updated:
+        logger.info("downloading...")
+        download_channel(channel_name)
+
+    return updated
+
+
+def download_channel(channel_name):
+    logger.info(channel_name)
+
+    import channeltools
+    remote_channel_url, remote_version_url = channeltools.get_channel_remote_url(channel_name)
+    local_channel_path, local_version_path, local_compiled_path = channeltools.get_channel_local_path(channel_name)
+
+    # Download the channel
+    try:
+        updated_channel_data = scrapertools.cachePage(remote_channel_url)
+        outfile = open(local_channel_path, "wb")
+        outfile.write(updated_channel_data)
+        outfile.flush()
+        outfile.close()
+        logger.info("Grabado a " + local_channel_path)
+    except:
+        import traceback
+        logger.error(traceback.format_exc())
+
+    # Download the version file (it may not exist)
+    try:
+        updated_version_data = scrapertools.cachePage(remote_version_url)
+        outfile = open(local_version_path, "w")
+        outfile.write(updated_version_data)
+        outfile.flush()
+        outfile.close()
+        logger.info("Grabado a " + local_version_path)
+    except:
+        import traceback
+        logger.error(traceback.format_exc())
+
+    if os.path.exists(local_compiled_path):
+        os.remove(local_compiled_path)
+
+    from platformcode import platformtools
+    platformtools.dialog_notification(channel_name + " actualizado", "Se ha descargado una nueva versión")
diff --git a/plugin.video.alfa/core/versiontools.py b/plugin.video.alfa/core/versiontools.py
new file mode 100755
index 00000000..41f5d290
--- /dev/null
+++ b/plugin.video.alfa/core/versiontools.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------------------------------
+# Version Tools
+# --------------------------------------------------------------------------------
+
+import os
+
+import config
+import scrapertools
+
+
+def get_current_plugin_version():
+    return 4300
+
+
+def get_current_plugin_version_tag():
+    return "4.3.0-beta1"
+
+
+def get_current_plugin_date():
+    return "30/06/2017"
+
+
+def get_current_channels_version():
+    f = open(os.path.join(config.get_runtime_path(), "channels", "version.xml"))
+    data = f.read()
+    f.close()
+
+    return int(scrapertools.find_single_match(data, "<version>([^<]+)</version>"))
+
+
+def get_current_servers_version():
+    f = open(os.path.join(config.get_runtime_path(), "servers", "version.xml"))
+    data = f.read()
+    f.close()
+
+    return int(scrapertools.find_single_match(data, "<version>([^<]+)</version>"))
diff --git a/plugin.video.alfa/core/videolibrarytools.py b/plugin.video.alfa/core/videolibrarytools.py
new file mode 100755
index 00000000..1b2b1193
--- /dev/null
+++ b/plugin.video.alfa/core/videolibrarytools.py
@@ -0,0 +1,605 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------
+# Common Library Tools
+# ------------------------------------------------------------
+
+import errno
+import math
+import os
+
+from core import config
+from core import filetools
+from core import logger
+from core import scraper
+from core import scrapertools
+from core.item import Item
+from platformcode import platformtools
+
+FOLDER_MOVIES = config.get_setting("folder_movies")
+FOLDER_TVSHOWS = config.get_setting("folder_tvshows")
+VIDEOLIBRARY_PATH = config.get_videolibrary_path()
+MOVIES_PATH = filetools.join(VIDEOLIBRARY_PATH, FOLDER_MOVIES)
+TVSHOWS_PATH = filetools.join(VIDEOLIBRARY_PATH, FOLDER_TVSHOWS)
+
+if not FOLDER_MOVIES or not FOLDER_TVSHOWS or not VIDEOLIBRARY_PATH \
+        or not 
filetools.exists(MOVIES_PATH) or not filetools.exists(TVSHOWS_PATH): + config.verify_directories_created() + +addon_name = "plugin://plugin.video.%s/" % config.PLUGIN_NAME + + +def read_nfo(path_nfo, item=None): + """ + Metodo para leer archivos nfo. + Los arcivos nfo tienen la siguiente extructura: url_scraper | xml + item_json + [url_scraper] y [xml] son opcionales, pero solo uno de ellos ha de existir siempre. + @param path_nfo: ruta absoluta al archivo nfo + @type path_nfo: str + @param item: Si se pasa este parametro el item devuelto sera una copia de este con + los valores de 'infoLabels', 'library_playcounts' y 'path' leidos del nfo + @type: Item + @return: Una tupla formada por la cabecera (head_nfo ='url_scraper'|'xml') y el objeto 'item_json' + @rtype: tuple (str, Item) + """ + head_nfo = "" + it = None + + data = filetools.read(path_nfo) + + if data: + head_nfo = data.splitlines()[0] + "\n" + data = "\n".join(data.splitlines()[1:]) + + it_nfo = Item().fromjson(data) + + if item: + it = item.clone() + it.infoLabels = it_nfo.infoLabels + if 'library_playcounts' in it_nfo: + it.library_playcounts = it_nfo.library_playcounts + if it_nfo.path: + it.path = it_nfo.path + else: + it = it_nfo + + if 'fanart' in it.infoLabels: + it.fanart = it.infoLabels['fanart'] + + return head_nfo, it + + +def save_movie(item): + """ + guarda en la libreria de peliculas el elemento item, con los valores que contiene. + @type item: item + @param item: elemento que se va a guardar. + @rtype insertados: int + @return: el número de elementos insertados + @rtype sobreescritos: int + @return: el número de elementos sobreescritos + @rtype fallidos: int + @return: el número de elementos fallidos o -1 si ha fallado todo + """ + logger.info() + # logger.debug(item.tostring('\n')) + insertados = 0 + sobreescritos = 0 + fallidos = 0 + path = "" + + # Itentamos obtener el titulo correcto: + # 1. contentTitle: Este deberia ser el sitio correcto, ya que title suele contener "Añadir a la videoteca..." + # 2. fulltitle + # 3. 
title + if not item.contentTitle: + # Colocamos el titulo correcto en su sitio para que scraper lo localize + if item.fulltitle: + item.contentTitle = item.fulltitle + else: + item.contentTitle = item.title + + # Si llegados a este punto no tenemos titulo, salimos + if not item.contentTitle or not item.channel: + logger.debug("NO ENCONTRADO contentTitle") + return 0, 0, -1 # Salimos sin guardar + + scraper_return = scraper.find_and_set_infoLabels(item) + + # Llegados a este punto podemos tener: + # scraper_return = True: Un item con infoLabels con la información actualizada de la peli + # scraper_return = False: Un item sin información de la peli (se ha dado a cancelar en la ventana) + # item.infoLabels['code'] == "" : No se ha encontrado el identificador de IMDB necesario para continuar, salimos + if not scraper_return or not item.infoLabels['code']: + # TODO de momento si no hay resultado no añadimos nada, + # aunq podriamos abrir un cuadro para introducir el identificador/nombre a mano + logger.debug("NO ENCONTRADO EN SCRAPER O NO TIENE code") + return 0, 0, -1 + + _id = item.infoLabels['code'][0] + + # progress dialog + p_dialog = platformtools.dialog_progress('alfa', 'Añadiendo película...') + + if config.get_setting("original_title_folder", "videolibrary") == 1 and item.infoLabels['originaltitle']: + base_name = item.infoLabels['originaltitle'] + else: + base_name = item.contentTitle + + base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").lower().encode("utf8") + + subcarpetas = os.listdir(MOVIES_PATH) + + for c in subcarpetas: + code = scrapertools.find_single_match(c, '\[(.*?)\]') + if code and code in item.infoLabels['code']: + path = c + _id = code + break + + if not path: + # Crear carpeta + path = filetools.join(MOVIES_PATH, ("%s [%s]" % (base_name, _id)).strip()) + logger.info("Creando directorio pelicula:" + path) + if not filetools.mkdir(path): + logger.debug("No se ha podido crear el directorio") + return 0, 0, -1 + + nfo_path = filetools.join(path, "%s [%s].nfo" % (base_name, _id)) + strm_path = filetools.join(path, "%s.strm" % base_name) + json_path = filetools.join(path, ("%s [%s].json" % (base_name, item.channel.lower()))) + + nfo_exists = filetools.exists(nfo_path) + strm_exists = filetools.exists(strm_path) + json_exists = filetools.exists(json_path) + + if not nfo_exists: + # Creamos .nfo si no existe + logger.info("Creando .nfo: " + nfo_path) + head_nfo = scraper.get_nfo(item) + + item_nfo = Item(title=item.contentTitle, channel="videolibrary", action='findvideos', + library_playcounts={"%s [%s]" % (base_name, _id): 0}, infoLabels=item.infoLabels, + library_urls={}) + + else: + # Si existe .nfo, pero estamos añadiendo un nuevo canal lo abrimos + head_nfo, item_nfo = read_nfo(nfo_path) + + if not strm_exists: + # Crear base_name.strm si no existe + item_strm = Item(channel='videolibrary', action='play_from_library', + strm_path=strm_path.replace(MOVIES_PATH, ""), contentType='movie', + contentTitle=item.contentTitle) + strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl())) + item_nfo.strm_path = strm_path.replace(MOVIES_PATH, "") + + # Solo si existen item_nfo y .strm continuamos + if item_nfo and strm_exists: + + if json_exists: + logger.info("El fichero existe. 
Se sobreescribe") + sobreescritos += 1 + else: + insertados += 1 + + if filetools.write(json_path, item.tojson()): + p_dialog.update(100, 'Añadiendo película...', item.contentTitle) + item_nfo.library_urls[item.channel] = item.url + + if filetools.write(nfo_path, head_nfo + item_nfo.tojson()): + # actualizamos la videoteca de Kodi con la pelicula + if config.is_xbmc(): + from platformcode import xbmc_videolibrary + xbmc_videolibrary.update(FOLDER_MOVIES, filetools.basename(path) + "/") + + p_dialog.close() + return insertados, sobreescritos, fallidos + + # Si llegamos a este punto es por q algo ha fallado + logger.error("No se ha podido guardar %s en la videoteca" % item.contentTitle) + p_dialog.update(100, 'Fallo al añadir...', item.contentTitle) + p_dialog.close() + return 0, 0, -1 + + +def save_tvshow(item, episodelist): + """ + guarda en la libreria de series la serie con todos los capitulos incluidos en la lista episodelist + @type item: item + @param item: item que representa la serie a guardar + @type episodelist: list + @param episodelist: listado de items que representan los episodios que se van a guardar. + @rtype insertados: int + @return: el número de episodios insertados + @rtype sobreescritos: int + @return: el número de episodios sobreescritos + @rtype fallidos: int + @return: el número de episodios fallidos o -1 si ha fallado toda la serie + """ + logger.info() + # logger.debug(item.tostring('\n')) + path = "" + + # Si llegados a este punto no tenemos titulo o code, salimos + if not (item.contentSerieName or item.infoLabels['code']) or not item.channel: + logger.debug("NO ENCONTRADO contentSerieName NI code") + return 0, 0, -1 # Salimos sin guardar + + scraper_return = scraper.find_and_set_infoLabels(item) + + # Llegados a este punto podemos tener: + # scraper_return = True: Un item con infoLabels con la información actualizada de la serie + # scraper_return = False: Un item sin información de la peli (se ha dado a cancelar en la ventana) + # item.infoLabels['code'] == "" : No se ha encontrado el identificador de IMDB necesario para continuar, salimos + if not scraper_return or not item.infoLabels['code']: + # TODO de momento si no hay resultado no añadimos nada, + # aunq podriamos abrir un cuadro para introducir el identificador/nombre a mano + logger.debug("NO ENCONTRADO EN SCRAPER O NO TIENE code") + return 0, 0, -1 + + _id = item.infoLabels['code'][0] + + if config.get_setting("original_title_folder", "videolibrary") == 1 and item.infoLabels['originaltitle']: + base_name = item.infoLabels['originaltitle'] + elif item.infoLabels['title']: + base_name = item.infoLabels['title'] + else: + base_name = item.contentSerieName + + base_name = unicode(filetools.validate_path(base_name.replace('/', '-')), "utf8").lower().encode("utf8") + + subcarpetas = os.listdir(TVSHOWS_PATH) + + for c in subcarpetas: + code = scrapertools.find_single_match(c, '\[(.*?)\]') + if code and code in item.infoLabels['code']: + path = filetools.join(TVSHOWS_PATH, c) + _id = code + break + + if not path: + path = filetools.join(TVSHOWS_PATH, ("%s [%s]" % (base_name, _id)).strip()) + logger.info("Creando directorio serie: " + path) + try: + filetools.mkdir(path) + except OSError, exception: + if exception.errno != errno.EEXIST: + raise + + tvshow_path = filetools.join(path, "tvshow.nfo") + if not filetools.exists(tvshow_path): + # Creamos tvshow.nfo, si no existe, con la head_nfo, info de la serie y marcas de episodios vistos + logger.info("Creando tvshow.nfo: " + tvshow_path) + head_nfo = 
scraper.get_nfo(item) + + item_tvshow = Item(title=item.contentTitle, channel="videolibrary", action="get_seasons", + fanart=item.infoLabels['fanart'], thumbnail=item.infoLabels['thumbnail'], + infoLabels=item.infoLabels, path=path.replace(TVSHOWS_PATH, "")) + item_tvshow.library_playcounts = {} + item_tvshow.library_urls = {item.channel: item.url} + + else: + # Si existe tvshow.nfo, pero estamos añadiendo un nuevo canal actualizamos el listado de urls + head_nfo, item_tvshow = read_nfo(tvshow_path) + item_tvshow.channel = "videolibrary" + item_tvshow.action = "get_seasons" + item_tvshow.library_urls[item.channel] = item.url + + # FILTERTOOLS + # si el canal tiene filtro de idiomas, añadimos el canal y el show + if episodelist and "list_language" in episodelist[0]: + # si ya hemos añadido un canal previamente con filtro, añadimos o actualizamos el canal y show + if "library_filter_show" in item_tvshow: + item_tvshow.library_filter_show[item.channel] = item.show + # no habia ningún canal con filtro y lo generamos por primera vez + else: + item_tvshow.library_filter_show = {item.channel: item.show} + + if item.channel != "downloads": + item_tvshow.active = 1 # para que se actualice a diario cuando se llame a videolibrary_service + + filetools.write(tvshow_path, head_nfo + item_tvshow.tojson()) + + if not episodelist: + # La lista de episodios esta vacia + return 0, 0, 0 + + # Guardar los episodios + '''import time + start_time = time.time()''' + insertados, sobreescritos, fallidos = save_episodes(path, episodelist, item) + '''msg = "Insertados: %d | Sobreescritos: %d | Fallidos: %d | Tiempo: %2.2f segundos" % \ + (insertados, sobreescritos, fallidos, time.time() - start_time) + logger.debug(msg)''' + + return insertados, sobreescritos, fallidos, path + + +def save_episodes(path, episodelist, serie, silent=False, overwrite=True): + """ + guarda en la ruta indicada todos los capitulos incluidos en la lista episodelist + @type path: str + @param path: ruta donde guardar los episodios + @type episodelist: list + @param episodelist: listado de items que representan los episodios que se van a guardar. 
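(Illustrative aside, not from the original file: save_episodes keys everything on
scrapertools.get_season_and_episode() returning a "SxE" token such as "1x05",
which is then split on "x" into season and episode. A minimal stand-in -- the
real helper's internals are an assumption here -- could be:

    import re

    def get_season_and_episode(title):
        # "1x05", "3x12"... or "" when the title has no SxE marker (assumed)
        match = re.search(r'(\d+)x(\d+)', title, re.I)
        return "%sx%s" % match.groups() if match else ""
)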
+ @type serie: item + @param serie: serie de la que se van a guardar los episodios + @type silent: bool + @param silent: establece si se muestra la notificación + @param overwrite: permite sobreescribir los ficheros existentes + @type overwrite: bool + @rtype insertados: int + @return: el número de episodios insertados + @rtype sobreescritos: int + @return: el número de episodios sobreescritos + @rtype fallidos: int + @return: el número de episodios fallidos + """ + logger.info() + + # No hay lista de episodios, no hay nada que guardar + if not len(episodelist): + logger.info("No hay lista de episodios, salimos sin crear strm") + return 0, 0, 0 + + insertados = 0 + sobreescritos = 0 + fallidos = 0 + news_in_playcounts = {} + + # Listamos todos los ficheros de la serie, asi evitamos tener que comprobar si existe uno por uno + ficheros = os.listdir(path) + ficheros = [filetools.join(path, f) for f in ficheros] + + # Silent es para no mostrar progreso (para videolibrary_service) + if not silent: + # progress dialog + p_dialog = platformtools.dialog_progress('alfa', 'Añadiendo episodios...') + p_dialog.update(0, 'Añadiendo episodio...') + + new_episodelist = [] + # Obtenemos el numero de temporada y episodio y descartamos los q no lo sean + for e in episodelist: + try: + season_episode = scrapertools.get_season_and_episode(e.title) + + e.infoLabels = serie.infoLabels + e.contentSeason, e.contentEpisodeNumber = season_episode.split("x") + new_episodelist.append(e) + except: + continue + + # No hay lista de episodios, no hay nada que guardar + if not len(new_episodelist): + logger.info("No hay lista de episodios, salimos sin crear strm") + return 0, 0, 0 + + # fix float porque la division se hace mal en python 2.x + t = float(100) / len(new_episodelist) + + for i, e in enumerate(scraper.sort_episode_list(new_episodelist)): + if not silent: + p_dialog.update(int(math.ceil((i + 1) * t)), 'Añadiendo episodio...', e.title) + + season_episode = "%sx%s" % (e.contentSeason, str(e.contentEpisodeNumber).zfill(2)) + strm_path = filetools.join(path, "%s.strm" % season_episode) + nfo_path = filetools.join(path, "%s.nfo" % season_episode) + json_path = filetools.join(path, ("%s [%s].json" % (season_episode, e.channel)).lower()) + + strm_exists = strm_path in ficheros + nfo_exists = nfo_path in ficheros + json_exists = json_path in ficheros + + if not strm_exists: + # Si no existe season_episode.strm añadirlo + item_strm = Item(action='play_from_library', channel='videolibrary', + strm_path=strm_path.replace(TVSHOWS_PATH, ""), infoLabels={}) + item_strm.contentSeason = e.contentSeason + item_strm.contentEpisodeNumber = e.contentEpisodeNumber + item_strm.contentType = e.contentType + item_strm.contentTitle = season_episode + + # FILTERTOOLS + if item_strm.list_language: + # si tvshow.nfo tiene filtro se le pasa al item_strm que se va a generar + if "library_filter_show" in serie: + item_strm.library_filter_show = serie.library_filter_show + + if item_strm.library_filter_show == "": + logger.error("Se ha producido un error al obtener el nombre de la serie a filtrar") + + # logger.debug("item_strm" + item_strm.tostring('\n')) + # logger.debug("serie " + serie.tostring('\n')) + strm_exists = filetools.write(strm_path, '%s?%s' % (addon_name, item_strm.tourl())) + + item_nfo = None + if not nfo_exists and e.infoLabels["code"]: + # Si no existe season_episode.nfo añadirlo + scraper.find_and_set_infoLabels(e) + head_nfo = scraper.get_nfo(e) + + item_nfo = e.clone(channel="videolibrary", url="", action='findvideos', + 
strm_path=strm_path.replace(TVSHOWS_PATH, "")) + + nfo_exists = filetools.write(nfo_path, head_nfo + item_nfo.tojson()) + + # Solo si existen season_episode.nfo y season_episode.strm continuamos + if nfo_exists and strm_exists: + if not json_exists or overwrite: + # Obtenemos infoLabel del episodio + if not item_nfo: + head_nfo, item_nfo = read_nfo(nfo_path) + + e.infoLabels = item_nfo.infoLabels + + if filetools.write(json_path, e.tojson()): + if not json_exists: + logger.info("Insertado: %s" % json_path) + insertados += 1 + # Marcamos episodio como no visto + news_in_playcounts[season_episode] = 0 + # Marcamos la temporada como no vista + news_in_playcounts["season %s" % e.contentSeason] = 0 + # Marcamos la serie como no vista + # logger.debug("serie " + serie.tostring('\n')) + news_in_playcounts[serie.contentTitle] = 0 + + else: + logger.info("Sobreescrito: %s" % json_path) + sobreescritos += 1 + else: + logger.info("Fallido: %s" % json_path) + fallidos += 1 + + else: + logger.info("Fallido: %s" % json_path) + fallidos += 1 + + if not silent and p_dialog.iscanceled(): + break + + if not silent: + p_dialog.close() + + if news_in_playcounts: + # Si hay nuevos episodios los marcamos como no vistos en tvshow.nfo ... + tvshow_path = filetools.join(path, "tvshow.nfo") + try: + import datetime + head_nfo, tvshow_item = read_nfo(tvshow_path) + tvshow_item.library_playcounts.update(news_in_playcounts) + + if tvshow_item.active == 30: + tvshow_item.active = 1 + update_last = datetime.date.today() + tvshow_item.update_last = update_last.strftime('%Y-%m-%d') + update_next = datetime.date.today() + datetime.timedelta(days=int(tvshow_item.active)) + tvshow_item.update_next = update_next.strftime('%Y-%m-%d') + + filetools.write(tvshow_path, head_nfo + tvshow_item.tojson()) + except: + logger.error("Error al actualizar tvshow.nfo") + fallidos = -1 + else: + # ... si ha sido correcto actualizamos la videoteca de Kodi + if config.is_xbmc() and not silent: + from platformcode import xbmc_videolibrary + xbmc_videolibrary.update(FOLDER_TVSHOWS, filetools.basename(path)) + + if fallidos == len(episodelist): + fallidos = -1 + + logger.debug("%s [%s]: insertados= %s, sobreescritos= %s, fallidos= %s" % + (serie.contentSerieName, serie.channel, insertados, sobreescritos, fallidos)) + return insertados, sobreescritos, fallidos + + +def add_movie(item): + """ + guarda una pelicula en la libreria de cine. La pelicula puede ser un enlace dentro de un canal o un video + descargado previamente. + + Para añadir episodios descargados en local, el item debe tener exclusivamente: + - contentTitle: titulo de la pelicula + - title: titulo a mostrar junto al listado de enlaces -findvideos- ("Reproducir video local HD") + - infoLabels["tmdb_id"] o infoLabels["imdb_id"] + - contentType == "movie" + - channel = "downloads" + - url : ruta local al video + + @type item: item + @param item: elemento que se va a guardar. + """ + logger.info() + + new_item = item.clone(action="findvideos") + insertados, sobreescritos, fallidos = save_movie(new_item) + + if fallidos == 0: + platformtools.dialog_ok(config.get_localized_string(30131), new_item.contentTitle, + config.get_localized_string(30135)) # 'se ha añadido a la videoteca' + else: + platformtools.dialog_ok(config.get_localized_string(30131), + "ERROR, la pelicula NO se ha añadido a la videoteca") + + +def add_tvshow(item, channel=None): + """ + Guarda contenido en la libreria de series. 
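(Illustrative aside, not part of this docstring: the refresh bookkeeping at the
end of save_episodes keeps the update period in "active" as a number of days and
stamps the next check date into tvshow.nfo; the date arithmetic in isolation:

    import datetime

    active_days = 1  # illustrative value; the real one is read from tvshow.nfo
    update_next = datetime.date.today() + datetime.timedelta(days=active_days)
    print update_next.strftime('%Y-%m-%d')
)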
Este contenido puede ser uno de estos dos: + - La serie con todos los capitulos incluidos en la lista episodelist. + - Un solo capitulo descargado previamente en local. + + Para añadir episodios descargados en local, el item debe tener exclusivamente: + - contentSerieName (o show): Titulo de la serie + - contentTitle: titulo del episodio para extraer season_and_episode ("1x01 Piloto") + - title: titulo a mostrar junto al listado de enlaces -findvideos- ("Reproducir video local") + - infoLabels["tmdb_id"] o infoLabels["imdb_id"] + - contentType != "movie" + - channel = "downloads" + - url : ruta local al video + + @type item: item + @param item: item que representa la serie a guardar + @type channel: modulo + @param channel: canal desde el que se guardara la serie. + Por defecto se importara item.from_channel o item.channel + + """ + logger.info("show=#" + item.show + "#") + + if item.channel == "downloads": + itemlist = [item.clone()] + + else: + # Esta marca es porque el item tiene algo más aparte en el atributo "extra" + item.action = item.extra + if isinstance(item.extra, str) and "###" in item.extra: + item.action = item.extra.split("###")[0] + item.extra = item.extra.split("###")[1] + + if item.from_action: + item.__dict__["action"] = item.__dict__.pop("from_action") + if item.from_channel: + item.__dict__["channel"] = item.__dict__.pop("from_channel") + + if not channel: + try: + channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel]) + except ImportError: + exec "import channels." + item.channel + " as channel" + + # Obtiene el listado de episodios + itemlist = getattr(channel, item.action)(item) + + insertados, sobreescritos, fallidos, path = save_tvshow(item, itemlist) + + if not insertados and not sobreescritos and not fallidos: + platformtools.dialog_ok("Videoteca", "ERROR, la serie NO se ha añadido a la videoteca", + "No se ha podido obtener ningun episodio") + logger.error("La serie %s no se ha podido añadir a la videoteca. 
No se ha podido obtener ningun episodio"
+                     % item.show)
+
+    elif fallidos == -1:
+        platformtools.dialog_ok("Videoteca", "ERROR, la serie NO se ha añadido a la videoteca")
+        logger.error("La serie %s no se ha podido añadir a la videoteca" % item.show)
+
+    elif fallidos > 0:
+        platformtools.dialog_ok("Videoteca", "ERROR, la serie NO se ha añadido completa a la videoteca")
+        logger.error("No se han podido añadir %s episodios de la serie %s a la videoteca" % (fallidos, item.show))
+
+    else:
+        platformtools.dialog_ok("Videoteca", "La serie se ha añadido a la videoteca")
+        logger.info("Se han añadido %s episodios de la serie %s a la videoteca" %
+                    (insertados, item.show))
+        if config.is_xbmc():
+            if config.get_setting("sync_trakt_new_tvshow", "videolibrary"):
+                import xbmc
+                from platformcode import xbmc_videolibrary
+                if config.get_setting("sync_trakt_new_tvshow_wait", "videolibrary"):
+                    # Make sure Kodi is not already scanning the video library
+                    while xbmc.getCondVisibility('Library.IsScanningVideo()'):
+                        xbmc.sleep(1000)
+                # Launch the synchronisation of the Kodi video library
+                xbmc_videolibrary.sync_trakt_kodi()
+                # Launch the synchronisation of the addon video library
+                xbmc_videolibrary.sync_trakt_addon(path)
diff --git a/plugin.video.alfa/core/ziptools.py b/plugin.video.alfa/core/ziptools.py
new file mode 100755
index 00000000..bea3e8f0
--- /dev/null
+++ b/plugin.video.alfa/core/ziptools.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------------------------------
+# Zip Tools
+# --------------------------------------------------------------------------------
+
+import os
+import zipfile
+
+import config
+import logger
+
+
+class ziptools:
+    def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
+        logger.info("file=%s" % file)
+        logger.info("dir=%s" % dir)
+
+        if not dir.endswith(':') and not os.path.exists(dir):
+            os.mkdir(dir)
+
+        zf = zipfile.ZipFile(file)
+        if not folder_to_extract:
+            self._createstructure(file, dir)
+        num_files = len(zf.namelist())
+
+        for name in zf.namelist():
+            logger.info("name=%s" % name)
+            if not name.endswith('/'):
+                logger.info("no es un directorio")
+                try:
+                    (path, filename) = os.path.split(os.path.join(dir, name))
+                    logger.info("path=%s" % path)
+                    logger.info("name=%s" % name)
+                    if folder_to_extract:
+                        if path != os.path.join(dir, folder_to_extract):
+                            break
+                    else:
+                        os.makedirs(path)
+                except:
+                    pass
+                if folder_to_extract:
+                    outfilename = os.path.join(dir, filename)
+
+                else:
+                    outfilename = os.path.join(dir, name)
+                logger.info("outfilename=%s" % outfilename)
+                try:
+                    if os.path.exists(outfilename) and overwrite_question:
+                        from platformcode import platformtools
+                        dyesno = platformtools.dialog_yesno("El archivo ya existe",
+                                                            "El archivo %s a descomprimir ya existe" \
+                                                            ", ¿desea sobrescribirlo?" 
\ + % os.path.basename(outfilename)) + if not dyesno: + break + if backup: + import time + import shutil + hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime()) + backup = os.path.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract) + if not os.path.exists(backup): + os.makedirs(backup) + shutil.copy2(outfilename, os.path.join(backup, os.path.basename(outfilename))) + + outfile = open(outfilename, 'wb') + outfile.write(zf.read(name)) + except: + logger.error("Error en fichero " + name) + + def _createstructure(self, file, dir): + self._makedirs(self._listdirs(file), dir) + + def create_necessary_paths(filename): + try: + (path, name) = os.path.split(filename) + os.makedirs(path) + except: + pass + + def _makedirs(self, directories, basedir): + for dir in directories: + curdir = os.path.join(basedir, dir) + if not os.path.exists(curdir): + os.mkdir(curdir) + + def _listdirs(self, file): + zf = zipfile.ZipFile(file) + dirs = [] + for name in zf.namelist(): + if name.endswith('/'): + dirs.append(name) + + dirs.sort() + return dirs diff --git a/plugin.video.alfa/default.py b/plugin.video.alfa/default.py new file mode 100755 index 00000000..76a5e816 --- /dev/null +++ b/plugin.video.alfa/default.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# XBMC entry point +# ------------------------------------------------------------ + + +import os +import sys + +import xbmc +from core import config +from core import logger + +logger.info("init...") + +librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib')) +sys.path.append(librerias) + +from platformcode import launcher + +if sys.argv[2] == "": + launcher.start() + launcher.run() +else: + launcher.run() diff --git a/plugin.video.alfa/fanart.jpg b/plugin.video.alfa/fanart.jpg new file mode 100755 index 00000000..a975dcbb Binary files /dev/null and b/plugin.video.alfa/fanart.jpg differ diff --git a/plugin.video.alfa/icon.png b/plugin.video.alfa/icon.png new file mode 100755 index 00000000..14dc99d5 Binary files /dev/null and b/plugin.video.alfa/icon.png differ diff --git a/plugin.video.alfa/lib/Crypto/Cipher/AES.py b/plugin.video.alfa/lib/Crypto/Cipher/AES.py new file mode 100755 index 00000000..0b1bb51c --- /dev/null +++ b/plugin.video.alfa/lib/Crypto/Cipher/AES.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +# +# Cipher/AES.py : AES +# +# =================================================================== +# The contents of this file are dedicated to the public domain. To +# the extent that dedication to the public domain is not available, +# everyone is granted a worldwide, perpetual, royalty-free, +# non-exclusive license to exercise all rights associated with the +# contents of this file for any purpose whatsoever. +# No rights are reserved. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# =================================================================== +"""AES symmetric cipher + +AES `(Advanced Encryption Standard)`__ is a symmetric block cipher standardized +by NIST_ . 
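(Editorial illustration, not from the original module: the inverse of the CFB
encryption example shown further below -- a second cipher object is needed on
the receiving side because cipher objects keep chaining state:

    >>> from Crypto.Cipher import AES
    >>> from Crypto import Random
    >>> key = b'Sixteen byte key'
    >>> iv = Random.new().read(AES.block_size)
    >>> msg = iv + AES.new(key, AES.MODE_CFB, iv).encrypt(b'Attack at dawn')
    >>> AES.new(key, AES.MODE_CFB, msg[:AES.block_size]).decrypt(msg[AES.block_size:])
    'Attack at dawn'
)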
It has a fixed data block size of 16 bytes. +Its keys can be 128, 192, or 256 bits long. + +AES is very fast and secure, and it is the de facto standard for symmetric +encryption. + +As an example, encryption can be done as follows: + + >>> from Crypto.Cipher import AES + >>> from Crypto import Random + >>> + >>> key = b'Sixteen byte key' + >>> iv = Random.new().read(AES.block_size) + >>> cipher = AES.new(key, AES.MODE_CFB, iv) + >>> msg = iv + cipher.encrypt(b'Attack at dawn') + +.. __: http://en.wikipedia.org/wiki/Advanced_Encryption_Standard +.. _NIST: http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf + +:undocumented: __revision__, __package__ +""" + +__revision__ = "$Id$" + +from Crypto.Cipher import blockalgo +from Crypto.Cipher import _AES + +class AESCipher (blockalgo.BlockAlgo): + """AES cipher object""" + + def __init__(self, key, *args, **kwargs): + """Initialize an AES cipher object + + See also `new()` at the module level.""" + blockalgo.BlockAlgo.__init__(self, _AES, key, *args, **kwargs) + +def new(key, *args, **kwargs): + """Create a new AES cipher + + :Parameters: + key : byte string + The secret key to use in the symmetric cipher. + It must be 16 (*AES-128*), 24 (*AES-192*), or 32 (*AES-256*) bytes long. + :Keywords: + mode : a *MODE_** constant + The chaining mode to use for encryption or decryption. + Default is `MODE_ECB`. + IV : byte string + The initialization vector to use for encryption or decryption. + + It is ignored for `MODE_ECB` and `MODE_CTR`. + + For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption + and `block_size` +2 bytes for decryption (in the latter case, it is + actually the *encrypted* IV which was prefixed to the ciphertext). + It is mandatory. + + For all other modes, it must be `block_size` bytes longs. It is optional and + when not present it will be given a default value of all zeroes. + counter : callable + (*Only* `MODE_CTR`). A stateful function that returns the next + *counter block*, which is a byte string of `block_size` bytes. + For better performance, use `Crypto.Util.Counter`. + segment_size : integer + (*Only* `MODE_CFB`).The number of bits the plaintext and ciphertext + are segmented in. + It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8. + + :Return: an `AESCipher` object + """ + return AESCipher(key, *args, **kwargs) + +#: Electronic Code Book (ECB). See `blockalgo.MODE_ECB`. +MODE_ECB = 1 +#: Cipher-Block Chaining (CBC). See `blockalgo.MODE_CBC`. +MODE_CBC = 2 +#: Cipher FeedBack (CFB). See `blockalgo.MODE_CFB`. +MODE_CFB = 3 +#: This mode should not be used. +MODE_PGP = 4 +#: Output FeedBack (OFB). See `blockalgo.MODE_OFB`. +MODE_OFB = 5 +#: CounTer Mode (CTR). See `blockalgo.MODE_CTR`. +MODE_CTR = 6 +#: OpenPGP Mode. See `blockalgo.MODE_OPENPGP`. 
+MODE_OPENPGP = 7 +#: Size of a data block (in bytes) +block_size = 16 +#: Size of a key (in bytes) +key_size = ( 16, 24, 32 ) + diff --git a/plugin.video.alfa/lib/Crypto/Cipher/_AES.pyd b/plugin.video.alfa/lib/Crypto/Cipher/_AES.pyd new file mode 100755 index 00000000..750eb9dc Binary files /dev/null and b/plugin.video.alfa/lib/Crypto/Cipher/_AES.pyd differ diff --git a/plugin.video.alfa/lib/Crypto/Cipher/__init__.py b/plugin.video.alfa/lib/Crypto/Cipher/__init__.py new file mode 100755 index 00000000..663468d3 --- /dev/null +++ b/plugin.video.alfa/lib/Crypto/Cipher/__init__.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# +# =================================================================== +# The contents of this file are dedicated to the public domain. To +# the extent that dedication to the public domain is not available, +# everyone is granted a worldwide, perpetual, royalty-free, +# non-exclusive license to exercise all rights associated with the +# contents of this file for any purpose whatsoever. +# No rights are reserved. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# =================================================================== + +"""Symmetric- and asymmetric-key encryption algorithms. + +Encryption algorithms transform plaintext in some way that +is dependent on a key or key pair, producing ciphertext. + +Symmetric algorithms +-------------------- + +Encryption can easily be reversed, if (and, hopefully, only if) +one knows the same key. +In other words, sender and receiver share the same key. + +The symmetric encryption modules here all support the interface described in PEP +272, "API for Block Encryption Algorithms". + +If you don't know which algorithm to choose, use AES because it's +standard and has undergone a fair bit of examination. + +======================== ======= ======================== +Module name Type Description +======================== ======= ======================== +`Crypto.Cipher.AES` Block Advanced Encryption Standard +`Crypto.Cipher.ARC2` Block Alleged RC2 +`Crypto.Cipher.ARC4` Stream Alleged RC4 +`Crypto.Cipher.Blowfish` Block Blowfish +`Crypto.Cipher.CAST` Block CAST +`Crypto.Cipher.DES` Block The Data Encryption Standard. + Very commonly used in the past, + but today its 56-bit keys are too small. +`Crypto.Cipher.DES3` Block Triple DES. +`Crypto.Cipher.XOR` Stream The simple XOR cipher. +======================== ======= ======================== + + +Asymmetric algorithms +--------------------- + +For asymmetric algorithms, the key to be used for decryption is totally +different and cannot be derived in a feasible way from the key used +for encryption. Put differently, sender and receiver each own one half +of a key pair. The encryption key is often called ``public`` whereas +the decryption key is called ``private``. 
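(Usage sketch, an illustrative addition rather than part of the original
docstring: the PKCS#1 modules listed in the table below consume RSA key pairs --

    >>> from Crypto.PublicKey import RSA
    >>> from Crypto.Cipher import PKCS1_OAEP
    >>> key = RSA.generate(2048)            # the private half stays with the receiver
    >>> ciphertext = PKCS1_OAEP.new(key.publickey()).encrypt(b'secret')
    >>> PKCS1_OAEP.new(key).decrypt(ciphertext)
    'secret'
)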
+ +========================== ======================= +Module name Description +========================== ======================= +`Crypto.Cipher.PKCS1_v1_5` PKCS#1 v1.5 encryption, based on RSA key pairs +`Crypto.Cipher.PKCS1_OAEP` PKCS#1 OAEP encryption, based on RSA key pairs +========================== ======================= + +:undocumented: __revision__, __package__, _AES, _ARC2, _ARC4, _Blowfish + _CAST, _DES, _DES3, _XOR +""" + +__all__ = ['AES', 'ARC2', 'ARC4', + 'Blowfish', 'CAST', 'DES', 'DES3', + 'XOR', + 'PKCS1_v1_5', 'PKCS1_OAEP' + ] + +__revision__ = "$Id$" + + diff --git a/plugin.video.alfa/lib/Crypto/Cipher/blockalgo.py b/plugin.video.alfa/lib/Crypto/Cipher/blockalgo.py new file mode 100755 index 00000000..b081c959 --- /dev/null +++ b/plugin.video.alfa/lib/Crypto/Cipher/blockalgo.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +# +# Cipher/blockalgo.py +# +# =================================================================== +# The contents of this file are dedicated to the public domain. To +# the extent that dedication to the public domain is not available, +# everyone is granted a worldwide, perpetual, royalty-free, +# non-exclusive license to exercise all rights associated with the +# contents of this file for any purpose whatsoever. +# No rights are reserved. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# =================================================================== +"""Module with definitions common to all block ciphers.""" + +import sys +if sys.version_info[0] == 2 and sys.version_info[1] == 1: + from Crypto.Util.py21compat import * +from Crypto.Util.py3compat import * + +#: *Electronic Code Book (ECB)*. +#: This is the simplest encryption mode. Each of the plaintext blocks +#: is directly encrypted into a ciphertext block, independently of +#: any other block. This mode exposes frequency of symbols +#: in your plaintext. Other modes (e.g. *CBC*) should be used instead. +#: +#: See `NIST SP800-38A`_ , Section 6.1 . +#: +#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf +MODE_ECB = 1 + +#: *Cipher-Block Chaining (CBC)*. Each of the ciphertext blocks depends +#: on the current and all previous plaintext blocks. An Initialization Vector +#: (*IV*) is required. +#: +#: The *IV* is a data block to be transmitted to the receiver. +#: The *IV* can be made public, but it must be authenticated by the receiver and +#: it should be picked randomly. +#: +#: See `NIST SP800-38A`_ , Section 6.2 . +#: +#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf +MODE_CBC = 2 + +#: *Cipher FeedBack (CFB)*. This mode is similar to CBC, but it transforms +#: the underlying block cipher into a stream cipher. Plaintext and ciphertext +#: are processed in *segments* of **s** bits. The mode is therefore sometimes +#: labelled **s**-bit CFB. An Initialization Vector (*IV*) is required. +#: +#: When encrypting, each ciphertext segment contributes to the encryption of +#: the next plaintext segment. +#: +#: This *IV* is a data block to be transmitted to the receiver. 
+#: The *IV* can be made public, but it should be picked randomly. +#: Reusing the same *IV* for encryptions done with the same key leads to +#: catastrophic cryptographic failures. +#: +#: See `NIST SP800-38A`_ , Section 6.3 . +#: +#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf +MODE_CFB = 3 + +#: This mode should not be used. +MODE_PGP = 4 + +#: *Output FeedBack (OFB)*. This mode is very similar to CBC, but it +#: transforms the underlying block cipher into a stream cipher. +#: The keystream is the iterated block encryption of an Initialization Vector (*IV*). +#: +#: The *IV* is a data block to be transmitted to the receiver. +#: The *IV* can be made public, but it should be picked randomly. +#: +#: Reusing the same *IV* for encryptions done with the same key leads to +#: catastrophic cryptographic failures. +#: +#: See `NIST SP800-38A`_ , Section 6.4 . +#: +#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf +MODE_OFB = 5 + +#: *CounTeR (CTR)*. This mode is very similar to ECB, in that +#: encryption of one block is done independently of all other blocks. +#: Unlike ECB, the block *position* contributes to the encryption and no +#: information leaks about symbol frequency. +#: +#: Each message block is associated with a *counter* which must be unique +#: across all messages that get encrypted with the same key (not just within +#: the same message). The counter is as big as the block size. +#: +#: Counters can be generated in several ways. The most straightforward one is +#: to choose an *initial counter block* (which can be made public, similarly +#: to the *IV* for the other modes) and increment its lowest **m** bits by +#: one (modulo *2^m*) for each block. In most cases, **m** is chosen to be half +#: the block size. +#: +#: Reusing the same *initial counter block* for encryptions done with the same +#: key leads to catastrophic cryptographic failures. +#: +#: See `NIST SP800-38A`_ , Section 6.5 (for the mode) and Appendix B (for how +#: to manage the *initial counter block*). +#: +#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf +MODE_CTR = 6 + +#: OpenPGP. This mode is a variant of CFB, and it is only used in PGP and OpenPGP_ applications. +#: An Initialization Vector (*IV*) is required. +#: +#: Unlike CFB, the IV is not transmitted to the receiver. Instead, the *encrypted* IV is. +#: The IV is a random data block. Two of its bytes are duplicated to act as a checksum +#: for the correctness of the key. The encrypted IV is therefore 2 bytes longer than +#: the clean IV. +#: +#: .. _OpenPGP: http://tools.ietf.org/html/rfc4880 +MODE_OPENPGP = 7 + +def _getParameter(name, index, args, kwargs, default=None): + """Find a parameter in the tuple and dictionary arguments that a function receives""" + param = kwargs.get(name) + if len(args)>index: + if param: + raise ValueError("Parameter '%s' is specified twice" % name) + param = args[index] + return param or default + +class BlockAlgo: + """Class modelling an abstract block cipher.""" + + def __init__(self, factory, key, *args, **kwargs): + self.mode = _getParameter('mode', 0, args, kwargs, default=MODE_ECB) + self.block_size = factory.block_size + + if self.mode != MODE_OPENPGP: + self._cipher = factory.new(key, *args, **kwargs) + self.IV = self._cipher.IV + else: + # OPENPGP mode. For details, see 13.9 in RFC 4880.
+ # + # A few members are specifically created for this mode: + # - _encrypted_IV, set in this constructor + # - _done_first_block, set to True after the first encryption + # - _done_last_block, set to True after a partial block is processed + + self._done_first_block = False + self._done_last_block = False + self.IV = _getParameter('iv', 1, args, kwargs) + if not self.IV: + raise ValueError("MODE_OPENPGP requires an IV") + + # Instantiate a temporary cipher to process the IV + IV_cipher = factory.new(key, MODE_CFB, + b('\x00')*self.block_size, # IV for CFB + segment_size=self.block_size*8) + + # The cipher will be used for... + if len(self.IV) == self.block_size: + # ... encryption + self._encrypted_IV = IV_cipher.encrypt( + self.IV + self.IV[-2:] + # Plaintext + b('\x00')*(self.block_size-2) # Padding + )[:self.block_size+2] + elif len(self.IV) == self.block_size+2: + # ... decryption + self._encrypted_IV = self.IV + self.IV = IV_cipher.decrypt(self.IV + # Ciphertext + b('\x00')*(self.block_size-2) # Padding + )[:self.block_size+2] + if self.IV[-2:] != self.IV[-4:-2]: + raise ValueError("Failed integrity check for OPENPGP IV") + self.IV = self.IV[:-2] + else: + raise ValueError("Length of IV must be %d or %d bytes for MODE_OPENPGP" + % (self.block_size, self.block_size+2)) + + # Instantiate the cipher for the real PGP data + self._cipher = factory.new(key, MODE_CFB, + self._encrypted_IV[-self.block_size:], + segment_size=self.block_size*8) + + def encrypt(self, plaintext): + """Encrypt data with the key and the parameters set at initialization. + + The cipher object is stateful; encryption of a long block + of data can be broken up into two or more calls to `encrypt()`. + That is, the statement: + + >>> c.encrypt(a) + c.encrypt(b) + + is always equivalent to: + + >>> c.encrypt(a+b) + + That also means that you cannot reuse an object for encrypting + or decrypting other data with the same key. + + This function does not perform any padding. + + - For `MODE_ECB`, `MODE_CBC`, and `MODE_OFB`, *plaintext* length + (in bytes) must be a multiple of *block_size*. + + - For `MODE_CFB`, *plaintext* length (in bytes) must be a multiple + of *segment_size*/8. + + - For `MODE_CTR`, *plaintext* can be of any length. + + - For `MODE_OPENPGP`, *plaintext* must be a multiple of *block_size*, + unless it is the last chunk of the message. + + :Parameters: + plaintext : byte string + The piece of data to encrypt. + :Return: + the encrypted data, as a byte string. It is as long as + *plaintext* with one exception: when encrypting the first message + chunk with `MODE_OPENPGP`, the encrypted IV is prepended to the + returned ciphertext. + """ + + if self.mode == MODE_OPENPGP: + padding_length = (self.block_size - len(plaintext) % self.block_size) % self.block_size + if padding_length>0: + # CFB mode requires ciphertext to have length multiple of block size, + # but PGP mode allows the last block to be shorter + if self._done_last_block: + raise ValueError("Only the last chunk is allowed to have length not multiple of %d bytes" + % self.block_size) + self._done_last_block = True + padded = plaintext + b('\x00')*padding_length + res = self._cipher.encrypt(padded)[:len(plaintext)] + else: + res = self._cipher.encrypt(plaintext) + if not self._done_first_block: + res = self._encrypted_IV + res + self._done_first_block = True + return res + + return self._cipher.encrypt(plaintext) + + def decrypt(self, ciphertext): + """Decrypt data with the key and the parameters set at initialization.
+ + The cipher object is stateful; decryption of a long block + of data can be broken up into two or more calls to `decrypt()`. + That is, the statement: + + >>> c.decrypt(a) + c.decrypt(b) + + is always equivalent to: + + >>> c.decrypt(a+b) + + That also means that you cannot reuse an object for encrypting + or decrypting other data with the same key. + + This function does not perform any padding. + + - For `MODE_ECB`, `MODE_CBC`, and `MODE_OFB`, *ciphertext* length + (in bytes) must be a multiple of *block_size*. + + - For `MODE_CFB`, *ciphertext* length (in bytes) must be a multiple + of *segment_size*/8. + + - For `MODE_CTR`, *ciphertext* can be of any length. + + - For `MODE_OPENPGP`, *ciphertext* must be a multiple of *block_size*, + unless it is the last chunk of the message. + + :Parameters: + ciphertext : byte string + The piece of data to decrypt. + :Return: the decrypted data (byte string, as long as *ciphertext*). + """ + if self.mode == MODE_OPENPGP: + padding_length = (self.block_size - len(ciphertext) % self.block_size) % self.block_size + if padding_length>0: + # CFB mode requires ciphertext to have length multiple of block size, + # but PGP mode allows the last block to be shorter + if self._done_last_block: + raise ValueError("Only the last chunk is allowed to have length not multiple of %d bytes" + % self.block_size) + self._done_last_block = True + padded = ciphertext + b('\x00')*padding_length + res = self._cipher.decrypt(padded)[:len(ciphertext)] + else: + res = self._cipher.decrypt(ciphertext) + return res + + return self._cipher.decrypt(ciphertext) + diff --git a/plugin.video.alfa/lib/Crypto/Util/Counter.py b/plugin.video.alfa/lib/Crypto/Util/Counter.py new file mode 100755 index 00000000..28a23bc8 --- /dev/null +++ b/plugin.video.alfa/lib/Crypto/Util/Counter.py @@ -0,0 +1,127 @@ +# -*- coding: ascii -*- +# +# Util/Counter.py : Fast counter for use with CTR-mode ciphers +# +# Written in 2008 by Dwayne C. Litzenberger +# +# =================================================================== +# The contents of this file are dedicated to the public domain. To +# the extent that dedication to the public domain is not available, +# everyone is granted a worldwide, perpetual, royalty-free, +# non-exclusive license to exercise all rights associated with the +# contents of this file for any purpose whatsoever. +# No rights are reserved. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# =================================================================== +"""Fast counter functions for CTR cipher modes. + +CTR is a mode of operation for symmetric block encryption or decryption. +Messages are divided into blocks, and the cipher operation takes +place on each block using the secret key and a unique *counter block*. + +The most straightforward way to fulfil the uniqueness property is +to start with an initial, random *counter block* value, and increment it as +the next block is processed. + +The block ciphers from `Crypto.Cipher` (when configured in *MODE_CTR* mode) +invoke a callable object (the *counter* parameter) to get the next *counter block*.
+Unfortunately, the Python calling protocol leads to major performance degradations. + +The counter functions instantiated by this module will be invoked directly +by the ciphers in `Crypto.Cipher`. The fact that the Python layer is bypassed +leads to more efficient (and faster) execution of CTR cipher modes. + +An example of usage is the following: + + >>> from Crypto.Cipher import AES + >>> from Crypto.Util import Counter + >>> + >>> pt = b'\x00'*1000000 + >>> ctr = Counter.new(128) + >>> cipher = AES.new(b'\x00'*16, AES.MODE_CTR, counter=ctr) + >>> ct = cipher.encrypt(pt) + +:undocumented: __package__ +""" +import sys +if sys.version_info[0] == 2 and sys.version_info[1] == 1: + from Crypto.Util.py21compat import * +from Crypto.Util.py3compat import * + +from Crypto.Util import _counter +import struct + +# Factory function +def new(nbits, prefix=b(""), suffix=b(""), initial_value=1, overflow=0, little_endian=False, allow_wraparound=False, disable_shortcut=False): + """Create a stateful counter block function suitable for CTR encryption modes. + + Each call to the function returns the next counter block. + Each counter block is made up by three parts:: + + prefix || counter value || postfix + + The counter value is incremented by one at each call. + + :Parameters: + nbits : integer + Length of the desired counter, in bits. It must be a multiple of 8. + prefix : byte string + The constant prefix of the counter block. By default, no prefix is + used. + suffix : byte string + The constant postfix of the counter block. By default, no suffix is + used. + initial_value : integer + The initial value of the counter. Default value is 1. + little_endian : boolean + If True, the counter number will be encoded in little endian format. + If False (default), in big endian format. + allow_wraparound : boolean + If True, the counter will simply restart from zero when it wraps around. + If False (default), the function will raise an *OverflowError* exception + as soon as the counter wraps around. + disable_shortcut : boolean + If True, do not make ciphers from `Crypto.Cipher` bypass the Python + layer when invoking the counter block function. + If False (default), bypass the Python layer. + :Returns: + The counter block function. + """ + + # Sanity-check the counter length + (nbytes, remainder) = divmod(nbits, 8) + if remainder != 0: + # In the future, we might support arbitrary bit lengths, but for now we don't.
+ raise ValueError("nbits must be a multiple of 8; got %d" % (nbits,)) + if nbytes < 1: + raise ValueError("nbits too small") + elif nbytes > 0xffff: + raise ValueError("nbits too large") + + initval = _encode(initial_value, nbytes, little_endian) + + if little_endian: + return _counter._newLE(bstr(prefix), bstr(suffix), initval, allow_wraparound=allow_wraparound, disable_shortcut=disable_shortcut) + else: + return _counter._newBE(bstr(prefix), bstr(suffix), initval, allow_wraparound=allow_wraparound, disable_shortcut=disable_shortcut) + +def _encode(n, nbytes, little_endian=False): + retval = [] + n = long(n) + for i in range(nbytes): + if little_endian: + retval.append(bchr(n & 0xff)) + else: + retval.insert(0, bchr(n & 0xff)) + n >>= 8 + return b("").join(retval) + +# vim:set ts=4 sw=4 sts=4 expandtab: diff --git a/plugin.video.alfa/lib/Crypto/Util/__init__.py b/plugin.video.alfa/lib/Crypto/Util/__init__.py new file mode 100755 index 00000000..682be069 --- /dev/null +++ b/plugin.video.alfa/lib/Crypto/Util/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# +# =================================================================== +# The contents of this file are dedicated to the public domain. To +# the extent that dedication to the public domain is not available, +# everyone is granted a worldwide, perpetual, royalty-free, +# non-exclusive license to exercise all rights associated with the +# contents of this file for any purpose whatsoever. +# No rights are reserved. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# =================================================================== + +"""Miscellaneous modules + +Contains useful modules that don't belong into any of the +other Crypto.* subpackages. + +Crypto.Util.number Number-theoretic functions (primality testing, etc.) +Crypto.Util.randpool Random number generation +Crypto.Util.RFC1751 Converts between 128-bit keys and human-readable + strings of words. +Crypto.Util.asn1 Minimal support for ASN.1 DER encoding + +""" + +__all__ = ['randpool', 'RFC1751', 'number', 'strxor', 'asn1' ] + +__revision__ = "$Id$" + diff --git a/plugin.video.alfa/lib/Crypto/Util/_counter.pyd b/plugin.video.alfa/lib/Crypto/Util/_counter.pyd new file mode 100755 index 00000000..6b84a1d9 Binary files /dev/null and b/plugin.video.alfa/lib/Crypto/Util/_counter.pyd differ diff --git a/plugin.video.alfa/lib/Crypto/Util/py3compat.py b/plugin.video.alfa/lib/Crypto/Util/py3compat.py new file mode 100755 index 00000000..8caf7be8 --- /dev/null +++ b/plugin.video.alfa/lib/Crypto/Util/py3compat.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +# +# Util/py3compat.py : Compatibility code for handling Py3k / Python 2.x +# +# Written in 2010 by Thorsten Behrens +# +# =================================================================== +# The contents of this file are dedicated to the public domain. 
To +# the extent that dedication to the public domain is not available, +# everyone is granted a worldwide, perpetual, royalty-free, +# non-exclusive license to exercise all rights associated with the +# contents of this file for any purpose whatsoever. +# No rights are reserved. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# =================================================================== + +"""Compatibility code for handling string/bytes changes from Python 2.x to Py3k + +In Python 2.x, strings (of type ''str'') contain binary data, including encoded +Unicode text (e.g. UTF-8). The separate type ''unicode'' holds Unicode text. +Unicode literals are specified via the u'...' prefix. Indexing or slicing +either type always produces a string of the same type as the original. +Data read from a file is always of ''str'' type. + +In Python 3.x, strings (type ''str'') may only contain Unicode text. The u'...' +prefix and the ''unicode'' type are now redundant. A new type (called +''bytes'') has to be used for binary data (including any particular +''encoding'' of a string). The b'...' prefix allows one to specify a binary +literal. Indexing or slicing a string produces another string. Slicing a byte +string produces another byte string, but the indexing operation produces an +integer. Data read from a file is of ''str'' type if the file was opened in +text mode, or of ''bytes'' type otherwise. + +Since PyCrypto aims at supporting both Python 2.x and 3.x, the following helper +functions are used to keep the rest of the library as independent as possible +from the actual Python version. + +In general, the code should always deal with binary strings, and use integers +instead of 1-byte character strings. + +b(s) + Take a text string literal (with no prefix or with u'...' prefix) and + make a byte string. +bchr(c) + Take an integer and make a 1-character byte string. +bord(c) + Take the result of indexing on a byte string and make an integer. +tobytes(s) + Take a text string, a byte string, or a sequence of characters taken from + a byte string and make a byte string.
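+ +As a doctest-style sketch of the helpers above (the results are shown as on +Python 3.x; the 2.x branches return plain ''str'' values instead): + + >>> b('GIF87a') + b'GIF87a' + >>> bchr(65) + b'A' + >>> bord(b('A')[0]) + 65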
+""" + +__revision__ = "$Id$" + +import sys + +if sys.version_info[0] == 2: + def b(s): + return s + def bchr(s): + return chr(s) + def bstr(s): + return str(s) + def bord(s): + return ord(s) + if sys.version_info[1] == 1: + def tobytes(s): + try: + return s.encode('latin-1') + except: + return ''.join(s) + else: + def tobytes(s): + if isinstance(s, unicode): + return s.encode("latin-1") + else: + return ''.join(s) +else: + def b(s): + return s.encode("latin-1") # utf-8 would cause some side-effects we don't want + def bchr(s): + return bytes([s]) + def bstr(s): + if isinstance(s,str): + return bytes(s,"latin-1") + else: + return bytes(s) + def bord(s): + return s + def tobytes(s): + if isinstance(s,bytes): + return s + else: + if isinstance(s,str): + return s.encode("latin-1") + else: + return bytes(s) + +# vim:set ts=4 sw=4 sts=4 expandtab: diff --git a/plugin.video.alfa/lib/Crypto/__init__.py b/plugin.video.alfa/lib/Crypto/__init__.py new file mode 100755 index 00000000..63da2bfb --- /dev/null +++ b/plugin.video.alfa/lib/Crypto/__init__.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# +# =================================================================== +# The contents of this file are dedicated to the public domain. To +# the extent that dedication to the public domain is not available, +# everyone is granted a worldwide, perpetual, royalty-free, +# non-exclusive license to exercise all rights associated with the +# contents of this file for any purpose whatsoever. +# No rights are reserved. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# =================================================================== + +"""Python Cryptography Toolkit + +A collection of cryptographic modules implementing various algorithms +and protocols. + +Subpackages: + +Crypto.Cipher + Secret-key (AES, DES, ARC4) and public-key encryption (RSA PKCS#1) algorithms +Crypto.Hash + Hashing algorithms (MD5, SHA, HMAC) +Crypto.Protocol + Cryptographic protocols (Chaffing, all-or-nothing transform, key derivation + functions). This package does not contain any network protocols. +Crypto.PublicKey + Public-key encryption and signature algorithms (RSA, DSA) +Crypto.Signature + Public-key signature algorithms (RSA PKCS#1) +Crypto.Util + Various useful modules and functions (long-to-string conversion, random number + generation, number theoretic functions) +""" + +__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util', 'Signature'] + +__version__ = '2.6' # See also below and setup.py +__revision__ = "$Id$" + +# New software should look at this instead of at __version__ above. +version_info = (2, 6, 0, 'final', 0) # See also above and setup.py + diff --git a/plugin.video.alfa/lib/Crypto/pct_warnings.py b/plugin.video.alfa/lib/Crypto/pct_warnings.py new file mode 100755 index 00000000..aca3b03b --- /dev/null +++ b/plugin.video.alfa/lib/Crypto/pct_warnings.py @@ -0,0 +1,60 @@ +# -*- coding: ascii -*- +# +# pct_warnings.py : PyCrypto warnings file +# +# Written in 2008 by Dwayne C. 
Litzenberger +# +# =================================================================== +# The contents of this file are dedicated to the public domain. To +# the extent that dedication to the public domain is not available, +# everyone is granted a worldwide, perpetual, royalty-free, +# non-exclusive license to exercise all rights associated with the +# contents of this file for any purpose whatsoever. +# No rights are reserved. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# =================================================================== + +# +# Base classes. All our warnings inherit from one of these in order to allow +# the user to specifically filter them. +# + +class CryptoWarning(Warning): + """Base class for PyCrypto warnings""" + +class CryptoDeprecationWarning(DeprecationWarning, CryptoWarning): + """Base PyCrypto DeprecationWarning class""" + +class CryptoRuntimeWarning(RuntimeWarning, CryptoWarning): + """Base PyCrypto RuntimeWarning class""" + +# +# Warnings that we might actually use +# + +class RandomPool_DeprecationWarning(CryptoDeprecationWarning): + """Issued when Crypto.Util.randpool.RandomPool is instantiated.""" + +class ClockRewindWarning(CryptoRuntimeWarning): + """Warning for when the system clock moves backwards.""" + +class GetRandomNumber_DeprecationWarning(CryptoDeprecationWarning): + """Issued when Crypto.Util.number.getRandomNumber is invoked.""" + +class PowmInsecureWarning(CryptoRuntimeWarning): + """Warning for when _fastmath is built without mpz_powm_sec""" + +# By default, we want this warning to be shown every time we compensate for +# clock rewinding. 
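+# (Usage sketch: because every class above derives from CryptoWarning, callers +# can silence the whole family with the standard warnings machinery, e.g. +# +# import warnings +# from Crypto.pct_warnings import CryptoWarning +# warnings.simplefilter('ignore', CryptoWarning) +# +# or target a single category such as PowmInsecureWarning instead.)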
+import warnings as _warnings +_warnings.filterwarnings('always', category=ClockRewindWarning, append=1) + +# vim:set ts=4 sw=4 sts=4 expandtab: diff --git a/plugin.video.alfa/lib/__init__.py b/plugin.video.alfa/lib/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/plugin.video.alfa/lib/aadecode.py b/plugin.video.alfa/lib/aadecode.py new file mode 100755 index 00000000..3af287cb --- /dev/null +++ b/plugin.video.alfa/lib/aadecode.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +import re + +from core.scrapertools import * + + +def decode(text): + text = re.sub(r"\s+|/\*.*?\*/", "", text) + data = text.split("+(゚Д゚)[゚o゚]")[1] + chars = data.split("+(゚Д゚)[゚ε゚]+")[1:] + + txt = "" + for char in chars: + char = char \ + .replace("(o゚ー゚o)", "u") \ + .replace("c", "0") \ + .replace("(゚Д゚)['0']", "c") \ + .replace("゚Θ゚", "1") \ + .replace("!+[]", "1") \ + .replace("-~", "1+") \ + .replace("o", "3") \ + .replace("_", "3") \ + .replace("゚ー゚", "4") \ + .replace("(+", "(") + char = re.sub(r'\((\d)\)', r'\1', char) + + c = "" + subchar = "" + for v in char: + c += v + try: + subchar += str(eval(c)); c = "" + except: + pass + if subchar != '': txt += subchar + "|" + txt = txt[:-1].replace('+', '') + + txt_result = "".join([chr(int(n, 8)) for n in txt.split('|')]) + + return toStringCases(txt_result) + + +def toStringCases(txt_result): + sum_base = "" + m3 = False + if ".toString(" in txt_result: + if "+(" in txt_result: + m3 = True + sum_base = "+" + find_single_match(txt_result, ".toString...(\d+).") + txt_pre_temp = find_multiple_matches(txt_result, "..(\d),(\d+).") + txt_temp = [(n, b) for b, n in txt_pre_temp] + else: + txt_temp = find_multiple_matches(txt_result, '(\d+)\.0.\w+.([^\)]+).') + for numero, base in txt_temp: + code = toString(int(numero), eval(base + sum_base)) + if m3: + txt_result = re.sub(r'"|\+', '', txt_result.replace("(" + base + "," + numero + ")", code)) + else: + txt_result = re.sub(r"'|\+", '', txt_result.replace(numero + ".0.toString(" + base + ")", code)) + return txt_result + + +def toString(number, base): + string = "0123456789abcdefghijklmnopqrstuvwxyz" + if number < base: + return string[number] + else: + return toString(number // base, base) + string[number % base] diff --git a/plugin.video.alfa/lib/btserver/__init__.py b/plugin.video.alfa/lib/btserver/__init__.py new file mode 100755 index 00000000..092c1737 --- /dev/null +++ b/plugin.video.alfa/lib/btserver/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from client import Client + +__all__ = ["Client"] diff --git a/plugin.video.alfa/lib/btserver/cache.py b/plugin.video.alfa/lib/btserver/cache.py new file mode 100755 index 00000000..6c59c677 --- /dev/null +++ b/plugin.video.alfa/lib/btserver/cache.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------ +# Manages the torrent server's cache: +# Stores the generated .torrent files +# Stores the .resume data of each torrent +# ------------------------------------------------------------ +import base64 +import os.path +import re + +try: + from python_libtorrent import get_libtorrent + + lt = get_libtorrent() +except Exception, e: + import libtorrent as lt + + +class Cache(object): + CACHE_DIR = '.cache' + + def __init__(self, path): + + if not os.path.isdir(path): + os.makedirs(path) + self.path = os.path.join(path, Cache.CACHE_DIR) + if not os.path.isdir(self.path): + os.makedirs(self.path) + + def _tname(self, info_hash): + return os.path.join(self.path, info_hash.upper() + '.torrent') + + def
_rname(self, info_hash): + return os.path.join(self.path, info_hash.upper() + '.resume') + + def save_resume(self, info_hash, data): + f = open(self._rname(info_hash), 'wb') + f.write(data) + f.close() + + def get_resume(self, url=None, info_hash=None): + if url: + info_hash = self._index.get(url) + if not info_hash: + return + rname = self._rname(info_hash) + if os.access(rname, os.R_OK): + f = open(rname, 'rb') + v = f.read() + f.close() + return v + + def file_complete(self, torrent): + info_hash = str(torrent.info_hash()) + nt = lt.create_torrent(torrent) + tname = self._tname(info_hash) + f = open(tname, 'wb') + f.write(lt.bencode(nt.generate())) + f.close() + + def get_torrent(self, url=None, info_hash=None): + if url: + info_hash = self._index.get(url) + if not info_hash: + return + tname = self._tname(info_hash) + if os.access(tname, os.R_OK): + return tname + + magnet_re = re.compile('xt=urn:btih:([0-9A-Za-z]+)') + hexa_chars = re.compile('^[0-9A-F]+$') + + @staticmethod + def hash_from_magnet(m): + res = Cache.magnet_re.search(m) + if res: + ih = res.group(1).upper() + if len(ih) == 40 and Cache.hexa_chars.match(ih): + return res.group(1).upper() + elif len(ih) == 32: + s = base64.b32decode(ih) + return "".join("{:02X}".format(ord(c)) for c in s) + else: + raise ValueError('Not BT magnet link') + + else: + raise ValueError('Not BT magnet link') diff --git a/plugin.video.alfa/lib/btserver/client.py b/plugin.video.alfa/lib/btserver/client.py new file mode 100755 index 00000000..dae6c021 --- /dev/null +++ b/plugin.video.alfa/lib/btserver/client.py @@ -0,0 +1,555 @@ +# -*- coding: utf-8 -*- + +try: + from python_libtorrent import get_libtorrent, get_platform + + lt = get_libtorrent() +except Exception, e: + import libtorrent as lt + +import os +import pickle +import random +import time +import urllib + +from cache import Cache +from core import logger +from dispatcher import Dispatcher +from file import File +from handler import Handler +from monitor import Monitor +from resume_data import ResumeData +from server import Server + + +class Client(object): + INITIAL_TRACKERS = ['udp://tracker.openbittorrent.com:80', + 'udp://tracker.istole.it:80', + 'udp://open.demonii.com:80', + 'udp://tracker.coppersurfer.tk:80', + 'udp://tracker.leechers-paradise.org:6969', + 'udp://exodus.desync.com:6969', + 'udp://tracker.publicbt.com:80'] + + VIDEO_EXTS = {'.avi': 'video/x-msvideo', '.mp4': 'video/mp4', '.mkv': 'video/x-matroska', + '.m4v': 'video/mp4', '.mov': 'video/quicktime', '.mpg': 'video/mpeg', '.ogv': 'video/ogg', + '.ogg': 'video/ogg', '.webm': 'video/webm', '.ts': 'video/mp2t', '.3gp': 'video/3gpp'} + + def __init__(self, url=None, port=None, ip=None, auto_shutdown=True, wait_time=20, timeout=5, auto_delete=True, + temp_path=None, is_playing_fnc=None, print_status=False): + + # server + if port: + self.port = port + else: + self.port = random.randint(8000, 8099) + if ip: + self.ip = ip + else: + self.ip = "127.0.0.1" + self.server = Server((self.ip, self.port), Handler, client=self) + + # Options + if temp_path: + self.temp_path = temp_path + else: + self.temp_path = os.path.join(os.path.dirname(__file__), "tmp") + self.is_playing_fnc = is_playing_fnc + self.timeout = timeout + self.auto_delete = auto_delete + self.wait_time = wait_time + self.auto_shutdown = auto_shutdown + self.buffer_size = 15 + self.last_pieces_priorize = 5 + self.state_file = "state" + self.torrent_paramss = {'save_path': self.temp_path, 'storage_mode': lt.storage_mode_t.storage_mode_sparse} + + # State + 
self.has_meta = False + self.meta = None + self.start_time = None + self.last_connect = 0 + self.connected = False + self.closed = False + self.file = None + self.files = None + self._th = None + + # Session + self._cache = Cache(self.temp_path) + self._ses = lt.session() + self._ses.listen_on(0, 0) + # Load the state file (if it exists) + if os.path.exists(os.path.join(self.temp_path, self.state_file)): + try: + f = open(os.path.join(self.temp_path, self.state_file), "rb") + state = pickle.load(f) + self._ses.load_state(state) + f.close() + except: + pass + + self._start_services() + + # Monitor & Dispatcher + self._monitor = Monitor(self) + if print_status: + self._monitor.add_listener(self.print_status) + self._monitor.add_listener(self._check_meta) + self._monitor.add_listener(self.save_state) + self._monitor.add_listener(self.priorize_start_file) + self._monitor.add_listener(self.announce_torrent) + + if self.auto_shutdown: + self._monitor.add_listener(self._auto_shutdown) + + self._dispatcher = Dispatcher(self) + self._dispatcher.add_listener(self._update_ready_pieces) + + # Start from the given URL + if url: + self.start_url(url) + + def get_play_list(self): + """ + Generates the playlist + """ + # Wait for the metadata + while not self.has_meta: + time.sleep(1) + + # Check that there are video files + if self.files: + if len(self.files) > 1: + return "http://" + self.ip + ":" + str(self.port) + "/playlist.pls" + else: + return "http://" + self.ip + ":" + str(self.port) + "/" + urllib.quote(self.files[0].path) + + def get_files(self): + """ + Generates the list of files + """ + # Wait for the metadata + while not self.has_meta: + time.sleep(1) + files = [] + + # Check that there are video files + if self.files: + # Build the dict with the files + for file in self.files: + n = file.path + u = "http://" + self.ip + ":" + str(self.port) + "/" + urllib.quote(n) + s = file.size + files.append({"name": n, "url": u, "size": s}) + + return files + + def _find_files(self, files, search=None): + """ + Finds the playable files in the torrent + """ + # Keep the files whose extension is in the list + videos = filter(lambda f: self.VIDEO_EXTS.has_key(os.path.splitext(f.path)[1]), files) + + if not videos: + raise Exception('No video files in torrent') + for v in videos: + videos[videos.index(v)].index = files.index(v) + return videos + + def set_file(self, f): + """ + Selects the file to be served and, therefore, prioritizes its download + """ + # Select the file we are going to serve + fmap = self.meta.map_file(f.index, 0, 1) + self.file = File(f.path, self.temp_path, f.index, f.size, fmap, self.meta.piece_length(), self) + self.prioritize_file() + + def prioritize_piece(self, pc, idx): + """ + Prioritizes a given piece + """ + piece_duration = 1000 + min_deadline = 2000 + dl = idx * piece_duration + min_deadline + self._th.set_piece_deadline(pc, dl, lt.deadline_flags.alert_when_available) + + if idx == 0: + tail_pieces = 9 + # Pieces before the first one are disabled + if (self.file.last_piece - pc) > tail_pieces: + for i in xrange(self.file.first_piece, pc): + self._th.piece_priority(i, 0) + self._th.reset_piece_deadline(i) + + # Pieces after the first one are enabled + for i in xrange(pc + 1, self.file.last_piece + 1): + self._th.piece_priority(i, 1) + + def prioritize_file(self): + """ + Prioritizes the pieces
corresponding to the file selected in the set_file() function + """ + priorities = [] + for i in xrange(self.meta.num_pieces()): + if i >= self.file.first_piece and i <= self.file.last_piece: + priorities.append(1) + else: + priorities.append(0) + self._th.prioritize_pieces(priorities) + + def download_torrent(self, url): + """ + Downloads a .torrent file + """ + from core import scrapertools + + data = scrapertools.downloadpage(url) + return data + + def start_url(self, uri): + """ + Starts the torrent download from the given URI; it accepts: + - A URL pointing to a .torrent file + - A magnet URL + - A local .torrent file + """ + + if self._th: + raise Exception('Torrent is already started') + + if uri.startswith('http://') or uri.startswith('https://'): + torrent_data = self.download_torrent(uri) + info = lt.torrent_info(lt.bdecode(torrent_data)) + tp = {'ti': info} + resume_data = self._cache.get_resume(info_hash=str(info.info_hash())) + if resume_data: + tp['resume_data'] = resume_data + + elif uri.startswith('magnet:'): + tp = {'url': uri} + resume_data = self._cache.get_resume(info_hash=Cache.hash_from_magnet(uri)) + if resume_data: + tp['resume_data'] = resume_data + + elif os.path.isfile(uri): + if os.access(uri, os.R_OK): + info = lt.torrent_info(uri) + tp = {'ti': info} + resume_data = self._cache.get_resume(info_hash=str(info.info_hash())) + if resume_data: + tp['resume_data'] = resume_data + else: + raise ValueError('Invalid torrent path %s' % uri) + else: + raise ValueError("Invalid torrent %s" % uri) + + tp.update(self.torrent_paramss) + self._th = self._ses.add_torrent(tp) + + for tr in self.INITIAL_TRACKERS: + self._th.add_tracker({'url': tr}) + + self._th.set_sequential_download(True) + self._th.force_reannounce() + self._th.force_dht_announce() + + self._monitor.start() + self._dispatcher.do_start(self._th, self._ses) + self.server.run() + + def stop(self): + """ + Stops the torrent and exits + """ + self._dispatcher.stop() + self._dispatcher.join() + self._monitor.stop() + self.server.stop() + self._dispatcher.stop() + if self._ses: + self._ses.pause() + if self._th: + self.save_resume() + self.save_state() + self._stop_services() + self._ses.remove_torrent(self._th, self.auto_delete) + del self._ses + self.closed = True + + def _start_services(self): + """ + Starts the libtorrent services: dht, lsd, upnp, natpmp + """ + self._ses.add_dht_router("router.bittorrent.com", 6881) + self._ses.add_dht_router("router.bitcomet.com", 554) + self._ses.add_dht_router("router.utorrent.com", 6881) + self._ses.start_dht() + self._ses.start_lsd() + self._ses.start_upnp() + self._ses.start_natpmp() + + def _stop_services(self): + """ + Stops the libtorrent services: dht, lsd, upnp, natpmp + """ + self._ses.stop_natpmp() + self._ses.stop_upnp() + self._ses.stop_lsd() + self._ses.stop_dht() + + def save_resume(self): + """ + Saves the resume metadata so a download can be restarted more quickly + """ + if self._th.need_save_resume_data() and self._th.is_valid() and self.meta: + r = ResumeData(self) + start = time.time() + while (time.time() - start) <= 5: + if r.data or r.failed: + break + time.sleep(0.1) + if r.data: + self._cache.save_resume(str(self._th.info_hash()), lt.bencode(r.data)) + + @property + def status(self): + """ + Returns the torrent status + """ + if self._th: + s = self._th.status() + # Download Rate + s._download_rate =
s.download_rate / 1000 + + # File progress + if self.file: + pieces = s.pieces[self.file.first_piece:self.file.last_piece] + progress = float(sum(pieces)) / len(pieces) + else: + progress = 0 + + s.progress_file = progress * 100 + + # File size + if self.file: + s.file_size = self.file.size / 1048576.0 + else: + s.file_size = 0 + + # Buffer state + if self.file and self.file.cursor: # With an active connection: available data vs. player position + percent = len(self.file.cursor.cache) + percent = percent * 100 / self.buffer_size + s.buffer = int(percent) + + elif self.file: # Without an active connection: pre-buffer before starting + # The pre-buffer has two parts: + # 1. A buffer at the start of the file so the player can start without stuttering + # 2. A buffer at the end of the file (for some files the player inspects the end of the file before starting) + bp = [] + + # The start buffer size is the total buffer size minus the end buffer size + first_pieces_priorize = self.buffer_size - self.last_pieces_priorize + + # Check which parts of the start buffer are available + for x in range(first_pieces_priorize): + if self._th.have_piece(self.file.first_piece + x): + bp.append(True) + else: + bp.append(False) + + # Check which parts of the end buffer are available + for x in range(self.last_pieces_priorize): + if self._th.have_piece(self.file.last_piece - x): + bp.append(True) + else: + bp.append(False) + + s.buffer = int(sum(bp) * 100 / self.buffer_size) + + else: # If no file is selected: no buffer + s.buffer = 0 + + # Time left before closing when the timeout is active + if self.auto_shutdown: + if self.connected: + if self.timeout: + s.timeout = int(self.timeout - (time.time() - self.last_connect - 1)) + if self.file and self.file.cursor: + s.timeout = self.timeout + if s.timeout < 0: s.timeout = "Cerrando" + else: + s.timeout = "---" + else: + if self.start_time and self.wait_time: + s.timeout = int(self.wait_time - (time.time() - self.start_time - 1)) + if s.timeout < 0: s.timeout = "Cerrando" + else: + s.timeout = "---" + + else: + s.timeout = "Off" + + # Download state + STATE_STR = ['En cola', 'Comprobando', 'Descargando metadata', \ + 'Descargando', 'Finalizado', 'Seeding', 'Allocating', 'Comprobando fastresume'] + s.str_state = STATE_STR[s.state] + + # DHT state + if self._ses.dht_state() is not None: + s.dht_state = "On" + s.dht_nodes = self._ses.status().dht_nodes + else: + s.dht_state = "Off" + s.dht_nodes = 0 + + # Number of trackers + s.trackers = len(self._th.trackers()) + + # Peer origin + s.dht_peers = 0 + s.trk_peers = 0 + s.pex_peers = 0 + s.lsd_peers = 0 + + for peer in self._th.get_peer_info(): + if peer.source & 1: + s.trk_peers += 1 + if peer.source & 2: + s.dht_peers += 1 + if peer.source & 4: + s.pex_peers += 1 + if peer.source & 8: + s.lsd_peers += 1 + + return s + + """ + Services: + - These functions run automatically every so often on another thread.
+ - They are executed while the torrent is active; some of them can be disabled + by configuration, for example writing to the log + """ + + def _auto_shutdown(self, *args, **kwargs): + """ + Service in charge of automatically shutting down the server + """ + if self.file and self.file.cursor: + self.last_connect = time.time() + self.connected = True + + if self.is_playing_fnc and self.is_playing_fnc(): + self.last_connect = time.time() + self.connected = True + + if self.auto_shutdown: + # Shutdown because the player was closed + if self.connected and self.is_playing_fnc and not self.is_playing_fnc(): + if time.time() - self.last_connect - 1 > self.timeout: + self.stop() + + # Shutdown because no connection was ever made + if (not self.file or not self.file.cursor) and self.start_time and self.wait_time and not self.connected: + if time.time() - self.start_time - 1 > self.wait_time: + self.stop() + + # Shutdown after the last connection + if (not self.file or not self.file.cursor) and self.timeout and self.connected and not self.is_playing_fnc: + if time.time() - self.last_connect - 1 > self.timeout: + self.stop() + + def announce_torrent(self): + """ + Service in charge of announcing the torrent + """ + self._th.force_reannounce() + self._th.force_dht_announce() + + def save_state(self): + """ + Service in charge of saving the session state + """ + state = self._ses.save_state() + f = open(os.path.join(self.temp_path, self.state_file), 'wb') + pickle.dump(state, f) + f.close() + + def _update_ready_pieces(self, alert_type, alert): + """ + Service in charge of reporting that a piece is available + """ + if alert_type == 'read_piece_alert' and self.file: + self.file.update_piece(alert.piece, alert.buffer) + + def _check_meta(self): + """ + Service in charge of checking whether the metadata has been downloaded + """ + if self.status.state >= 3 and self.status.state <= 5 and not self.has_meta: + + # Store the metadata + self.meta = self._th.get_torrent_info() + + # Get the list of files from the metadata + fs = self.meta.files() + if isinstance(fs, list): + files = fs + else: + files = [fs.at(i) for i in xrange(fs.num_files())] + + # Store the list of files + self.files = self._find_files(files) + + # Mark the first file as active + self.set_file(self.files[0]) + + # Consider the download started + self.start_time = time.time() + + # Save the .torrent in the cache + self._cache.file_complete(self._th.get_torrent_info()) + + self.has_meta = True + + def priorize_start_file(self): + ''' + Service in charge of prioritizing the start and end of the file when there is no connection + ''' + if self.file and not self.file.cursor: + num_start_pieces = self.buffer_size - self.last_pieces_priorize # Number of pieces to prioritize at the start + num_end_pieces = self.last_pieces_priorize # Number of pieces to prioritize at the end + + pieces_count = 0 + # Prioritize the last pieces + for y in range(self.file.last_piece - num_end_pieces, self.file.last_piece + 1): + if not self._th.have_piece(y): + self.prioritize_piece(y, pieces_count) + pieces_count += 1 + + # Prioritize the first pieces + for y in range(self.file.first_piece, self.file.last_piece + 1): + if not self._th.have_piece(y): + if pieces_count == self.buffer_size: + break + self.prioritize_piece(y, pieces_count) + pieces_count += 1 + + def print_status(self): + ''' + Service in charge of logging the download status + ''' + s = self.status + if self.file: + archivo = self.file.index + else: + archivo =
"N/D" + logger.info( + '%.2f%% de %.1fMB %s | %.1f kB/s | #%s %d%% | AutoClose: %s | S: %d(%d) P: %d(%d)) | TRK: %d DHT: %d PEX: %d LSD %d | DHT:%s (%d) | Trakers: %d' % \ + (s.progress_file, s.file_size, s.str_state, s._download_rate, archivo, s.buffer, s.timeout, s.num_seeds, \ + s.num_complete, s.num_peers, s.num_incomplete, s.trk_peers, s.dht_peers, s.pex_peers, s.lsd_peers, + s.dht_state, s.dht_nodes, s.trackers)) diff --git a/plugin.video.alfa/lib/btserver/cursor.py b/plugin.video.alfa/lib/btserver/cursor.py new file mode 100755 index 00000000..d49a4421 --- /dev/null +++ b/plugin.video.alfa/lib/btserver/cursor.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- + +from threading import Lock, Event + + +class Cursor(object): + def __init__(self, file): + self._file = file + self.pos = 0 + self.timeout = 30 + self.cache_size = self._file._client.buffer_size + self.cache = {} + self.lock = Lock() + self.event = Event() + self.cache_first = 0 + + def fill_cache(self, first): + self.cache_first = first + + with self.lock: + for p in sorted(self.cache): + if p < first: del self.cache[p] + + self.event.clear() + for i in xrange(first, first + self.cache_size): + if i <= self._file.last_piece: + self._file._client.prioritize_piece(i, i - first) + + def has_piece(self, n): + with self.lock: + return n in self.cache + + def _wait_piece(self, pc_no): + while not self.has_piece(pc_no): + self.fill_cache(pc_no) + self.event.wait(self.timeout) + + def _get_piece(self, n): + with self.lock: + if not n in self.cache: + raise ValueError('index of of scope of current cache') + return self.cache[n] + + def get_piece(self, n): + self._wait_piece(n) + return self._get_piece(n) + + def close(self): + self._file.cursor = None + + def read(self, size=None): + data = "" + max_size = self._file.size - self.pos + if not size: + size = max_size + else: + size = min(size, max_size) + + if size: + pc_no, ofs = self._file.map_piece(self.pos) + data = self.get_piece(pc_no)[ofs: ofs + size] + + if len(data) < size: + remains = size - len(data) + pc_no += 1 + self.fill_cache(pc_no) + while remains and self.has_piece(pc_no): + sz = min(remains, self._file.piece_size) + data += self.get_piece(pc_no)[:sz] + remains -= sz + if remains: + pc_no += 1 + self.fill_cache(pc_no) + + self.pos += len(data) + + return data + + def seek(self, n): + if n > self._file.size: + n = self._file.size + elif n < 0: + raise ValueError('Seeking negative') + self.pos = n + + def tell(self): + return self.pos + + def update_piece(self, n, data): + with self.lock: + pcs = sorted(self.cache) + if len(pcs) < self.cache_size: + if len(pcs): + new = max(pcs) + 1 + else: + new = self.cache_first + if n == new: + self.cache[n] = data + if n == self.cache_first: + self.event.set() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() diff --git a/plugin.video.alfa/lib/btserver/dispatcher.py b/plugin.video.alfa/lib/btserver/dispatcher.py new file mode 100755 index 00000000..e9dda1c7 --- /dev/null +++ b/plugin.video.alfa/lib/btserver/dispatcher.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +from monitor import Monitor + +try: + from python_libtorrent import get_libtorrent + + lt = get_libtorrent() +except Exception, e: + import libtorrent as lt + + +class Dispatcher(Monitor): + def __init__(self, client): + super(Dispatcher, self).__init__(client) + + def do_start(self, th, ses): + self._th = th + self._ses = ses + self.start() + + def run(self): + if not self._ses: + raise Exception('Invalid state, session 
is not initialized') + + while self.running: + a = self._ses.wait_for_alert(1000) + if a: + alerts = self._ses.pop_alerts() + for alert in alerts: + with self.lock: + for cb in self.listeners: + cb(lt.alert.what(alert), alert) diff --git a/plugin.video.alfa/lib/btserver/file.py b/plugin.video.alfa/lib/btserver/file.py new file mode 100755 index 00000000..ab1f98d6 --- /dev/null +++ b/plugin.video.alfa/lib/btserver/file.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +import os + +from cursor import Cursor + + +class File(object): + def __init__(self, path, base, index, size, fmap, piece_size, client): + self._client = client + self.path = path + self.base = base + self.index = index + self.size = size + + self.piece_size = piece_size + + self.full_path = os.path.join(base, path) + self.first_piece = fmap.piece + self.offset = fmap.start + self.last_piece = self.first_piece + max((size - 1 + fmap.start), 0) // piece_size + + self.cursor = None + + def create_cursor(self, offset=None): + self.cursor = Cursor(self) + if offset: + self.cursor.seek(offset) + return self.cursor + + def map_piece(self, ofs): + return self.first_piece + (ofs + self.offset) // self.piece_size, (ofs + self.offset) % self.piece_size + + def update_piece(self, n, data): + if self.cursor: + self.cursor.update_piece(n, data) + + def __str__(self): + return self.path diff --git a/plugin.video.alfa/lib/btserver/handler.py b/plugin.video.alfa/lib/btserver/handler.py new file mode 100755 index 00000000..343c1735 --- /dev/null +++ b/plugin.video.alfa/lib/btserver/handler.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- + +import BaseHTTPServer +import os +import re +import time +import types +import urllib +import urlparse + +RANGE_RE = re.compile(r'bytes=(\d+)-') + + +def parse_range(range): # @ReservedAssignment + if range: + m = RANGE_RE.match(range) + if m: + try: + return int(m.group(1)) + except: + pass + return 0 + + +class Handler(BaseHTTPServer.BaseHTTPRequestHandler): + protocol_version = 'HTTP/1.1' + + def log_message(self, format, *args): + pass + + def do_GET(self): + if self.server.request: + self.server.request.wfile.close() + self.server.request = self + + if self.do_HEAD(): + f = self.server.file.create_cursor(self.offset) + while f == self.server.file.cursor: + buf = f.read(1024) + if buf: + try: + self.wfile.write(buf) + except: + break + else: + break + f.close() + + def send_pls(self, files): + playlist = "[playlist]\n\n" + for x, f in enumerate(files): + playlist += "File" + str(x + 1) + "=http://127.0.0.1:" + str(self.server._client.port) + "/" + urllib.quote( + f.path) + "\n" + playlist += "Title" + str(x + 1) + "=" + f.path + "\n" + playlist += "NumberOfEntries=" + str(len(files)) + "\n" + playlist += "Version=2" + self.send_response(200, 'OK') + self.send_header("Content-Length", str(len(playlist))) + self.finish_header() + self.wfile.write(playlist) + + def do_HEAD(self): + url = urlparse.urlparse(self.path).path + + '''Wait for the list of files ''' + while not self.server._client.files: + time.sleep(1) + + files = self.server._client.files + self.server.file = self.server._client.file + + '''Create the PLS playlist ''' + if url == "/playlist.pls": + self.send_pls(files) + return False + + '''Change the file to download ''' + if not self.server.file or urllib.unquote(url) != '/' + self.server.file.path: + file = urllib.unquote(url) + client = self.server._client + for f in client.files: + if file == '/' + f.path: + client.set_file(f) + self.server.file = client.file + break + + while not self.server._client.has_meta:
time.sleep(1) + if self.server.file and urllib.unquote(url) == '/' + self.server.file.path: + self.offset = 0 + size, mime = self._file_info() + range = parse_range(self.headers.get('Range', None)) + if range: + self.offset = range + range = (range, size - 1, size) + + self.send_resp_header(mime, size, range) + return True + + else: + self.send_error(404, 'Not Found') + + def _file_info(self): + size = self.server.file.size + ext = os.path.splitext(self.server.file.path)[1] + mime = self.server._client.VIDEO_EXTS.get(ext) + if not mime: + mime = 'application/octet-stream' + return size, mime + + def send_resp_header(self, cont_type, cont_length, range=False): # @ReservedAssignment + if range: + self.send_response(206, 'Partial Content') + else: + self.send_response(200, 'OK') + + self.send_header('Content-Type', cont_type) + self.send_header('transferMode.dlna.org', 'Streaming') + self.send_header('contentFeatures.dlna.org', + 'DLNA.ORG_OP=01;DLNA.ORG_CI=0;DLNA.ORG_FLAGS=01700000000000000000000000000000') + self.send_header('Accept-Ranges', 'bytes') + + if range: + if isinstance(range, (types.TupleType, types.ListType)) and len(range) == 3: + self.send_header('Content-Range', 'bytes %d-%d/%d' % range) + self.send_header('Content-Length', range[1] - range[0] + 1) + else: + raise ValueError('Invalid range value') + else: + self.send_header('Content-Length', cont_length) + self.finish_header() + + def finish_header(self): + self.send_header('Connection', 'close') + self.end_headers() diff --git a/plugin.video.alfa/lib/btserver/monitor.py b/plugin.video.alfa/lib/btserver/monitor.py new file mode 100755 index 00000000..1b3f772c --- /dev/null +++ b/plugin.video.alfa/lib/btserver/monitor.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +from threading import Thread, Lock, Event + + +class Monitor(Thread): + def __init__(self, client): + Thread.__init__(self) + self.daemon = True + self.listeners = [] + self.lock = Lock() + self.wait_event = Event() + self.running = True + self.client = client + self.ses = None + + def stop(self): + self.running = False + self.wait_event.set() + + def add_listener(self, cb): + with self.lock: + if cb not in self.listeners: + self.listeners.append(cb) + + def remove_listener(self, cb): + with self.lock: + try: + self.listeners.remove(cb) + except ValueError: + pass + + def remove_all_listeners(self): + with self.lock: + self.listeners = [] + + def run(self): + while self.running: + with self.lock: + for cb in self.listeners: + cb() + + self.wait_event.wait(1.0) diff --git a/plugin.video.alfa/lib/btserver/resume_data.py b/plugin.video.alfa/lib/btserver/resume_data.py new file mode 100755 index 00000000..90eacfe9 --- /dev/null +++ b/plugin.video.alfa/lib/btserver/resume_data.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- + +class ResumeData(object): + def __init__(self, client): + self.data = None + self.failed = False + client._dispatcher.add_listener(self._process_alert) + client._th.save_resume_data() + + def _process_alert(self, t, alert): + if t == 'save_resume_data_failed_alert': + self.failed = True + + elif t == 'save_resume_data_alert': + self.data = alert.resume_data diff --git a/plugin.video.alfa/lib/btserver/server.py b/plugin.video.alfa/lib/btserver/server.py new file mode 100755 index 00000000..29a7acaa --- /dev/null +++ b/plugin.video.alfa/lib/btserver/server.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +import BaseHTTPServer +import traceback +from SocketServer import ThreadingMixIn +from threading import Thread + + +class
Server(ThreadingMixIn, BaseHTTPServer.HTTPServer): + daemon_threads = True + timeout = 1 + + def __init__(self, address, handler, client): + BaseHTTPServer.HTTPServer.__init__(self, address, handler) + self._client = client + self.file = None + self.running = True + self.request = None + + def stop(self): + self.running = False + + def serve(self): + while self.running: + try: + self.handle_request() + except: + print traceback.format_exc() + + def run(self): + t = Thread(target=self.serve, name='HTTP Server') + t.daemon = True + t.start() + + def handle_error(self, request, client_address): + if "socket.py" not in traceback.format_exc(): + print traceback.format_exc() diff --git a/plugin.video.alfa/lib/jjdecode.py b/plugin.video.alfa/lib/jjdecode.py new file mode 100755 index 00000000..cb102210 --- /dev/null +++ b/plugin.video.alfa/lib/jjdecode.py @@ -0,0 +1,312 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Python version of the jjdecode function written by Syed Zainudeen +# http://csc.cs.utm.my/syed/images/files/jjdecode/jjdecode.html +# +# +NCR/CRC! [ReVeRsEr] - crackinglandia@gmail.com +# Thanks to Jose Miguel Esparza (@EternalTodo) for the final push to make it work! +# + +import re + +class JJDecoder(object): + + def __init__(self, jj_encoded_data): + self.encoded_str = jj_encoded_data + + + def clean(self): + return re.sub('^\s+|\s+$', '', self.encoded_str) + + + def checkPalindrome(self, Str): + startpos = -1 + endpos = -1 + gv, gvl = -1, -1 + + index = Str.find('"\'\\"+\'+",') + + if index == 0: + startpos = Str.find('$$+"\\""+') + 8 + endpos = Str.find('"\\"")())()') + gv = Str[Str.find('"\'\\"+\'+",')+9:Str.find('=~[]')] + gvl = len(gv) + else: + gv = Str[0:Str.find('=')] + gvl = len(gv) + startpos = Str.find('"\\""+') + 5 + endpos = Str.find('"\\"")())()') + + return (startpos, endpos, gv, gvl) + + + def decode(self): + + self.encoded_str = self.clean() + startpos, endpos, gv, gvl = self.checkPalindrome(self.encoded_str) + + if startpos == endpos: + raise Exception('No data!') + + data = self.encoded_str[startpos:endpos] + + b = ['___+', '__$+', '_$_+', '_$$+', '$__+', '$_$+', '$$_+', '$$$+', '$___+', '$__$+', '$_$_+', '$_$$+', '$$__+', '$$_$+', '$$$_+', '$$$$+'] + + str_l = '(![]+"")[' + gv + '._$_]+' + str_o = gv + '._$+' + str_t = gv + '.__+' + str_u = gv + '._+' + + str_hex = gv + '.' + + str_s = '"' + gvsig = gv + '.'
+
+        str_quote = '\\\\\\"'
+        str_slash = '\\\\\\\\'
+
+        str_lower = '\\\\"+'
+        str_upper = '\\\\"+' + gv + '._+'
+
+        str_end = '"+'
+
+        out = ''
+        while data != '':
+            # l o t u
+            if data.find(str_l) == 0:
+                data = data[len(str_l):]
+                out += 'l'
+                continue
+            elif data.find(str_o) == 0:
+                data = data[len(str_o):]
+                out += 'o'
+                continue
+            elif data.find(str_t) == 0:
+                data = data[len(str_t):]
+                out += 't'
+                continue
+            elif data.find(str_u) == 0:
+                data = data[len(str_u):]
+                out += 'u'
+                continue
+
+            # 0123456789abcdef
+            if data.find(str_hex) == 0:
+                data = data[len(str_hex):]
+
+                for i in range(len(b)):
+                    if data.find(b[i]) == 0:
+                        data = data[len(b[i]):]
+                        out += '%x' % i
+                        break
+                continue
+
+            # start of s block
+            if data.find(str_s) == 0:
+                data = data[len(str_s):]
+
+                # check if "R
+                if data.find(str_upper) == 0:  # r4 n >= 128
+                    data = data[len(str_upper):]  # skip sig
+                    ch_str = ''
+                    for i in range(2):  # shouldn't be more than 2 hex chars
+                        # gv + "."+b[ c ]
+                        if data.find(gvsig) == 0:
+                            data = data[len(gvsig):]
+                            for k in range(len(b)):  # for every entry in b
+                                if data.find(b[k]) == 0:
+                                    data = data[len(b[k]):]
+                                    ch_str = '%x' % k
+                                    break
+                        else:
+                            break
+
+                    out += chr(int(ch_str, 16))
+                    continue
+
+                elif data.find(str_lower) == 0:  # r3 check if "R // n < 128
+                    data = data[len(str_lower):]  # skip sig
+
+                    ch_str = ''
+                    ch_lotux = ''
+                    temp = ''
+                    b_checkR1 = 0
+                    for j in range(3):  # shouldn't be more than 3 octal chars
+                        if j > 1:  # lotu check
+                            if data.find(str_l) == 0:
+                                data = data[len(str_l):]
+                                ch_lotux = 'l'
+                                break
+                            elif data.find(str_o) == 0:
+                                data = data[len(str_o):]
+                                ch_lotux = 'o'
+                                break
+                            elif data.find(str_t) == 0:
+                                data = data[len(str_t):]
+                                ch_lotux = 't'
+                                break
+                            elif data.find(str_u) == 0:
+                                data = data[len(str_u):]
+                                ch_lotux = 'u'
+                                break
+
+                        # gv + "."+b[ c ]
+                        if data.find(gvsig) == 0:
+                            temp = data[len(gvsig):]
+                            for k in range(8):  # for every entry in b octal
+                                if temp.find(b[k]) == 0:
+                                    if int(ch_str + str(k), 8) > 128:
+                                        b_checkR1 = 1
+                                        break
+
+                                    ch_str += str(k)
+                                    data = data[len(gvsig):]  # skip gvsig
+                                    data = data[len(b[k]):]
+                                    break
+
+                            if b_checkR1 == 1:
+                                if data.find(str_hex) == 0:  # 0123456789abcdef
+                                    data = data[len(str_hex):]
+                                    # check every element of hex decode string for a match
+                                    for i in range(len(b)):
+                                        if data.find(b[i]) == 0:
+                                            data = data[len(b[i]):]
+                                            ch_lotux = '%x' % i
+                                            break
+                                    break
+                        else:
+                            break
+
+                    out += chr(int(ch_str, 8)) + ch_lotux
+                    continue
+
+                else:  # "S ----> "SR or "S+
+                    # if there is, loop s until R or +
+
+                    # if there is no matching s block, throw error
+
+                    match = 0
+                    n = None
+
+                    # searching for matching pure s block
+                    while True:
+                        n = ord(data[0])
+                        if data.find(str_quote) == 0:
+                            data = data[len(str_quote):]
+                            out += '"'
+                            match += 1
+                            continue
+                        elif data.find(str_slash) == 0:
+                            data = data[len(str_slash):]
+                            out += '\\'
+                            match += 1
+                            continue
+                        elif data.find(str_end) == 0:  # reached end of S block ?
+                            if match == 0:
+                                raise Exception('+ no match S block: ' + data)
+                            data = data[len(str_end):]
+                            break  # step out of the while loop
+                        elif data.find(str_upper) == 0:  # r4 reached end of S block ? - check if "R n >= 128
+                            if match == 0:
+                                raise Exception('no match S block n>128: ' + data)
+                            data = data[len(str_upper):]  # skip sig
+
+                            ch_str = ''
+                            ch_lotux = ''
+
+                            for j in range(10):  # shouldn't be more than 10 hex chars
+                                if j > 1:  # lotu check
+                                    if data.find(str_l) == 0:
+                                        data = data[len(str_l):]
+                                        ch_lotux = 'l'
+                                        break
+                                    elif data.find(str_o) == 0:
+                                        data = data[len(str_o):]
+                                        ch_lotux = 'o'
+                                        break
+                                    elif data.find(str_t) == 0:
+                                        data = data[len(str_t):]
+                                        ch_lotux = 't'
+                                        break
+                                    elif data.find(str_u) == 0:
+                                        data = data[len(str_u):]
+                                        ch_lotux = 'u'
+                                        break
+
+                                # gv + "."+b[ c ]
+                                if data.find(gvsig) == 0:
+                                    data = data[len(gvsig):]  # skip gvsig
+                                    for k in range(len(b)):  # for every entry in b
+                                        if data.find(b[k]) == 0:
+                                            data = data[len(b[k]):]
+                                            ch_str += '%x' % k
+                                            break
+                                else:
+                                    break  # done
+                            out += chr(int(ch_str, 16))
+                            break  # step out of the while loop
+                        elif data.find(str_lower) == 0:  # r3 check if "R // n < 128
+                            if match == 0:
+                                raise Exception('no match S block n<128: ' + data)
+
+                            data = data[len(str_lower):]  # skip sig
+
+                            ch_str = ''
+                            ch_lotux = ''
+                            temp = ''
+                            b_checkR1 = 0
+
+                            for j in range(3):  # shouldn't be more than 3 octal chars
+                                if j > 1:  # lotu check
+                                    if data.find(str_l) == 0:
+                                        data = data[len(str_l):]
+                                        ch_lotux = 'l'
+                                        break
+                                    elif data.find(str_o) == 0:
+                                        data = data[len(str_o):]
+                                        ch_lotux = 'o'
+                                        break
+                                    elif data.find(str_t) == 0:
+                                        data = data[len(str_t):]
+                                        ch_lotux = 't'
+                                        break
+                                    elif data.find(str_u) == 0:
+                                        data = data[len(str_u):]
+                                        ch_lotux = 'u'
+                                        break
+
+                                # gv + "."+b[ c ]
+                                if data.find(gvsig) == 0:
+                                    temp = data[len(gvsig):]
+                                    for k in range(8):  # for every entry in b octal
+                                        if temp.find(b[k]) == 0:
+                                            if int(ch_str + str(k), 8) > 128:
+                                                b_checkR1 = 1
+                                                break
+
+                                            ch_str += str(k)
+                                            data = data[len(gvsig):]  # skip gvsig
+                                            data = data[len(b[k]):]
+                                            break
+
+                                    if b_checkR1 == 1:
+                                        if data.find(str_hex) == 0:  # 0123456789abcdef
+                                            data = data[len(str_hex):]
+                                            # check every element of hex decode string for a match
+                                            for i in range(len(b)):
+                                                if data.find(b[i]) == 0:
+                                                    data = data[len(b[i]):]
+                                                    ch_lotux = '%x' % i
+                                                    break
+                                else:
+                                    break
+                            out += chr(int(ch_str, 8)) + ch_lotux
+                            break  # step out of the while loop
+                        elif (0x21 <= n and n <= 0x2f) or (0x3A <= n and n <= 0x40) or (0x5b <= n and n <= 0x60) or (0x7b <= n and n <= 0x7f):
+                            out += data[0]
+                            data = data[1:]
+                            match += 1
+                            continue
+                        print 'No match : ' + data
+                        break
+        return out
diff --git a/plugin.video.alfa/lib/jscrypto.py b/plugin.video.alfa/lib/jscrypto.py
new file mode 100755
index 00000000..22ce606e
--- /dev/null
+++ b/plugin.video.alfa/lib/jscrypto.py
@@ -0,0 +1,546 @@
+# -*- coding: utf-8 -*-
+
+import hashlib
+import base64
+import os
+import binascii
+import StringIO
+from array import array
+
+
+def evpKDF(passwd, salt, key_size=8, iv_size=4, iterations=1, hash_algorithm="md5"):
+    target_key_size = key_size + iv_size
+    derived_bytes = ""
+    number_of_derived_words = 0
+    block = None
+    hasher = hashlib.new(hash_algorithm)
+    while number_of_derived_words < target_key_size:
+        if block is not None:
+            hasher.update(block)
+
+        hasher.update(passwd)
+        hasher.update(salt)
+        block = hasher.digest()
+        hasher = hashlib.new(hash_algorithm)
+
+        for i in range(1, iterations):
+            hasher.update(block)
+            block = hasher.digest()
+            hasher = hashlib.new(hash_algorithm)
+
+        derived_bytes += block[0: min(len(block), (target_key_size - number_of_derived_words) * 4)]
+
+        number_of_derived_words += len(block)/4
+
+    return {
+        "key": derived_bytes[0: key_size * 4],
+        "iv": derived_bytes[key_size * 4:]
+    }
+
+ +class PKCS7Encoder(object): + ''' + RFC 2315: PKCS#7 page 21 + Some content-encryption algorithms assume the + input length is a multiple of k octets, where k > 1, and + let the application define a method for handling inputs + whose lengths are not a multiple of k octets. For such + algorithms, the method shall be to pad the input at the + trailing end with k - (l mod k) octets all having value k - + (l mod k), where l is the length of the input. In other + words, the input is padded at the trailing end with one of + the following strings: + + 01 -- if l mod k = k-1 + 02 02 -- if l mod k = k-2 + . + . + . + k k ... k k -- if l mod k = 0 + + The padding can be removed unambiguously since all input is + padded and no padding string is a suffix of another. This + padding method is well-defined if and only if k < 256; + methods for larger k are an open issue for further study. + ''' + def __init__(self, k=16): + self.k = k + + ## @param text The padded text for which the padding is to be removed. + # @exception ValueError Raised when the input padding is missing or corrupt. + def decode(self, text): + ''' + Remove the PKCS#7 padding from a text string + ''' + nl = len(text) + val = int(binascii.hexlify(text[-1]), 16) + if val > self.k: + raise ValueError('Input is not padded or padding is corrupt') + + l = nl - val + return text[:l] + + ## @param text The text to encode. + def encode(self, text): + ''' + Pad an input string according to PKCS#7 + ''' + l = len(text) + output = StringIO.StringIO() + val = self.k - (l % self.k) + for _ in xrange(val): + output.write('%02x' % val) + return text + binascii.unhexlify(output.getvalue()) + + +# Pyaes file +# Globals mandated by PEP 272: +# http://www.python.org/dev/peps/pep-0272/ +MODE_ECB = 1 +MODE_CBC = 2 +#MODE_CTR = 6 + +block_size = 16 +key_size = None + +def new(key, mode, IV=None): + if mode == MODE_ECB: + return ECBMode(AES(key)) + elif mode == MODE_CBC: + if IV is None: + raise ValueError, "CBC mode needs an IV value!" + + return CBCMode(AES(key), IV) + else: + raise NotImplementedError + +#### AES cipher implementation + +class AES(object): + block_size = 16 + + def __init__(self, key): + self.setkey(key) + + def setkey(self, key): + """Sets the key and performs key expansion.""" + + self.key = key + self.key_size = len(key) + + if self.key_size == 16: + self.rounds = 10 + elif self.key_size == 24: + self.rounds = 12 + elif self.key_size == 32: + self.rounds = 14 + else: + raise ValueError, "Key length must be 16, 24 or 32 bytes" + + self.expand_key() + + def expand_key(self): + """Performs AES key expansion on self.key and stores in self.exkey""" + + # The key schedule specifies how parts of the key are fed into the + # cipher's round functions. "Key expansion" means performing this + # schedule in advance. Almost all implementations do this. 
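+        # Editor's note: the expansion loop below stops once the schedule
+        # reaches (rounds + 1) * 16 bytes, i.e. (10 + 1) * 16 = 176 bytes for
+        # a 16-byte key, 208 bytes for a 24-byte key and 240 for a 32-byte key.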
+ # + # Here's a description of AES key schedule: + # http://en.wikipedia.org/wiki/Rijndael_key_schedule + + # The expanded key starts with the actual key itself + exkey = array('B', self.key) + + # extra key expansion steps + if self.key_size == 16: + extra_cnt = 0 + elif self.key_size == 24: + extra_cnt = 2 + else: + extra_cnt = 3 + + # 4-byte temporary variable for key expansion + word = exkey[-4:] + # Each expansion cycle uses 'i' once for Rcon table lookup + for i in xrange(1, 11): + + #### key schedule core: + # left-rotate by 1 byte + word = word[1:4] + word[0:1] + + # apply S-box to all bytes + for j in xrange(4): + word[j] = aes_sbox[word[j]] + + # apply the Rcon table to the leftmost byte + word[0] = word[0] ^ aes_Rcon[i] + #### end key schedule core + + for z in xrange(4): + for j in xrange(4): + # mix in bytes from the last subkey + word[j] ^= exkey[-self.key_size + j] + exkey.extend(word) + + # Last key expansion cycle always finishes here + if len(exkey) >= (self.rounds+1) * self.block_size: + break + + # Special substitution step for 256-bit key + if self.key_size == 32: + for j in xrange(4): + # mix in bytes from the last subkey XORed with S-box of + # current word bytes + word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j] + exkey.extend(word) + + # Twice for 192-bit key, thrice for 256-bit key + for z in xrange(extra_cnt): + for j in xrange(4): + # mix in bytes from the last subkey + word[j] ^= exkey[-self.key_size + j] + exkey.extend(word) + + self.exkey = exkey + + def add_round_key(self, block, round): + """AddRoundKey step in AES. This is where the key is mixed into plaintext""" + + offset = round * 16 + exkey = self.exkey + + for i in xrange(16): + block[i] ^= exkey[offset + i] + + #print 'AddRoundKey:', block + + def sub_bytes(self, block, sbox): + """SubBytes step, apply S-box to all bytes + + Depending on whether encrypting or decrypting, a different sbox array + is passed in. + """ + + for i in xrange(16): + block[i] = sbox[block[i]] + + #print 'SubBytes :', block + + def shift_rows(self, b): + """ShiftRows step. Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3 + + Since we're performing this on a transposed matrix, cells are numbered + from top to bottom:: + + 0 4 8 12 -> 0 4 8 12 -- 1st row doesn't change + 1 5 9 13 -> 5 9 13 1 -- row shifted to left by 1 (wraps around) + 2 6 10 14 -> 10 14 2 6 -- shifted by 2 + 3 7 11 15 -> 15 3 7 11 -- shifted by 3 + """ + + b[1], b[5], b[ 9], b[13] = b[ 5], b[ 9], b[13], b[ 1] + b[2], b[6], b[10], b[14] = b[10], b[14], b[ 2], b[ 6] + b[3], b[7], b[11], b[15] = b[15], b[ 3], b[ 7], b[11] + + #print 'ShiftRows :', b + + def shift_rows_inv(self, b): + """Similar to shift_rows above, but performed in inverse for decryption.""" + + b[ 5], b[ 9], b[13], b[ 1] = b[1], b[5], b[ 9], b[13] + b[10], b[14], b[ 2], b[ 6] = b[2], b[6], b[10], b[14] + b[15], b[ 3], b[ 7], b[11] = b[3], b[7], b[11], b[15] + + #print 'ShiftRows :', b + + def mix_columns(self, block): + """MixColumns step. 
Mixes the values in each column""" + + # Cache global multiplication tables (see below) + mul_by_2 = gf_mul_by_2 + mul_by_3 = gf_mul_by_3 + + # Since we're dealing with a transposed matrix, columns are already + # sequential + for i in xrange(4): + col = i * 4 + + #v0, v1, v2, v3 = block[col : col+4] + v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2], + block[col + 3]) + + block[col ] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1] + block[col+1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2] + block[col+2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3] + block[col+3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0] + + #print 'MixColumns :', block + + def mix_columns_inv(self, block): + """Similar to mix_columns above, but performed in inverse for decryption.""" + + # Cache global multiplication tables (see below) + mul_9 = gf_mul_by_9 + mul_11 = gf_mul_by_11 + mul_13 = gf_mul_by_13 + mul_14 = gf_mul_by_14 + + # Since we're dealing with a transposed matrix, columns are already + # sequential + for i in xrange(4): + col = i * 4 + + v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2], + block[col + 3]) + #v0, v1, v2, v3 = block[col:col+4] + + block[col ] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1] + block[col+1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2] + block[col+2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3] + block[col+3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0] + + #print 'MixColumns :', block + + def encrypt_block(self, block): + """Encrypts a single block. This is the main AES function""" + + # For efficiency reasons, the state between steps is transmitted via a + # mutable array, not returned. + self.add_round_key(block, 0) + + for round in xrange(1, self.rounds): + self.sub_bytes(block, aes_sbox) + self.shift_rows(block) + self.mix_columns(block) + self.add_round_key(block, round) + + self.sub_bytes(block, aes_sbox) + self.shift_rows(block) + # no mix_columns step in the last round + self.add_round_key(block, self.rounds) + + def decrypt_block(self, block): + """Decrypts a single block. This is the main AES decryption function""" + + # For efficiency reasons, the state between steps is transmitted via a + # mutable array, not returned. + self.add_round_key(block, self.rounds) + + # count rounds down from 15 ... 1 + for round in xrange(self.rounds-1, 0, -1): + self.shift_rows_inv(block) + self.sub_bytes(block, aes_inv_sbox) + self.add_round_key(block, round) + self.mix_columns_inv(block) + + self.shift_rows_inv(block) + self.sub_bytes(block, aes_inv_sbox) + self.add_round_key(block, 0) + # no mix_columns step in the last round + + +#### ECB mode implementation + +class ECBMode(object): + """Electronic CodeBook (ECB) mode encryption. + + Basically this mode applies the cipher function to each block individually; + no feedback is done. NB! 
This is insecure for almost all purposes + """ + + def __init__(self, cipher): + self.cipher = cipher + self.block_size = cipher.block_size + + def ecb(self, data, block_func): + """Perform ECB mode with the given function""" + + if len(data) % self.block_size != 0: + raise ValueError, "Plaintext length must be multiple of 16" + + block_size = self.block_size + data = array('B', data) + + for offset in xrange(0, len(data), block_size): + block = data[offset : offset+block_size] + block_func(block) + data[offset : offset+block_size] = block + + return data.tostring() + + def encrypt(self, data): + """Encrypt data in ECB mode""" + + return self.ecb(data, self.cipher.encrypt_block) + + def decrypt(self, data): + """Decrypt data in ECB mode""" + + return self.ecb(data, self.cipher.decrypt_block) + +#### CBC mode + +class CBCMode(object): + """Cipher Block Chaining (CBC) mode encryption. This mode avoids content leaks. + + In CBC encryption, each plaintext block is XORed with the ciphertext block + preceding it; decryption is simply the inverse. + """ + + # A better explanation of CBC can be found here: + # http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29 + + def __init__(self, cipher, IV): + self.cipher = cipher + self.block_size = cipher.block_size + self.IV = array('B', IV) + + def encrypt(self, data): + """Encrypt data in CBC mode""" + + block_size = self.block_size + if len(data) % block_size != 0: + raise ValueError, "Plaintext length must be multiple of 16" + + data = array('B', data) + IV = self.IV + + for offset in xrange(0, len(data), block_size): + block = data[offset : offset+block_size] + + # Perform CBC chaining + for i in xrange(block_size): + block[i] ^= IV[i] + + self.cipher.encrypt_block(block) + data[offset : offset+block_size] = block + IV = block + + self.IV = IV + return data.tostring() + + def decrypt(self, data): + """Decrypt data in CBC mode""" + + block_size = self.block_size + if len(data) % block_size != 0: + raise ValueError, "Ciphertext length must be multiple of 16" + + data = array('B', data) + IV = self.IV + + for offset in xrange(0, len(data), block_size): + ctext = data[offset : offset+block_size] + block = ctext[:] + self.cipher.decrypt_block(block) + + # Perform CBC chaining + #for i in xrange(block_size): + # data[offset + i] ^= IV[i] + for i in xrange(block_size): + block[i] ^= IV[i] + data[offset : offset+block_size] = block + + IV = ctext + #data[offset : offset+block_size] = block + + self.IV = IV + return data.tostring() + +#### + +def galois_multiply(a, b): + """Galois Field multiplicaiton for AES""" + p = 0 + while b: + if b & 1: + p ^= a + a <<= 1 + if a & 0x100: + a ^= 0x1b + b >>= 1 + + return p & 0xff + +# Precompute the multiplication tables for encryption +gf_mul_by_2 = array('B', [galois_multiply(x, 2) for x in range(256)]) +gf_mul_by_3 = array('B', [galois_multiply(x, 3) for x in range(256)]) +# ... for decryption +gf_mul_by_9 = array('B', [galois_multiply(x, 9) for x in range(256)]) +gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)]) +gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)]) +gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)]) + +#### + +# The S-box is a 256-element array, that maps a single byte value to another +# byte value. 
Since it's designed to be reversible, each value occurs only once +# in the S-box +# +# More information: http://en.wikipedia.org/wiki/Rijndael_S-box + +aes_sbox = array('B', + '637c777bf26b6fc53001672bfed7ab76' + 'ca82c97dfa5947f0add4a2af9ca472c0' + 'b7fd9326363ff7cc34a5e5f171d83115' + '04c723c31896059a071280e2eb27b275' + '09832c1a1b6e5aa0523bd6b329e32f84' + '53d100ed20fcb15b6acbbe394a4c58cf' + 'd0efaafb434d338545f9027f503c9fa8' + '51a3408f929d38f5bcb6da2110fff3d2' + 'cd0c13ec5f974417c4a77e3d645d1973' + '60814fdc222a908846eeb814de5e0bdb' + 'e0323a0a4906245cc2d3ac629195e479' + 'e7c8376d8dd54ea96c56f4ea657aae08' + 'ba78252e1ca6b4c6e8dd741f4bbd8b8a' + '703eb5664803f60e613557b986c11d9e' + 'e1f8981169d98e949b1e87e9ce5528df' + '8ca1890dbfe6426841992d0fb054bb16'.decode('hex') +) + +# This is the inverse of the above. In other words: +# aes_inv_sbox[aes_sbox[val]] == val + +aes_inv_sbox = array('B', + '52096ad53036a538bf40a39e81f3d7fb' + '7ce339829b2fff87348e4344c4dee9cb' + '547b9432a6c2233dee4c950b42fac34e' + '082ea16628d924b2765ba2496d8bd125' + '72f8f66486689816d4a45ccc5d65b692' + '6c704850fdedb9da5e154657a78d9d84' + '90d8ab008cbcd30af7e45805b8b34506' + 'd02c1e8fca3f0f02c1afbd0301138a6b' + '3a9111414f67dcea97f2cfcef0b4e673' + '96ac7422e7ad3585e2f937e81c75df6e' + '47f11a711d29c5896fb7620eaa18be1b' + 'fc563e4bc6d279209adbc0fe78cd5af4' + '1fdda8338807c731b11210592780ec5f' + '60517fa919b54a0d2de57a9f93c99cef' + 'a0e03b4dae2af5b0c8ebbb3c83539961' + '172b047eba77d626e169146355210c7d'.decode('hex') +) + +# The Rcon table is used in AES's key schedule (key expansion) +# It's a pre-computed table of exponentation of 2 in AES's finite field +# +# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule + +aes_Rcon = array('B', + '8d01020408102040801b366cd8ab4d9a' + '2f5ebc63c697356ad4b37dfaefc59139' + '72e4d3bd61c29f254a943366cc831d3a' + '74e8cb8d01020408102040801b366cd8' + 'ab4d9a2f5ebc63c697356ad4b37dfaef' + 'c5913972e4d3bd61c29f254a943366cc' + '831d3a74e8cb8d01020408102040801b' + '366cd8ab4d9a2f5ebc63c697356ad4b3' + '7dfaefc5913972e4d3bd61c29f254a94' + '3366cc831d3a74e8cb8d010204081020' + '40801b366cd8ab4d9a2f5ebc63c69735' + '6ad4b37dfaefc5913972e4d3bd61c29f' + '254a943366cc831d3a74e8cb8d010204' + '08102040801b366cd8ab4d9a2f5ebc63' + 'c697356ad4b37dfaefc5913972e4d3bd' + '61c29f254a943366cc831d3a74e8cb'.decode('hex') +) \ No newline at end of file diff --git a/plugin.video.alfa/lib/jsinterpreter.py b/plugin.video.alfa/lib/jsinterpreter.py new file mode 100755 index 00000000..bb60bebb --- /dev/null +++ b/plugin.video.alfa/lib/jsinterpreter.py @@ -0,0 +1,249 @@ +# -*- coding: utf-8 -*- + +import json +import operator +import re + + +_OPERATORS = [ + ('|', operator.or_), + ('^', operator.xor), + ('&', operator.and_), + ('>>', operator.rshift), + ('<<', operator.lshift), + ('-', operator.sub), + ('+', operator.add), + ('%', operator.mod), + ('/', operator.truediv), + ('*', operator.mul), +] + +_ASSIGN_OPERATORS = [] +for op, opfunc in _OPERATORS: + _ASSIGN_OPERATORS.append([op + '=', opfunc]) +_ASSIGN_OPERATORS.append(('=', lambda cur, right: right)) + +_NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*' + + +class JSInterpreter(object): + def __init__(self, code, objects=None): + if objects is None: + objects = {} + self.code = code + self._functions = {} + self._objects = objects + + def interpret_statement(self, stmt, local_vars, allow_recursion=100): + + should_abort = False + stmt = stmt.lstrip() + stmt_m = re.match(r'var\s', stmt) + if stmt_m: + expr = stmt[len(stmt_m.group(0)):] + else: + return_m = 
re.match(r'return(?:\s+|$)', stmt) + if return_m: + expr = stmt[len(return_m.group(0)):] + should_abort = True + else: + # Try interpreting it as an expression + expr = stmt + + v = self.interpret_expression(expr, local_vars, allow_recursion) + return v, should_abort + + def interpret_expression(self, expr, local_vars, allow_recursion): + expr = expr.strip() + + if expr == '': # Empty expression + return None + + if expr.startswith('('): + parens_count = 0 + for m in re.finditer(r'[()]', expr): + if m.group(0) == '(': + parens_count += 1 + else: + parens_count -= 1 + if parens_count == 0: + sub_expr = expr[1:m.start()] + sub_result = self.interpret_expression( + sub_expr, local_vars, allow_recursion) + remaining_expr = expr[m.end():].strip() + if not remaining_expr: + return sub_result + else: + expr = json.dumps(sub_result) + remaining_expr + break + + for op, opfunc in _ASSIGN_OPERATORS: + m = re.match(r'''(?x) + (?P%s)(?:\[(?P[^\]]+?)\])? + \s*%s + (?P.*)$''' % (_NAME_RE, re.escape(op)), expr) + if not m: + continue + right_val = self.interpret_expression( + m.group('expr'), local_vars, allow_recursion - 1) + + if m.groupdict().get('index'): + lvar = local_vars[m.group('out')] + idx = self.interpret_expression( + m.group('index'), local_vars, allow_recursion) + assert isinstance(idx, int) + cur = lvar[idx] + val = opfunc(cur, right_val) + lvar[idx] = val + return val + else: + cur = local_vars.get(m.group('out')) + val = opfunc(cur, right_val) + local_vars[m.group('out')] = val + return val + + if expr.isdigit(): + return int(expr) + + var_m = re.match( + r'(?!if|return|true|false)(?P%s)$' % _NAME_RE, + expr) + if var_m: + return local_vars[var_m.group('name')] + + try: + return json.loads(expr) + except ValueError: + pass + + m = re.match( + r'(?P%s)\.(?P[^(]+)(?:\(+(?P[^()]*)\))?$' % _NAME_RE, + expr) + if m: + variable = m.group('var') + member = m.group('member') + arg_str = m.group('args') + + if variable in local_vars: + obj = local_vars[variable] + else: + if variable not in self._objects: + self._objects[variable] = self.extract_object(variable) + obj = self._objects[variable] + + if arg_str is None: + # Member access + if member == 'length': + return len(obj) + return obj[member] + + assert expr.endswith(')') + # Function call + if arg_str == '': + argvals = tuple() + else: + argvals = [] + for v in arg_str.split(','): + argvals.extend([self.interpret_expression(v, local_vars, allow_recursion)]) + + if member == 'split': + assert argvals == ('',) + return list(obj) + if member == 'join': + assert len(argvals) == 1 + return argvals[0].join(obj) + if member == 'reverse': + assert len(argvals) == 0 + obj.reverse() + return obj + if member == 'slice': + assert len(argvals) == 1 + return obj[argvals[0]:] + if member == 'splice': + assert isinstance(obj, list) + index, howMany = argvals + res = [] + for i in range(index, min(index + howMany, len(obj))): + res.append(obj.pop(index)) + return res + + return obj[member](argvals) + + m = re.match( + r'(?P%s)\[(?P.+)\]$' % _NAME_RE, expr) + if m: + val = local_vars[m.group('in')] + idx = self.interpret_expression( + m.group('idx'), local_vars, allow_recursion - 1) + return val[idx] + + for op, opfunc in _OPERATORS: + m = re.match(r'(?P.+?)%s(?P.+)' % re.escape(op), expr) + if not m: + continue + x, abort = self.interpret_statement( + m.group('x'), local_vars, allow_recursion - 1) + y, abort = self.interpret_statement( + m.group('y'), local_vars, allow_recursion - 1) + return opfunc(x, y) + + m = re.match( + 
r'^(?P%s)\((?P[a-zA-Z0-9_$,]+)\)$' % _NAME_RE, expr) + if m: + fname = m.group('func') + argvals = [] + for v in m.group('args').split(','): + if v.isdigit(): + argvals.append([int(v)]) + else: + argvals.append([local_vars[v]]) + + if fname not in self._functions: + self._functions[fname] = self.extract_function(fname) + return self._functions[fname](argvals) + + + def extract_object(self, objname): + obj = {} + obj_m = re.search( + (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) + + r'\s*(?P([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' + + r'\}\s*;', + self.code) + fields = obj_m.group('fields') + # Currently, it only supports function definitions + fields_m = re.finditer( + r'(?P[a-zA-Z$0-9]+)\s*:\s*function' + r'\((?P[a-z,]+)\){(?P[^}]+)}', + fields) + for f in fields_m: + argnames = f.group('args').split(',') + obj[f.group('key')] = self.build_function(argnames, f.group('code')) + + return obj + + def extract_function(self, funcname): + func_m = re.search( + r'''(?x) + (?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s* + \((?P[^)]*)\)\s* + \{(?P[^}]+)\}''' % ( + re.escape(funcname), re.escape(funcname), re.escape(funcname)), + self.code) + argnames = func_m.group('args').split(',') + + return self.build_function(argnames, func_m.group('code')) + + def call_function(self, funcname, *args): + f = self.extract_function(funcname) + return f(args) + + def build_function(self, argnames, code): + def resf(args): + local_vars = dict(zip(argnames, args)) + for stmt in code.split(';'): + res, abort = self.interpret_statement(stmt, local_vars) + if abort: + break + return res + return resf diff --git a/plugin.video.alfa/lib/jsunpack.py b/plugin.video.alfa/lib/jsunpack.py new file mode 100755 index 00000000..a5e5e73b --- /dev/null +++ b/plugin.video.alfa/lib/jsunpack.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +""" + urlresolver XBMC Addon + Copyright (C) 2013 Bstrdsmkr + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + You should have received a copy of the GNU General Public License + along with this program. If not, see . + Adapted for use in xbmc from: + https://github.com/einars/js-beautify/blob/master/python/jsbeautifier/unpackers/packer.py + + usage: + if detect(some_string): + unpacked = unpack(some_string) +Unpacker for Dean Edward's p.a.c.k.e.r +""" +import re +def detect(source): + """Detects whether `source` is P.A.C.K.E.R. coded.""" + source = source.replace(' ', '') + if re.search('eval\(function\(p,a,c,k,e,(?:r|d)', source): + return True + else: + return False +def unpack(source): + """Unpacks P.A.C.K.E.R. packed js code.""" + payload, symtab, radix, count = _filterargs(source) + if count != len(symtab): + raise UnpackingError('Malformed p.a.c.k.e.r. symtab.') + try: + unbase = Unbaser(radix) + except TypeError: + raise UnpackingError('Unknown p.a.c.k.e.r. 
encoding.') + def lookup(match): + """Look up symbols in the synthetic symtab.""" + word = match.group(0) + return symtab[unbase(word)] or word + source = re.sub(r'\b\w+\b', lookup, payload) + return _replacestrings(source) +def _filterargs(source): + """Juice from a source file the four args needed by decoder.""" + juicers = [(r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\.split\('\|'\), *(\d+), *(.*)\)\)"), + (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\.split\('\|'\)"), + ] + for juicer in juicers: + args = re.search(juicer, source, re.DOTALL) + if args: + a = args.groups() + try: + return a[0], a[3].split('|'), int(a[1]), int(a[2]) + except ValueError: + raise UnpackingError('Corrupted p.a.c.k.e.r. data.') + # could not find a satisfying regex + raise UnpackingError('Could not make sense of p.a.c.k.e.r data (unexpected code structure)') +def _replacestrings(source): + """Strip string lookup table (list) and replace values in source.""" + match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL) + if match: + varname, strings = match.groups() + startpoint = len(match.group(0)) + lookup = strings.split('","') + variable = '%s[%%d]' % varname + for index, value in enumerate(lookup): + source = source.replace(variable % index, '"%s"' % value) + return source[startpoint:] + return source +class Unbaser(object): + """Functor for a given base. Will efficiently convert + strings to natural numbers.""" + ALPHABET = { + 62: '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', + 95: (' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ' + '[\]^_`abcdefghijklmnopqrstuvwxyz{|}~') + } + def __init__(self, base): + self.base = base + # If base can be handled by int() builtin, let it do it for us + if 2 <= base <= 36: + self.unbase = lambda string: int(string, base) + else: + if base < 62: + self.ALPHABET[base] = self.ALPHABET[62][0:base] + elif 62 < base < 95: + self.ALPHABET[base] = self.ALPHABET[95][0:base] + # Build conversion dictionary cache + try: + self.dictionary = dict((cipher, index) for index, cipher in enumerate(self.ALPHABET[base])) + except KeyError: + raise TypeError('Unsupported base encoding.') + self.unbase = self._dictunbaser + def __call__(self, string): + return self.unbase(string) + def _dictunbaser(self, string): + """Decodes a value to an integer.""" + ret = 0 + for index, cipher in enumerate(string[::-1]): + ret += (self.base ** index) * self.dictionary[cipher] + return ret + + +class UnpackingError(Exception): + """Badly packed source or general error. 
Argument is a + meaningful description.""" + pass \ No newline at end of file diff --git a/plugin.video.alfa/lib/libtorrent.pyd b/plugin.video.alfa/lib/libtorrent.pyd new file mode 100755 index 00000000..1068c833 Binary files /dev/null and b/plugin.video.alfa/lib/libtorrent.pyd differ diff --git a/plugin.video.alfa/lib/mechanize/__init__.py b/plugin.video.alfa/lib/mechanize/__init__.py new file mode 100755 index 00000000..43a3324a --- /dev/null +++ b/plugin.video.alfa/lib/mechanize/__init__.py @@ -0,0 +1,211 @@ +__all__ = [ + 'AbstractBasicAuthHandler', + 'AbstractDigestAuthHandler', + 'BaseHandler', + 'Browser', + 'BrowserStateError', + 'CacheFTPHandler', + 'ContentTooShortError', + 'Cookie', + 'CookieJar', + 'CookiePolicy', + 'DefaultCookiePolicy', + 'DefaultFactory', + 'FTPHandler', + 'Factory', + 'FileCookieJar', + 'FileHandler', + 'FormNotFoundError', + 'FormsFactory', + 'HTTPBasicAuthHandler', + 'HTTPCookieProcessor', + 'HTTPDefaultErrorHandler', + 'HTTPDigestAuthHandler', + 'HTTPEquivProcessor', + 'HTTPError', + 'HTTPErrorProcessor', + 'HTTPHandler', + 'HTTPPasswordMgr', + 'HTTPPasswordMgrWithDefaultRealm', + 'HTTPProxyPasswordMgr', + 'HTTPRedirectDebugProcessor', + 'HTTPRedirectHandler', + 'HTTPRefererProcessor', + 'HTTPRefreshProcessor', + 'HTTPResponseDebugProcessor', + 'HTTPRobotRulesProcessor', + 'HTTPSClientCertMgr', + 'HeadParser', + 'History', + 'LWPCookieJar', + 'Link', + 'LinkNotFoundError', + 'LinksFactory', + 'LoadError', + 'MSIECookieJar', + 'MozillaCookieJar', + 'OpenerDirector', + 'OpenerFactory', + 'ParseError', + 'ProxyBasicAuthHandler', + 'ProxyDigestAuthHandler', + 'ProxyHandler', + 'Request', + 'RobotExclusionError', + 'RobustFactory', + 'RobustFormsFactory', + 'RobustLinksFactory', + 'RobustTitleFactory', + 'SeekableResponseOpener', + 'TitleFactory', + 'URLError', + 'USE_BARE_EXCEPT', + 'UnknownHandler', + 'UserAgent', + 'UserAgentBase', + 'XHTMLCompatibleHeadParser', + '__version__', + 'build_opener', + 'install_opener', + 'lwp_cookie_str', + 'make_response', + 'request_host', + 'response_seek_wrapper', # XXX deprecate in public interface? + 'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper() + 'str2time', + 'urlopen', + 'urlretrieve', + 'urljoin', + + # ClientForm API + 'AmbiguityError', + 'ControlNotFoundError', + 'FormParser', + 'ItemCountError', + 'ItemNotFoundError', + 'LocateError', + 'Missing', + 'ParseFile', + 'ParseFileEx', + 'ParseResponse', + 'ParseResponseEx', + 'ParseString', + 'XHTMLCompatibleFormParser', + # deprecated + 'CheckboxControl', + 'Control', + 'FileControl', + 'HTMLForm', + 'HiddenControl', + 'IgnoreControl', + 'ImageControl', + 'IsindexControl', + 'Item', + 'Label', + 'ListControl', + 'PasswordControl', + 'RadioControl', + 'ScalarControl', + 'SelectControl', + 'SubmitButtonControl', + 'SubmitControl', + 'TextControl', + 'TextareaControl', + ] + +import logging +import sys + +from _version import __version__ + +# high-level stateful browser-style interface +from _mechanize import \ + Browser, History, \ + BrowserStateError, LinkNotFoundError, FormNotFoundError + +# configurable URL-opener interface +from _useragent import UserAgentBase, UserAgent +from _html import \ + Link, \ + Factory, DefaultFactory, RobustFactory, \ + FormsFactory, LinksFactory, TitleFactory, \ + RobustFormsFactory, RobustLinksFactory, RobustTitleFactory + +# urllib2 work-alike interface. This is a superset of the urllib2 interface. 
+from _urllib2 import * +import _urllib2 +if hasattr(_urllib2, "HTTPSHandler"): + __all__.append("HTTPSHandler") +del _urllib2 + +# misc +from _http import HeadParser +from _http import XHTMLCompatibleHeadParser +from _opener import ContentTooShortError, OpenerFactory, urlretrieve +from _response import \ + response_seek_wrapper, seek_wrapped_response, make_response +from _rfc3986 import urljoin +from _util import http2time as str2time + +# cookies +from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \ + CookieJar, FileCookieJar, LoadError, request_host_lc as request_host, \ + effective_request_host +from _lwpcookiejar import LWPCookieJar, lwp_cookie_str +# 2.4 raises SyntaxError due to generator / try/finally use +if sys.version_info[:2] > (2,4): + try: + import sqlite3 + except ImportError: + pass + else: + from _firefox3cookiejar import Firefox3CookieJar +from _mozillacookiejar import MozillaCookieJar +from _msiecookiejar import MSIECookieJar + +# forms +from _form import ( + AmbiguityError, + ControlNotFoundError, + FormParser, + ItemCountError, + ItemNotFoundError, + LocateError, + Missing, + ParseError, + ParseFile, + ParseFileEx, + ParseResponse, + ParseResponseEx, + ParseString, + XHTMLCompatibleFormParser, + # deprecated + CheckboxControl, + Control, + FileControl, + HTMLForm, + HiddenControl, + IgnoreControl, + ImageControl, + IsindexControl, + Item, + Label, + ListControl, + PasswordControl, + RadioControl, + ScalarControl, + SelectControl, + SubmitButtonControl, + SubmitControl, + TextControl, + TextareaControl, + ) + +# If you hate the idea of turning bugs into warnings, do: +# import mechanize; mechanize.USE_BARE_EXCEPT = False +USE_BARE_EXCEPT = True + +logger = logging.getLogger("mechanize") +if logger.level is logging.NOTSET: + logger.setLevel(logging.CRITICAL) +del logger diff --git a/plugin.video.alfa/lib/mechanize/_auth.py b/plugin.video.alfa/lib/mechanize/_auth.py new file mode 100755 index 00000000..9fa7e8e3 --- /dev/null +++ b/plugin.video.alfa/lib/mechanize/_auth.py @@ -0,0 +1,68 @@ +"""HTTP Authentication and Proxy support. + + +Copyright 2006 John J. Lee + +This code is free software; you can redistribute it and/or modify it under +the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt +included with the distribution). 
+ +""" + +from _urllib2_fork import HTTPPasswordMgr + + +# TODO: stop deriving from HTTPPasswordMgr +class HTTPProxyPasswordMgr(HTTPPasswordMgr): + # has default realm and host/port + def add_password(self, realm, uri, user, passwd): + # uri could be a single URI or a sequence + if uri is None or isinstance(uri, basestring): + uris = [uri] + else: + uris = uri + passwd_by_domain = self.passwd.setdefault(realm, {}) + for uri in uris: + for default_port in True, False: + reduced_uri = self.reduce_uri(uri, default_port) + passwd_by_domain[reduced_uri] = (user, passwd) + + def find_user_password(self, realm, authuri): + attempts = [(realm, authuri), (None, authuri)] + # bleh, want default realm to take precedence over default + # URI/authority, hence this outer loop + for default_uri in False, True: + for realm, authuri in attempts: + authinfo_by_domain = self.passwd.get(realm, {}) + for default_port in True, False: + reduced_authuri = self.reduce_uri(authuri, default_port) + for uri, authinfo in authinfo_by_domain.iteritems(): + if uri is None and not default_uri: + continue + if self.is_suburi(uri, reduced_authuri): + return authinfo + user, password = None, None + + if user is not None: + break + return user, password + + def reduce_uri(self, uri, default_port=True): + if uri is None: + return None + return HTTPPasswordMgr.reduce_uri(self, uri, default_port) + + def is_suburi(self, base, test): + if base is None: + # default to the proxy's host/port + hostport, path = test + base = (hostport, "/") + return HTTPPasswordMgr.is_suburi(self, base, test) + + +class HTTPSClientCertMgr(HTTPPasswordMgr): + # implementation inheritance: this is not a proper subclass + def add_key_cert(self, uri, key_file, cert_file): + self.add_password(None, uri, key_file, cert_file) + def find_key_cert(self, authuri): + return HTTPPasswordMgr.find_user_password(self, None, authuri) diff --git a/plugin.video.alfa/lib/mechanize/_beautifulsoup.py b/plugin.video.alfa/lib/mechanize/_beautifulsoup.py new file mode 100755 index 00000000..5ec6755a --- /dev/null +++ b/plugin.video.alfa/lib/mechanize/_beautifulsoup.py @@ -0,0 +1,1077 @@ +"""Beautiful Soup +Elixir and Tonic +"The Screen-Scraper's Friend" +v2.1.1 +http://www.crummy.com/software/BeautifulSoup/ + +Beautiful Soup parses arbitrarily invalid XML- or HTML-like substance +into a tree representation. It provides methods and Pythonic idioms +that make it easy to search and modify the tree. + +A well-formed XML/HTML document will yield a well-formed data +structure. An ill-formed XML/HTML document will yield a +correspondingly ill-formed data structure. If your document is only +locally well-formed, you can use this library to find and process the +well-formed part of it. The BeautifulSoup class has heuristics for +obtaining a sensible parse tree in the face of common HTML errors. + +Beautiful Soup has no external dependencies. It works with Python 2.2 +and up. + +Beautiful Soup defines classes for four different parsing strategies: + + * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific + language that kind of looks like XML. + + * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid + or invalid. + + * ICantBelieveItsBeautifulSoup, for parsing valid but bizarre HTML + that trips up BeautifulSoup. + + * BeautifulSOAP, for making it easier to parse XML documents that use + lots of subelements containing a single string, where you'd prefer + they put that string into an attribute (such as SOAP messages). 
+ +You can subclass BeautifulStoneSoup or BeautifulSoup to create a +parsing strategy specific to an XML schema or a particular bizarre +HTML document. Typically your subclass would just override +SELF_CLOSING_TAGS and/or NESTABLE_TAGS. +""" #" +from __future__ import generators + +__author__ = "Leonard Richardson (leonardr@segfault.org)" +__version__ = "2.1.1" +__date__ = "$Date: 2004/10/18 00:14:20 $" +__copyright__ = "Copyright (c) 2004-2005 Leonard Richardson" +__license__ = "PSF" + +from _sgmllib_copy import SGMLParser, SGMLParseError +import types +import re +import _sgmllib_copy as sgmllib + +class NullType(object): + + """Similar to NoneType with a corresponding singleton instance + 'Null' that, unlike None, accepts any message and returns itself. + + Examples: + >>> Null("send", "a", "message")("and one more", + ... "and what you get still") is Null + True + """ + + def __new__(cls): return Null + def __call__(self, *args, **kwargs): return Null +## def __getstate__(self, *args): return Null + def __getattr__(self, attr): return Null + def __getitem__(self, item): return Null + def __setattr__(self, attr, value): pass + def __setitem__(self, item, value): pass + def __len__(self): return 0 + # FIXME: is this a python bug? otherwise ``for x in Null: pass`` + # never terminates... + def __iter__(self): return iter([]) + def __contains__(self, item): return False + def __repr__(self): return "Null" +Null = object.__new__(NullType) + +class PageElement: + """Contains the navigational information for some part of the page + (either a tag or a piece of text)""" + + def setup(self, parent=Null, previous=Null): + """Sets up the initial relations between this element and + other elements.""" + self.parent = parent + self.previous = previous + self.next = Null + self.previousSibling = Null + self.nextSibling = Null + if self.parent and self.parent.contents: + self.previousSibling = self.parent.contents[-1] + self.previousSibling.nextSibling = self + + def findNext(self, name=None, attrs={}, text=None): + """Returns the first item that matches the given criteria and + appears after this Tag in the document.""" + return self._first(self.fetchNext, name, attrs, text) + firstNext = findNext + + def fetchNext(self, name=None, attrs={}, text=None, limit=None): + """Returns all items that match the given criteria and appear + before after Tag in the document.""" + return self._fetch(name, attrs, text, limit, self.nextGenerator) + + def findNextSibling(self, name=None, attrs={}, text=None): + """Returns the closest sibling to this Tag that matches the + given criteria and appears after this Tag in the document.""" + return self._first(self.fetchNextSiblings, name, attrs, text) + firstNextSibling = findNextSibling + + def fetchNextSiblings(self, name=None, attrs={}, text=None, limit=None): + """Returns the siblings of this Tag that match the given + criteria and appear after this Tag in the document.""" + return self._fetch(name, attrs, text, limit, self.nextSiblingGenerator) + + def findPrevious(self, name=None, attrs={}, text=None): + """Returns the first item that matches the given criteria and + appears before this Tag in the document.""" + return self._first(self.fetchPrevious, name, attrs, text) + + def fetchPrevious(self, name=None, attrs={}, text=None, limit=None): + """Returns all items that match the given criteria and appear + before this Tag in the document.""" + return self._fetch(name, attrs, text, limit, self.previousGenerator) + firstPrevious = findPrevious + + def 
findPreviousSibling(self, name=None, attrs={}, text=None): + """Returns the closest sibling to this Tag that matches the + given criteria and appears before this Tag in the document.""" + return self._first(self.fetchPreviousSiblings, name, attrs, text) + firstPreviousSibling = findPreviousSibling + + def fetchPreviousSiblings(self, name=None, attrs={}, text=None, + limit=None): + """Returns the siblings of this Tag that match the given + criteria and appear before this Tag in the document.""" + return self._fetch(name, attrs, text, limit, + self.previousSiblingGenerator) + + def findParent(self, name=None, attrs={}): + """Returns the closest parent of this Tag that matches the given + criteria.""" + r = Null + l = self.fetchParents(name, attrs, 1) + if l: + r = l[0] + return r + firstParent = findParent + + def fetchParents(self, name=None, attrs={}, limit=None): + """Returns the parents of this Tag that match the given + criteria.""" + return self._fetch(name, attrs, None, limit, self.parentGenerator) + + #These methods do the real heavy lifting. + + def _first(self, method, name, attrs, text): + r = Null + l = method(name, attrs, text, 1) + if l: + r = l[0] + return r + + def _fetch(self, name, attrs, text, limit, generator): + "Iterates over a generator looking for things that match." + if not hasattr(attrs, 'items'): + attrs = {'class' : attrs} + + results = [] + g = generator() + while True: + try: + i = g.next() + except StopIteration: + break + found = None + if isinstance(i, Tag): + if not text: + if not name or self._matches(i, name): + match = True + for attr, matchAgainst in attrs.items(): + check = i.get(attr) + if not self._matches(check, matchAgainst): + match = False + break + if match: + found = i + elif text: + if self._matches(i, text): + found = i + if found: + results.append(found) + if limit and len(results) >= limit: + break + return results + + #Generators that can be used to navigate starting from both + #NavigableTexts and Tags. + def nextGenerator(self): + i = self + while i: + i = i.next + yield i + + def nextSiblingGenerator(self): + i = self + while i: + i = i.nextSibling + yield i + + def previousGenerator(self): + i = self + while i: + i = i.previous + yield i + + def previousSiblingGenerator(self): + i = self + while i: + i = i.previousSibling + yield i + + def parentGenerator(self): + i = self + while i: + i = i.parent + yield i + + def _matches(self, chunk, howToMatch): + #print 'looking for %s in %s' % (howToMatch, chunk) + # + # If given a list of items, return true if the list contains a + # text element that matches. + if isList(chunk) and not isinstance(chunk, Tag): + for tag in chunk: + if isinstance(tag, NavigableText) and self._matches(tag, howToMatch): + return True + return False + if callable(howToMatch): + return howToMatch(chunk) + if isinstance(chunk, Tag): + #Custom match methods take the tag as an argument, but all other + #ways of matching match the tag name as a string + chunk = chunk.name + #Now we know that chunk is a string + if not isinstance(chunk, basestring): + chunk = str(chunk) + if hasattr(howToMatch, 'match'): + # It's a regexp object. 
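+            # Editor's note: the match object (or None) returned below is only
+            # used in a boolean context by _first/_fetch, so callers can mix
+            # matcher styles, e.g. soup.fetch('a', {'href': re.compile('^https:')}).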
+ return howToMatch.search(chunk) + if isList(howToMatch): + return chunk in howToMatch + if hasattr(howToMatch, 'items'): + return howToMatch.has_key(chunk) + #It's just a string + return str(howToMatch) == chunk + +class NavigableText(PageElement): + + def __getattr__(self, attr): + "For backwards compatibility, text.string gives you text" + if attr == 'string': + return self + else: + raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) + +class NavigableString(str, NavigableText): + pass + +class NavigableUnicodeString(unicode, NavigableText): + pass + +class Tag(PageElement): + + """Represents a found HTML tag with its attributes and contents.""" + + def __init__(self, name, attrs=None, parent=Null, previous=Null): + "Basic constructor." + self.name = name + if attrs == None: + attrs = [] + self.attrs = attrs + self.contents = [] + self.setup(parent, previous) + self.hidden = False + + def get(self, key, default=None): + """Returns the value of the 'key' attribute for the tag, or + the value given for 'default' if it doesn't have that + attribute.""" + return self._getAttrMap().get(key, default) + + def __getitem__(self, key): + """tag[key] returns the value of the 'key' attribute for the tag, + and throws an exception if it's not there.""" + return self._getAttrMap()[key] + + def __iter__(self): + "Iterating over a tag iterates over its contents." + return iter(self.contents) + + def __len__(self): + "The length of a tag is the length of its list of contents." + return len(self.contents) + + def __contains__(self, x): + return x in self.contents + + def __nonzero__(self): + "A tag is non-None even if it has no contents." + return True + + def __setitem__(self, key, value): + """Setting tag[key] sets the value of the 'key' attribute for the + tag.""" + self._getAttrMap() + self.attrMap[key] = value + found = False + for i in range(0, len(self.attrs)): + if self.attrs[i][0] == key: + self.attrs[i] = (key, value) + found = True + if not found: + self.attrs.append((key, value)) + self._getAttrMap()[key] = value + + def __delitem__(self, key): + "Deleting tag[key] deletes all 'key' attributes for the tag." + for item in self.attrs: + if item[0] == key: + self.attrs.remove(item) + #We don't break because bad HTML can define the same + #attribute multiple times. + self._getAttrMap() + if self.attrMap.has_key(key): + del self.attrMap[key] + + def __call__(self, *args, **kwargs): + """Calling a tag like a function is the same as calling its + fetch() method. Eg. tag('a') returns a list of all the A tags + found within this tag.""" + return apply(self.fetch, args, kwargs) + + def __getattr__(self, tag): + if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: + return self.first(tag[:-3]) + elif tag.find('__') != 0: + return self.first(tag) + + def __eq__(self, other): + """Returns true iff this tag has the same name, the same attributes, + and the same contents (recursively) as the given tag. + + NOTE: right now this will return false if two tags have the + same attributes in a different order. 
Should this be fixed?""" + if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): + return False + for i in range(0, len(self.contents)): + if self.contents[i] != other.contents[i]: + return False + return True + + def __ne__(self, other): + """Returns true iff this tag is not identical to the other tag, + as defined in __eq__.""" + return not self == other + + def __repr__(self): + """Renders this tag as a string.""" + return str(self) + + def __unicode__(self): + return self.__str__(1) + + def __str__(self, needUnicode=None, showStructureIndent=None): + """Returns a string or Unicode representation of this tag and + its contents. + + NOTE: since Python's HTML parser consumes whitespace, this + method is not certain to reproduce the whitespace present in + the original string.""" + + attrs = [] + if self.attrs: + for key, val in self.attrs: + attrs.append('%s="%s"' % (key, val)) + close = '' + closeTag = '' + if self.isSelfClosing(): + close = ' /' + else: + closeTag = '' % self.name + indentIncrement = None + if showStructureIndent != None: + indentIncrement = showStructureIndent + if not self.hidden: + indentIncrement += 1 + contents = self.renderContents(indentIncrement, needUnicode=needUnicode) + if showStructureIndent: + space = '\n%s' % (' ' * showStructureIndent) + if self.hidden: + s = contents + else: + s = [] + attributeString = '' + if attrs: + attributeString = ' ' + ' '.join(attrs) + if showStructureIndent: + s.append(space) + s.append('<%s%s%s>' % (self.name, attributeString, close)) + s.append(contents) + if closeTag and showStructureIndent != None: + s.append(space) + s.append(closeTag) + s = ''.join(s) + isUnicode = type(s) == types.UnicodeType + if needUnicode and not isUnicode: + s = unicode(s) + elif isUnicode and needUnicode==False: + s = str(s) + return s + + def prettify(self, needUnicode=None): + return self.__str__(needUnicode, showStructureIndent=True) + + def renderContents(self, showStructureIndent=None, needUnicode=None): + """Renders the contents of this tag as a (possibly Unicode) + string.""" + s=[] + for c in self: + text = None + if isinstance(c, NavigableUnicodeString) or type(c) == types.UnicodeType: + text = unicode(c) + elif isinstance(c, Tag): + s.append(c.__str__(needUnicode, showStructureIndent)) + elif needUnicode: + text = unicode(c) + else: + text = str(c) + if text: + if showStructureIndent != None: + if text[-1] == '\n': + text = text[:-1] + s.append(text) + return ''.join(s) + + #Soup methods + + def firstText(self, text, recursive=True): + """Convenience method to retrieve the first piece of text matching the + given criteria. 'text' can be a string, a regular expression object, + a callable that takes a string and returns whether or not the + string 'matches', etc.""" + return self.first(recursive=recursive, text=text) + + def fetchText(self, text, recursive=True, limit=None): + """Convenience method to retrieve all pieces of text matching the + given criteria. 
'text' can be a string, a regular expression object, + a callable that takes a string and returns whether or not the + string 'matches', etc.""" + return self.fetch(recursive=recursive, text=text, limit=limit) + + def first(self, name=None, attrs={}, recursive=True, text=None): + """Return only the first child of this + Tag matching the given criteria.""" + r = Null + l = self.fetch(name, attrs, recursive, text, 1) + if l: + r = l[0] + return r + findChild = first + + def fetch(self, name=None, attrs={}, recursive=True, text=None, + limit=None): + """Extracts a list of Tag objects that match the given + criteria. You can specify the name of the Tag and any + attributes you want the Tag to have. + + The value of a key-value pair in the 'attrs' map can be a + string, a list of strings, a regular expression object, or a + callable that takes a string and returns whether or not the + string matches for some custom definition of 'matches'. The + same is true of the tag name.""" + generator = self.recursiveChildGenerator + if not recursive: + generator = self.childGenerator + return self._fetch(name, attrs, text, limit, generator) + fetchChildren = fetch + + #Utility methods + + def isSelfClosing(self): + """Returns true iff this is a self-closing tag as defined in the HTML + standard. + + TODO: This is specific to BeautifulSoup and its subclasses, but it's + used by __str__""" + return self.name in BeautifulSoup.SELF_CLOSING_TAGS + + def append(self, tag): + """Appends the given tag to the contents of this tag.""" + self.contents.append(tag) + + #Private methods + + def _getAttrMap(self): + """Initializes a map representation of this tag's attributes, + if not already initialized.""" + if not getattr(self, 'attrMap'): + self.attrMap = {} + for (key, value) in self.attrs: + self.attrMap[key] = value + return self.attrMap + + #Generator methods + def childGenerator(self): + for i in range(0, len(self.contents)): + yield self.contents[i] + raise StopIteration + + def recursiveChildGenerator(self): + stack = [(self, 0)] + while stack: + tag, start = stack.pop() + if isinstance(tag, Tag): + for i in range(start, len(tag.contents)): + a = tag.contents[i] + yield a + if isinstance(a, Tag) and tag.contents: + if i < len(tag.contents) - 1: + stack.append((tag, i+1)) + stack.append((a, 0)) + break + raise StopIteration + + +def isList(l): + """Convenience method that works with all 2.x versions of Python + to determine whether or not something is listlike.""" + return hasattr(l, '__iter__') \ + or (type(l) in (types.ListType, types.TupleType)) + +def buildTagMap(default, *args): + """Turns a list of maps, lists, or scalars into a single map. + Used to build the SELF_CLOSING_TAGS and NESTABLE_TAGS maps out + of lists and partial maps.""" + built = {} + for portion in args: + if hasattr(portion, 'items'): + #It's a map. Merge it. + for k,v in portion.items(): + built[k] = v + elif isList(portion): + #It's a list. Map each item to the default. + for k in portion: + built[k] = default + else: + #It's a scalar. Map it to the default. + built[portion] = default + return built + +class BeautifulStoneSoup(Tag, SGMLParser): + + """This class contains the basic parser and fetch code. It defines + a parser that knows nothing about tag behavior except for the + following: + + You can't close a tag without closing all the tags it encloses. + That is, "" actually means + "". + + [Another possible explanation is "", but since + this class defines no SELF_CLOSING_TAGS, it will never use that + explanation.] 
+
+    This class is useful for parsing XML or made-up markup languages,
+    or when BeautifulSoup makes an assumption counter to what you were
+    expecting."""
+
+    SELF_CLOSING_TAGS = {}
+    NESTABLE_TAGS = {}
+    RESET_NESTING_TAGS = {}
+    QUOTE_TAGS = {}
+
+    #As a public service we will by default silently replace MS smart quotes
+    #and similar characters with their HTML or ASCII equivalents.
+    MS_CHARS = { '\x80' : '&euro;',
+                 '\x81' : ' ',
+                 '\x82' : '&sbquo;',
+                 '\x83' : '&fnof;',
+                 '\x84' : '&bdquo;',
+                 '\x85' : '&hellip;',
+                 '\x86' : '&dagger;',
+                 '\x87' : '&Dagger;',
+                 '\x88' : '&caret;',
+                 '\x89' : '%',
+                 '\x8A' : '&Scaron;',
+                 '\x8B' : '&lt;',
+                 '\x8C' : '&OElig;',
+                 '\x8D' : '?',
+                 '\x8E' : 'Z',
+                 '\x8F' : '?',
+                 '\x90' : '?',
+                 '\x91' : '&lsquo;',
+                 '\x92' : '&rsquo;',
+                 '\x93' : '&ldquo;',
+                 '\x94' : '&rdquo;',
+                 '\x95' : '&bull;',
+                 '\x96' : '&ndash;',
+                 '\x97' : '&mdash;',
+                 '\x98' : '&tilde;',
+                 '\x99' : '&trade;',
+                 '\x9a' : '&scaron;',
+                 '\x9b' : '&gt;',
+                 '\x9c' : '&oelig;',
+                 '\x9d' : '?',
+                 '\x9e' : 'z',
+                 '\x9f' : '&Yuml;',}
+
+    PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
+                       lambda(x):x.group(1) + ' />'),
+                      (re.compile('<!\s+([^<>]*)>'),
+                       lambda(x):'<!' + x.group(1) + '>'),
+                      (re.compile("([\x80-\x9f])"),
+                       lambda(x): BeautifulStoneSoup.MS_CHARS.get(x.group(1)))
+                      ]
+
+    ROOT_TAG_NAME = '[document]'
+
+    def __init__(self, text=None, avoidParserProblems=True,
+                 initialTextIsEverything=True):
+        """Initialize this as the 'root tag' and feed in any text to
+        the parser.
+
+        NOTE about avoidParserProblems: sgmllib will process most bad
+        HTML, and BeautifulSoup has tricks for dealing with some HTML
+        that kills sgmllib, but Beautiful Soup can nonetheless choke
+        or lose data if your data uses self-closing tags or
+        declarations incorrectly. By default, Beautiful Soup sanitizes
+        its input to avoid the vast majority of these problems. The
+        problems are relatively rare, even in bad HTML, so feel free
+        to pass in False to avoidParserProblems if they don't apply to
+        you, and you'll get better performance. The only reason I have
+        this turned on by default is so I don't get so many tech
+        support questions.
+
+        The two most common instances of invalid HTML that will choke
+        sgmllib are fixed by the default parser massage techniques:
+
+         <br/> (No space between name of closing tag and tag close)
+         <! --comment--> (Extraneous whitespace in declaration)
+
+        You can pass in a custom list of (RE object, replace method)
+        tuples to get Beautiful Soup to scrub your input the way you
+        want."""
+        Tag.__init__(self, self.ROOT_TAG_NAME)
+        if avoidParserProblems \
+           and not isList(avoidParserProblems):
+            avoidParserProblems = self.PARSER_MASSAGE
+        self.avoidParserProblems = avoidParserProblems
+        SGMLParser.__init__(self)
+        self.quoteStack = []
+        self.hidden = 1
+        self.reset()
+        if hasattr(text, 'read'):
+            #It's a file-type object.
+            text = text.read()
+        if text:
+            self.feed(text)
+        if initialTextIsEverything:
+            self.done()
+
+    def __getattr__(self, methodName):
+        """This method routes method call requests to either the SGMLParser
+        superclass or the Tag superclass, depending on the method name."""
+        if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
+               or methodName.find('do_') == 0:
+            return SGMLParser.__getattr__(self, methodName)
+        elif methodName.find('__') != 0:
+            return Tag.__getattr__(self, methodName)
+        else:
+            raise AttributeError
+
+    def feed(self, text):
+        if self.avoidParserProblems:
+            for fix, m in self.avoidParserProblems:
+                text = fix.sub(m, text)
+        SGMLParser.feed(self, text)
+
+    def done(self):
+        """Called when you're done parsing, so that the unclosed tags can be
+        correctly processed."""
+        self.endData() #NEW
+        while self.currentTag.name != self.ROOT_TAG_NAME:
+            self.popTag()
+
+    def reset(self):
+        SGMLParser.reset(self)
+        self.currentData = []
+        self.currentTag = None
+        self.tagStack = []
+        self.pushTag(self)
+
+    def popTag(self):
+        tag = self.tagStack.pop()
+        # Tags with just one string-owning child get the child as a
+        # 'string' property, so that soup.tag.string is shorthand for
+        # soup.tag.contents[0]
+        if len(self.currentTag.contents) == 1 and \
+           isinstance(self.currentTag.contents[0], NavigableText):
+            self.currentTag.string = self.currentTag.contents[0]
+
+        #print "Pop", tag.name
+        if self.tagStack:
+            self.currentTag = self.tagStack[-1]
+        return self.currentTag
+
+    def pushTag(self, tag):
+        #print "Push", tag.name
+        if self.currentTag:
+            self.currentTag.append(tag)
+        self.tagStack.append(tag)
+        self.currentTag = self.tagStack[-1]
+
+    def endData(self):
+        currentData = ''.join(self.currentData)
+        if currentData:
+            if not currentData.strip():
+                if '\n' in currentData:
+                    currentData = '\n'
+                else:
+                    currentData = ' '
+            c = NavigableString
+            if type(currentData) == types.UnicodeType:
+                c = NavigableUnicodeString
+            o = c(currentData)
+            o.setup(self.currentTag, self.previous)
+            if self.previous:
+                self.previous.next = o
+            self.previous = o
+            self.currentTag.contents.append(o)
+        self.currentData = []
+
+    def _popToTag(self, name, inclusivePop=True):
+        """Pops the tag stack up to and including the most recent
+        instance of the given tag.
+        If inclusivePop is false, pops the tag
+        stack up to but *not* including the most recent instance of
+        the given tag."""
+        if name == self.ROOT_TAG_NAME:
+            return
+
+        numPops = 0
+        mostRecentTag = None
+        for i in range(len(self.tagStack)-1, 0, -1):
+            if name == self.tagStack[i].name:
+                numPops = len(self.tagStack)-i
+                break
+        if not inclusivePop:
+            numPops = numPops - 1
+
+        for i in range(0, numPops):
+            mostRecentTag = self.popTag()
+        return mostRecentTag
+
+    def _smartPop(self, name):
+
+        """We need to pop up to the previous tag of this type, unless
+        one of this tag's nesting reset triggers comes between this
+        tag and the previous tag of this type, OR unless this tag is a
+        generic nesting trigger and another generic nesting trigger
+        comes between this tag and the previous tag of this type.
+
+        Examples:

+         <p>Foo<b>Bar<p> should pop to 'p', not 'b'.
+         <p>Foo<table>Bar<p> should pop to 'table', not 'p'.
+         <p>Foo<table><tr>Bar<p> should pop to 'tr', not 'p'.
+         <p>Foo<b>Bar<p> should pop to 'p', not 'b'.
+
+         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
+         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
+         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
+        """
+
+        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
+        isNestable = nestingResetTriggers != None
+        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
+        popTo = None
+        inclusive = True
+        for i in range(len(self.tagStack)-1, 0, -1):
+            p = self.tagStack[i]
+            if (not p or p.name == name) and not isNestable:
+                #Non-nestable tags get popped to the top or to their
+                #last occurrence.
+                popTo = name
+                break
+            if (nestingResetTriggers != None
+                and p.name in nestingResetTriggers) \
+                or (nestingResetTriggers == None and isResetNesting
+                    and self.RESET_NESTING_TAGS.has_key(p.name)):
+
+                #If we encounter one of the nesting reset triggers
+                #peculiar to this tag, or we encounter another tag
+                #that causes nesting to reset, pop up to but not
+                #including that tag.
+
+                popTo = p.name
+                inclusive = False
+                break
+            p = p.parent
+        if popTo:
+            self._popToTag(popTo, inclusive)
+
+    def unknown_starttag(self, name, attrs, selfClosing=0):
+        #print "Start tag %s" % name
+        if self.quoteStack:
+            #This is not a real tag.
+            #print "<%s> is not real!" % name
+            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
+            self.handle_data('<%s%s>' % (name, attrs))
+            return
+        self.endData()
+        if not name in self.SELF_CLOSING_TAGS and not selfClosing:
+            self._smartPop(name)
+        tag = Tag(name, attrs, self.currentTag, self.previous)
+        if self.previous:
+            self.previous.next = tag
+        self.previous = tag
+        self.pushTag(tag)
+        if selfClosing or name in self.SELF_CLOSING_TAGS:
+            self.popTag()
+        if name in self.QUOTE_TAGS:
+            #print "Beginning quote (%s)" % name
+            self.quoteStack.append(name)
+            self.literal = 1
+
+    def unknown_endtag(self, name):
+        if self.quoteStack and self.quoteStack[-1] != name:
+            #This is not a real end tag.
+            #print "</%s> is not real!" % name
+            self.handle_data('</%s>' % name)
+            return
+        self.endData()
+        self._popToTag(name)
+        if self.quoteStack and self.quoteStack[-1] == name:
+            self.quoteStack.pop()
+            self.literal = (len(self.quoteStack) > 0)
+
+    def handle_data(self, data):
+        self.currentData.append(data)
+
+    def handle_pi(self, text):
+        "Propagate processing instructions right through."
+        self.handle_data("<?%s>" % text)
+
+    def handle_comment(self, text):
+        "Propagate comments right through."
+        self.handle_data("<!--%s-->" % text)
+
+    def handle_charref(self, ref):
+        "Propagate char refs right through."
+        self.handle_data('&#%s;' % ref)
+
+    def handle_entityref(self, ref):
+        "Propagate entity refs right through."
+        self.handle_data('&%s;' % ref)
+
+    def handle_decl(self, data):
+        "Propagate DOCTYPEs and the like right through."
+        self.handle_data('<!%s>' % data)
+
+    def parse_declaration(self, i):
+        """Treat a bogus SGML declaration as raw data. Treat a CDATA
+        declaration as regular data."""
+        j = None
+        if self.rawdata[i:i+9] == '<![CDATA[':
+            k = self.rawdata.find(']]>', i)
+            if k == -1:
+                k = len(self.rawdata)
+            self.handle_data(self.rawdata[i+9:k])
+            j = k+3
+        else:
+            try:
+                j = SGMLParser.parse_declaration(self, i)
+            except SGMLParseError:
+                toHandle = self.rawdata[i:]
+                self.handle_data(toHandle)
+                j = i + len(toHandle)
+        return j
+
+class BeautifulSoup(BeautifulStoneSoup):
+
+    """This parser knows the following facts about HTML:
+
+    * Some tags have no closing tag and should be interpreted as being
+      closed as soon as they are encountered.
+
+    * The text inside some tags (ie. 'script') may contain tags which
+      are not really part of the document and which should be parsed
+      as text, not tags. If you want to parse the text as tags, you can
+      always fetch it and parse it explicitly.
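+
+      For instance (an illustrative sketch added here, not part of the
+      original docstring; 'script' is one of the QUOTE_TAGS defined
+      below):
+
+       soup = BeautifulSoup('<script>if (a < b) x();</script>')
+       soup.first('script').string
+        # -> the raw script text, kept as one unparsed text node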
+
+    * Tag nesting rules:
+
+      Most tags can't be nested at all. For instance, the occurrence of
+      a <p> tag should implicitly close the previous <p> tag.
+
+       <p>Para1<p>Para2
+        should be transformed into:
+       <p>Para1</p><p>Para2
+
+      Some tags can be nested arbitrarily. For instance, the occurrence
+      of a <blockquote> tag should _not_ implicitly close the previous
+      <blockquote> tag.
+
+       Alice said: <blockquote>Bob said: <blockquote>Blah
+        should NOT be transformed into:
+       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
+
+      Some tags can be nested, but the nesting is reset by the
+      interposition of other tags. For instance, a <tr> tag should
+      implicitly close the previous <tr> tag within the same <table>,
+      but not close a <tr> tag in another table.
+
+       <table><tr>Blah<tr>Blah
+        should be transformed into:
+       <table><tr>Blah</tr><tr>Blah
+       but,
+       <tr>Blah<table><tr>Blah
+        should NOT be transformed into
+       <tr>Blah<table></tr><tr>Blah
+
+    Differing assumptions about tag nesting rules are a major source
+    of problems with the BeautifulSoup class. If BeautifulSoup is not
+    treating as nestable a tag your page author treats as nestable,
+    try ICantBelieveItsBeautifulSoup before writing your own
+    subclass."""
+
+    SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta',
+                                           'spacer', 'link', 'frame', 'base'])
+
+    QUOTE_TAGS = {'script': None}
+
+    #According to the HTML standard, each of these inline tags can
+    #contain another tag of the same type. Furthermore, it's common
+    #to actually use these tags this way.
+    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
+                            'center']
+
+    #According to the HTML standard, these block tags can contain
+    #another tag of the same type. Furthermore, it's common
+    #to actually use these tags this way.
+    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
+
+    #Lists can contain other lists, but there are restrictions.
+    NESTABLE_LIST_TAGS = { 'ol' : [],
+                           'ul' : [],
+                           'li' : ['ul', 'ol'],
+                           'dl' : [],
+                           'dd' : ['dl'],
+                           'dt' : ['dl'] }
+
+    #Tables can contain other tables, but there are restrictions.
+    NESTABLE_TABLE_TAGS = {'table' : [],
+                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
+                           'td' : ['tr'],
+                           'th' : ['tr'],
+                           }
+
+    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
+
+    #If one of these tags is encountered, all tags up to the next tag of
+    #this type are popped.
+    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
+                                     NON_NESTABLE_BLOCK_TAGS,
+                                     NESTABLE_LIST_TAGS,
+                                     NESTABLE_TABLE_TAGS)
+
+    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
+                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
+
+class ICantBelieveItsBeautifulSoup(BeautifulSoup):
+
+    """The BeautifulSoup class is oriented towards skipping over
+    common HTML errors like unclosed tags. However, sometimes it makes
+    errors of its own. For instance, consider this fragment:
+
+     <b>Foo<b>Bar</b></b>
+
+    This is perfectly valid (if bizarre) HTML. However, the
+    BeautifulSoup class will implicitly close the first b tag when it
+    encounters the second 'b'. It will think the author wrote
+    "<b>Foo<b>Bar</b>", and didn't close the first 'b' tag, because
+    there's no real-world reason to bold something that's already
+    bold. When it encounters '</b></b>' it will close two more 'b'
+    tags, for a grand total of three tags closed instead of two. This
+    can throw off the rest of your document structure. The same is
+    true of a number of other tags, listed below.
+
+    It's much more common for someone to forget to close (eg.) a 'b'
+    tag than to actually use nested 'b' tags, and the BeautifulSoup
+    class handles the common case. This class handles the
+    not-co-common case: where you can't believe someone wrote what
+    they did, but it's valid HTML and BeautifulSoup screwed up by
+    assuming it wouldn't be.
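+
+    A sketch of the contrast (illustrative commentary added here, not
+    part of the original docstring):
+
+     len(BeautifulSoup('<b>Foo<b>Bar</b></b>').fetch('b'))
+      # 2: the second <b> implicitly closed the first
+     outer = ICantBelieveItsBeautifulSoup('<b>Foo<b>Bar</b></b>').first('b')
+     outer.first('b')
+      # the nested <b>Bar</b> Tag: 'b' is treated as nestable here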
+
+    If this doesn't do what you need, try subclassing this class or
+    BeautifulSoup, and providing your own list of NESTABLE_TAGS."""
+
+    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
+     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
+      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
+      'big']
+
+    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
+
+    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
+                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
+                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
+
+class BeautifulSOAP(BeautifulStoneSoup):
+    """This class will push a tag with only a single string child into
+    the tag's parent as an attribute. The attribute's name is the tag
+    name, and the value is the string child. An example should give
+    the flavor of the change:
+
+    <foo><bar>baz</bar></foo>
+     =>
+    <foo bar="baz"><bar>baz</bar></foo>
+
+    You can then access fooTag['bar'] instead of fooTag.barTag.string.
+
+    This is, of course, useful for scraping structures that tend to
+    use subelements instead of attributes, such as SOAP messages. Note
+    that it modifies its input, so don't print the modified version
+    out.
+
+    I'm not sure how many people really want to use this class; let me
+    know if you do. Mainly I like the name."""
+
+    def popTag(self):
+        if len(self.tagStack) > 1:
+            tag = self.tagStack[-1]
+            parent = self.tagStack[-2]
+            parent._getAttrMap()
+            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
+                isinstance(tag.contents[0], NavigableText) and
+                not parent.attrMap.has_key(tag.name)):
+                parent[tag.name] = tag.contents[0]
+        BeautifulStoneSoup.popTag(self)
+
+#Enterprise class names! It has come to our attention that some people
+#think the names of the Beautiful Soup parser classes are too silly
+#and "unprofessional" for use in enterprise screen-scraping. We feel
+#your pain! For such-minded folk, the Beautiful Soup Consortium And
+#All-Night Kosher Bakery recommends renaming this file to
+#"RobustParser.py" (or, in cases of extreme enterprisitude,
+#"RobustParserBeanInterface.class") and using the following
+#enterprise-friendly class aliases:
+class RobustXMLParser(BeautifulStoneSoup):
+    pass
+class RobustHTMLParser(BeautifulSoup):
+    pass
+class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
+    pass
+class SimplifyingSOAPParser(BeautifulSOAP):
+    pass
+
+###
+
+
+#By default, act as an HTML pretty-printer.
+if __name__ == '__main__':
+    import sys
+    soup = BeautifulStoneSoup(sys.stdin.read())
+    print soup.prettify()
diff --git a/plugin.video.alfa/lib/mechanize/_clientcookie.py b/plugin.video.alfa/lib/mechanize/_clientcookie.py
new file mode 100755
index 00000000..d29feaae
--- /dev/null
+++ b/plugin.video.alfa/lib/mechanize/_clientcookie.py
@@ -0,0 +1,1725 @@
+"""HTTP cookie handling for web clients.
+
+This module originally developed from my port of Gisle Aas' Perl module
+HTTP::Cookies, from the libwww-perl library.
+
+Docstrings, comments and debug strings in this code refer to the
+attributes of the HTTP cookie system as cookie-attributes, to distinguish
+them clearly from Python attributes.
+
+                        CookieJar____
+                        /     \      \
+            FileCookieJar      \      \
+             /    |   \         \      \
+ MozillaCookieJar | LWPCookieJar \      \
+                  |               |      \
+                  |   ---MSIEBase |       \
+                  |  /            |        \
+                  | /             |         \
+          MSIEDBCookieJar BSDDBCookieJar
+                   |/
+          MSIECookieJar
+
+Comments to John J Lee <jjl@pobox.com>.
+ + +Copyright 2002-2006 John J Lee +Copyright 1997-1999 Gisle Aas (original libwww-perl code) +Copyright 2002-2003 Johnny Lee (original MSIE Perl code) + +This code is free software; you can redistribute it and/or modify it +under the terms of the BSD or ZPL 2.1 licenses (see the file +COPYING.txt included with the distribution). + +""" + +import sys, re, copy, time, urllib, types, logging +try: + import threading + _threading = threading; del threading +except ImportError: + import dummy_threading + _threading = dummy_threading; del dummy_threading + +MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar " + "instance initialised with one)") +DEFAULT_HTTP_PORT = "80" + +from _headersutil import split_header_words, parse_ns_headers +from _util import isstringlike +import _rfc3986 + +debug = logging.getLogger("mechanize.cookies").debug + + +def reraise_unmasked_exceptions(unmasked=()): + # There are a few catch-all except: statements in this module, for + # catching input that's bad in unexpected ways. + # This function re-raises some exceptions we don't want to trap. + import mechanize, warnings + if not mechanize.USE_BARE_EXCEPT: + raise + unmasked = unmasked + (KeyboardInterrupt, SystemExit, MemoryError) + etype = sys.exc_info()[0] + if issubclass(etype, unmasked): + raise + # swallowed an exception + import traceback, StringIO + f = StringIO.StringIO() + traceback.print_exc(None, f) + msg = f.getvalue() + warnings.warn("mechanize bug!\n%s" % msg, stacklevel=2) + + +IPV4_RE = re.compile(r"\.\d+$") +def is_HDN(text): + """Return True if text is a host domain name.""" + # XXX + # This may well be wrong. Which RFC is HDN defined in, if any (for + # the purposes of RFC 2965)? + # For the current implementation, what about IPv6? Remember to look + # at other uses of IPV4_RE also, if change this. + return not (IPV4_RE.search(text) or + text == "" or + text[0] == "." or text[-1] == ".") + +def domain_match(A, B): + """Return True if domain A domain-matches domain B, according to RFC 2965. + + A and B may be host domain names or IP addresses. + + RFC 2965, section 1: + + Host names can be specified either as an IP address or a HDN string. + Sometimes we compare one host name with another. (Such comparisons SHALL + be case-insensitive.) Host A's name domain-matches host B's if + + * their host name strings string-compare equal; or + + * A is a HDN string and has the form NB, where N is a non-empty + name string, B has the form .B', and B' is a HDN string. (So, + x.y.com domain-matches .Y.com but not Y.com.) + + Note that domain-match is not a commutative operation: a.b.c.com + domain-matches .c.com, but not the reverse. + + """ + # Note that, if A or B are IP addresses, the only relevant part of the + # definition of the domain-match algorithm is the direct string-compare. + A = A.lower() + B = B.lower() + if A == B: + return True + if not is_HDN(A): + return False + i = A.rfind(B) + has_form_nb = not (i == -1 or i == 0) + return ( + has_form_nb and + B.startswith(".") and + is_HDN(B[1:]) + ) + +def liberal_is_HDN(text): + """Return True if text is a sort-of-like a host domain name. + + For accepting/blocking domains. + + """ + return not IPV4_RE.search(text) + +def user_domain_match(A, B): + """For blocking/accepting domains. + + A and B may be host domain names or IP addresses. 
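+
+    For example (illustrative cases added here, mirroring the rules
+    above; not part of the original docstring):
+
+     >>> user_domain_match("www.acme.com", ".acme.com")
+     True
+     >>> user_domain_match("acme.com", "acme.com")
+     True
+     >>> user_domain_match("www.acme.com", "acme.com")
+     False
+     >>> user_domain_match("192.168.1.2", "192.168.1.2")
+     True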
+ + """ + A = A.lower() + B = B.lower() + if not (liberal_is_HDN(A) and liberal_is_HDN(B)): + if A == B: + # equal IP addresses + return True + return False + initial_dot = B.startswith(".") + if initial_dot and A.endswith(B): + return True + if not initial_dot and A == B: + return True + return False + +cut_port_re = re.compile(r":\d+$") +def request_host(request): + """Return request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. + + """ + url = request.get_full_url() + host = _rfc3986.urlsplit(url)[1] + if host is None: + host = request.get_header("Host", "") + # remove port, if present + return cut_port_re.sub("", host, 1) + +def request_host_lc(request): + return request_host(request).lower() + +def eff_request_host(request): + """Return a tuple (request-host, effective request-host name).""" + erhn = req_host = request_host(request) + if req_host.find(".") == -1 and not IPV4_RE.search(req_host): + erhn = req_host + ".local" + return req_host, erhn + +def eff_request_host_lc(request): + req_host, erhn = eff_request_host(request) + return req_host.lower(), erhn.lower() + +def effective_request_host(request): + """Return the effective request-host, as defined by RFC 2965.""" + return eff_request_host(request)[1] + +def request_path(request): + """Return path component of request-URI, as defined by RFC 2965.""" + url = request.get_full_url() + path = escape_path(_rfc3986.urlsplit(url)[2]) + if not path.startswith("/"): + path = "/" + path + return path + +def request_port(request): + host = request.get_host() + i = host.find(':') + if i >= 0: + port = host[i+1:] + try: + int(port) + except ValueError: + debug("nonnumeric port: '%s'", port) + return None + else: + port = DEFAULT_HTTP_PORT + return port + +def request_is_unverifiable(request): + try: + return request.is_unverifiable() + except AttributeError: + if hasattr(request, "unverifiable"): + return request.unverifiable + else: + raise + +# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't +# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738). +HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()" +ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])") +def uppercase_escaped_char(match): + return "%%%s" % match.group(1).upper() +def escape_path(path): + """Escape any invalid characters in HTTP URL, and uppercase all escapes.""" + # There's no knowing what character encoding was used to create URLs + # containing %-escapes, but since we have to pick one to escape invalid + # path characters, we pick UTF-8, as recommended in the HTML 4.0 + # specification: + # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1 + # And here, kind of: draft-fielding-uri-rfc2396bis-03 + # (And in draft IRI specification: draft-duerst-iri-05) + # (And here, for new URI schemes: RFC 2718) + if isinstance(path, types.UnicodeType): + path = path.encode("utf-8") + path = urllib.quote(path, HTTP_PATH_SAFE) + path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path) + return path + +def reach(h): + """Return reach of host h, as defined by RFC 2965, section 1. + + The reach R of a host name H is defined as follows: + + * If + + - H is the host domain name of a host; and, + + - H has the form A.B; and + + - A has no embedded (that is, interior) dots; and + + - B has at least one embedded dot, or B is the string "local". + then the reach of H is .B. + + * Otherwise, the reach of H is H. 
+ + >>> reach("www.acme.com") + '.acme.com' + >>> reach("acme.com") + 'acme.com' + >>> reach("acme.local") + '.local' + + """ + i = h.find(".") + if i >= 0: + #a = h[:i] # this line is only here to show what a is + b = h[i+1:] + i = b.find(".") + if is_HDN(h) and (i >= 0 or b == "local"): + return "."+b + return h + +def is_third_party(request): + """ + + RFC 2965, section 3.3.6: + + An unverifiable transaction is to a third-party host if its request- + host U does not domain-match the reach R of the request-host O in the + origin transaction. + + """ + req_host = request_host_lc(request) + # the origin request's request-host was stuffed into request by + # _urllib2_support.AbstractHTTPHandler + return not domain_match(req_host, reach(request.origin_req_host)) + + +try: + all +except NameError: + # python 2.4 + def all(iterable): + for x in iterable: + if not x: + return False + return True + + +class Cookie: + """HTTP Cookie. + + This class represents both Netscape and RFC 2965 cookies. + + This is deliberately a very simple class. It just holds attributes. It's + possible to construct Cookie instances that don't comply with the cookie + standards. CookieJar.make_cookies is the factory function for Cookie + objects -- it deals with cookie parsing, supplying defaults, and + normalising to the representation used in this class. CookiePolicy is + responsible for checking them to see whether they should be accepted from + and returned to the server. + + version: integer; + name: string; + value: string (may be None); + port: string; None indicates no attribute was supplied (e.g. "Port", rather + than eg. "Port=80"); otherwise, a port string (eg. "80") or a port list + string (e.g. "80,8080") + port_specified: boolean; true if a value was supplied with the Port + cookie-attribute + domain: string; + domain_specified: boolean; true if Domain was explicitly set + domain_initial_dot: boolean; true if Domain as set in HTTP header by server + started with a dot (yes, this really is necessary!) + path: string; + path_specified: boolean; true if Path was explicitly set + secure: boolean; true if should only be returned over secure connection + expires: integer; seconds since epoch (RFC 2965 cookies should calculate + this value from the Max-Age attribute) + discard: boolean, true if this is a session cookie; (if no expires value, + this should be true) + comment: string; + comment_url: string; + rfc2109: boolean; true if cookie arrived in a Set-Cookie: (not + Set-Cookie2:) header, but had a version cookie-attribute of 1 + rest: mapping of other cookie-attributes + + Note that the port may be present in the headers, but unspecified ("Port" + rather than"Port=80", for example); if this is the case, port is None. 
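+
+    Illustration (added commentary, hypothetical values): a response
+    header "Set-Cookie: sid=abc; Max-Age=3600" received for
+    http://www.example.com/ becomes roughly
+
+     Cookie(0, "sid", "abc", None, False, "www.example.com", False, False,
+            "/", False, False, <now + 3600>, False, None, None, {})
+
+    with expires computed from Max-Age; a cookie with no Expires or
+    Max-Age at all gets expires None and discard True (a session
+    cookie).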
+ + """ + + + _attrs = ("version", "name", "value", + "port", "port_specified", + "domain", "domain_specified", "domain_initial_dot", + "path", "path_specified", + "secure", "expires", "discard", "comment", "comment_url", + "rfc2109", "_rest") + + def __init__(self, version, name, value, + port, port_specified, + domain, domain_specified, domain_initial_dot, + path, path_specified, + secure, + expires, + discard, + comment, + comment_url, + rest, + rfc2109=False, + ): + + if version is not None: version = int(version) + if expires is not None: expires = int(expires) + if port is None and port_specified is True: + raise ValueError("if port is None, port_specified must be false") + + self.version = version + self.name = name + self.value = value + self.port = port + self.port_specified = port_specified + # normalise case, as per RFC 2965 section 3.3.3 + self.domain = domain.lower() + self.domain_specified = domain_specified + # Sigh. We need to know whether the domain given in the + # cookie-attribute had an initial dot, in order to follow RFC 2965 + # (as clarified in draft errata). Needed for the returned $Domain + # value. + self.domain_initial_dot = domain_initial_dot + self.path = path + self.path_specified = path_specified + self.secure = secure + self.expires = expires + self.discard = discard + self.comment = comment + self.comment_url = comment_url + self.rfc2109 = rfc2109 + + self._rest = copy.copy(rest) + + def has_nonstandard_attr(self, name): + return self._rest.has_key(name) + def get_nonstandard_attr(self, name, default=None): + return self._rest.get(name, default) + def set_nonstandard_attr(self, name, value): + self._rest[name] = value + def nonstandard_attr_keys(self): + return self._rest.keys() + + def is_expired(self, now=None): + if now is None: now = time.time() + return (self.expires is not None) and (self.expires <= now) + + def __eq__(self, other): + return all(getattr(self, a) == getattr(other, a) for a in self._attrs) + + def __ne__(self, other): + return not (self == other) + + def __str__(self): + if self.port is None: p = "" + else: p = ":"+self.port + limit = self.domain + p + self.path + if self.value is not None: + namevalue = "%s=%s" % (self.name, self.value) + else: + namevalue = self.name + return "" % (namevalue, limit) + + def __repr__(self): + args = [] + for name in ["version", "name", "value", + "port", "port_specified", + "domain", "domain_specified", "domain_initial_dot", + "path", "path_specified", + "secure", "expires", "discard", "comment", "comment_url", + ]: + attr = getattr(self, name) + args.append("%s=%s" % (name, repr(attr))) + args.append("rest=%s" % repr(self._rest)) + args.append("rfc2109=%s" % repr(self.rfc2109)) + return "Cookie(%s)" % ", ".join(args) + + +class CookiePolicy: + """Defines which cookies get accepted from and returned to server. + + May also modify cookies. + + The subclass DefaultCookiePolicy defines the standard rules for Netscape + and RFC 2965 cookies -- override that if you want a customised policy. + + As well as implementing set_ok and return_ok, implementations of this + interface must also supply the following attributes, indicating which + protocols should be used, and how. These can be read and set at any time, + though whether that makes complete sense from the protocol point of view is + doubtful. 
+ + Public attributes: + + netscape: implement netscape protocol + rfc2965: implement RFC 2965 protocol + rfc2109_as_netscape: + WARNING: This argument will change or go away if is not accepted into + the Python standard library in this form! + If true, treat RFC 2109 cookies as though they were Netscape cookies. The + default is for this attribute to be None, which means treat 2109 cookies + as RFC 2965 cookies unless RFC 2965 handling is switched off (which it is, + by default), and as Netscape cookies otherwise. + hide_cookie2: don't add Cookie2 header to requests (the presence of + this header indicates to the server that we understand RFC 2965 + cookies) + + """ + def set_ok(self, cookie, request): + """Return true if (and only if) cookie should be accepted from server. + + Currently, pre-expired cookies never get this far -- the CookieJar + class deletes such cookies itself. + + cookie: mechanize.Cookie object + request: object implementing the interface defined by + CookieJar.extract_cookies.__doc__ + + """ + raise NotImplementedError() + + def return_ok(self, cookie, request): + """Return true if (and only if) cookie should be returned to server. + + cookie: mechanize.Cookie object + request: object implementing the interface defined by + CookieJar.add_cookie_header.__doc__ + + """ + raise NotImplementedError() + + def domain_return_ok(self, domain, request): + """Return false if cookies should not be returned, given cookie domain. + + This is here as an optimization, to remove the need for checking every + cookie with a particular domain (which may involve reading many files). + The default implementations of domain_return_ok and path_return_ok + (return True) leave all the work to return_ok. + + If domain_return_ok returns true for the cookie domain, path_return_ok + is called for the cookie path. Otherwise, path_return_ok and return_ok + are never called for that cookie domain. If path_return_ok returns + true, return_ok is called with the Cookie object itself for a full + check. Otherwise, return_ok is never called for that cookie path. + + Note that domain_return_ok is called for every *cookie* domain, not + just for the *request* domain. For example, the function might be + called with both ".acme.com" and "www.acme.com" if the request domain + is "www.acme.com". The same goes for path_return_ok. + + For argument documentation, see the docstring for return_ok. + + """ + return True + + def path_return_ok(self, path, request): + """Return false if cookies should not be returned, given cookie path. + + See the docstring for domain_return_ok. + + """ + return True + + +class DefaultCookiePolicy(CookiePolicy): + """Implements the standard rules for accepting and returning cookies. + + Both RFC 2965 and Netscape cookies are covered. RFC 2965 handling is + switched off by default. + + The easiest way to provide your own policy is to override this class and + call its methods in your overriden implementations before adding your own + additional checks. + + import mechanize + class MyCookiePolicy(mechanize.DefaultCookiePolicy): + def set_ok(self, cookie, request): + if not mechanize.DefaultCookiePolicy.set_ok( + self, cookie, request): + return False + if i_dont_want_to_store_this_cookie(): + return False + return True + + In addition to the features required to implement the CookiePolicy + interface, this class allows you to block and allow domains from setting + and receiving cookies. 
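+
+    For example (an illustrative sketch added here, not part of the
+    original docstring):
+
+     policy = DefaultCookiePolicy(blocked_domains=[".doubleclick.net"])
+     policy.is_blocked("ad.doubleclick.net")  # True
+     jar = CookieJar(policy=policy)  # jar now refuses those cookies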
There are also some strictness switches that allow + you to tighten up the rather loose Netscape protocol rules a little bit (at + the cost of blocking some benign cookies). + + A domain blacklist and whitelist is provided (both off by default). Only + domains not in the blacklist and present in the whitelist (if the whitelist + is active) participate in cookie setting and returning. Use the + blocked_domains constructor argument, and blocked_domains and + set_blocked_domains methods (and the corresponding argument and methods for + allowed_domains). If you set a whitelist, you can turn it off again by + setting it to None. + + Domains in block or allow lists that do not start with a dot must + string-compare equal. For example, "acme.com" matches a blacklist entry of + "acme.com", but "www.acme.com" does not. Domains that do start with a dot + are matched by more specific domains too. For example, both "www.acme.com" + and "www.munitions.acme.com" match ".acme.com" (but "acme.com" itself does + not). IP addresses are an exception, and must match exactly. For example, + if blocked_domains contains "192.168.1.2" and ".168.1.2" 192.168.1.2 is + blocked, but 193.168.1.2 is not. + + Additional Public Attributes: + + General strictness switches + + strict_domain: don't allow sites to set two-component domains with + country-code top-level domains like .co.uk, .gov.uk, .co.nz. etc. + This is far from perfect and isn't guaranteed to work! + + RFC 2965 protocol strictness switches + + strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable + transactions (usually, an unverifiable transaction is one resulting from + a redirect or an image hosted on another site); if this is false, cookies + are NEVER blocked on the basis of verifiability + + Netscape protocol strictness switches + + strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions + even to Netscape cookies + strict_ns_domain: flags indicating how strict to be with domain-matching + rules for Netscape cookies: + DomainStrictNoDots: when setting cookies, host prefix must not contain a + dot (e.g. www.foo.bar.com can't set a cookie for .bar.com, because + www.foo contains a dot) + DomainStrictNonDomain: cookies that did not explicitly specify a Domain + cookie-attribute can only be returned to a domain that string-compares + equal to the domain that set the cookie (e.g. rockets.acme.com won't + be returned cookies from acme.com that had no Domain cookie-attribute) + DomainRFC2965Match: when setting cookies, require a full RFC 2965 + domain-match + DomainLiberal and DomainStrict are the most useful combinations of the + above flags, for convenience + strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that + have names starting with '$' + strict_ns_set_path: don't allow setting cookies whose path doesn't + path-match request URI + + """ + + DomainStrictNoDots = 1 + DomainStrictNonDomain = 2 + DomainRFC2965Match = 4 + + DomainLiberal = 0 + DomainStrict = DomainStrictNoDots|DomainStrictNonDomain + + def __init__(self, + blocked_domains=None, allowed_domains=None, + netscape=True, rfc2965=False, + # WARNING: this argument will change or go away if is not + # accepted into the Python standard library in this form! + # default, ie. 
treat 2109 as netscape iff not rfc2965 + rfc2109_as_netscape=None, + hide_cookie2=False, + strict_domain=False, + strict_rfc2965_unverifiable=True, + strict_ns_unverifiable=False, + strict_ns_domain=DomainLiberal, + strict_ns_set_initial_dollar=False, + strict_ns_set_path=False, + ): + """ + Constructor arguments should be used as keyword arguments only. + + blocked_domains: sequence of domain names that we never accept cookies + from, nor return cookies to + allowed_domains: if not None, this is a sequence of the only domains + for which we accept and return cookies + + For other arguments, see CookiePolicy.__doc__ and + DefaultCookiePolicy.__doc__.. + + """ + self.netscape = netscape + self.rfc2965 = rfc2965 + self.rfc2109_as_netscape = rfc2109_as_netscape + self.hide_cookie2 = hide_cookie2 + self.strict_domain = strict_domain + self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable + self.strict_ns_unverifiable = strict_ns_unverifiable + self.strict_ns_domain = strict_ns_domain + self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar + self.strict_ns_set_path = strict_ns_set_path + + if blocked_domains is not None: + self._blocked_domains = tuple(blocked_domains) + else: + self._blocked_domains = () + + if allowed_domains is not None: + allowed_domains = tuple(allowed_domains) + self._allowed_domains = allowed_domains + + def blocked_domains(self): + """Return the sequence of blocked domains (as a tuple).""" + return self._blocked_domains + def set_blocked_domains(self, blocked_domains): + """Set the sequence of blocked domains.""" + self._blocked_domains = tuple(blocked_domains) + + def is_blocked(self, domain): + for blocked_domain in self._blocked_domains: + if user_domain_match(domain, blocked_domain): + return True + return False + + def allowed_domains(self): + """Return None, or the sequence of allowed domains (as a tuple).""" + return self._allowed_domains + def set_allowed_domains(self, allowed_domains): + """Set the sequence of allowed domains, or None.""" + if allowed_domains is not None: + allowed_domains = tuple(allowed_domains) + self._allowed_domains = allowed_domains + + def is_not_allowed(self, domain): + if self._allowed_domains is None: + return False + for allowed_domain in self._allowed_domains: + if user_domain_match(domain, allowed_domain): + return False + return True + + def set_ok(self, cookie, request): + """ + If you override set_ok, be sure to call this method. If it returns + false, so should your subclass (assuming your subclass wants to be more + strict about which cookies to accept). + + """ + debug(" - checking cookie %s", cookie) + + assert cookie.name is not None + + for n in "version", "verifiability", "name", "path", "domain", "port": + fn_name = "set_ok_"+n + fn = getattr(self, fn_name) + if not fn(cookie, request): + return False + + return True + + def set_ok_version(self, cookie, request): + if cookie.version is None: + # Version is always set to 0 by parse_ns_headers if it's a Netscape + # cookie, so this must be an invalid RFC 2965 cookie. 
+ debug(" Set-Cookie2 without version attribute (%s)", cookie) + return False + if cookie.version > 0 and not self.rfc2965: + debug(" RFC 2965 cookies are switched off") + return False + elif cookie.version == 0 and not self.netscape: + debug(" Netscape cookies are switched off") + return False + return True + + def set_ok_verifiability(self, cookie, request): + if request_is_unverifiable(request) and is_third_party(request): + if cookie.version > 0 and self.strict_rfc2965_unverifiable: + debug(" third-party RFC 2965 cookie during " + "unverifiable transaction") + return False + elif cookie.version == 0 and self.strict_ns_unverifiable: + debug(" third-party Netscape cookie during " + "unverifiable transaction") + return False + return True + + def set_ok_name(self, cookie, request): + # Try and stop servers setting V0 cookies designed to hack other + # servers that know both V0 and V1 protocols. + if (cookie.version == 0 and self.strict_ns_set_initial_dollar and + cookie.name.startswith("$")): + debug(" illegal name (starts with '$'): '%s'", cookie.name) + return False + return True + + def set_ok_path(self, cookie, request): + if cookie.path_specified: + req_path = request_path(request) + if ((cookie.version > 0 or + (cookie.version == 0 and self.strict_ns_set_path)) and + not req_path.startswith(cookie.path)): + debug(" path attribute %s is not a prefix of request " + "path %s", cookie.path, req_path) + return False + return True + + def set_ok_countrycode_domain(self, cookie, request): + """Return False if explicit cookie domain is not acceptable. + + Called by set_ok_domain, for convenience of overriding by + subclasses. + + """ + if cookie.domain_specified and self.strict_domain: + domain = cookie.domain + # since domain was specified, we know that: + assert domain.startswith(".") + if domain.count(".") == 2: + # domain like .foo.bar + i = domain.rfind(".") + tld = domain[i+1:] + sld = domain[1:i] + if (sld.lower() in [ + "co", "ac", + "com", "edu", "org", "net", "gov", "mil", "int", + "aero", "biz", "cat", "coop", "info", "jobs", "mobi", + "museum", "name", "pro", "travel", + ] and + len(tld) == 2): + # domain like .co.uk + return False + return True + + def set_ok_domain(self, cookie, request): + if self.is_blocked(cookie.domain): + debug(" domain %s is in user block-list", cookie.domain) + return False + if self.is_not_allowed(cookie.domain): + debug(" domain %s is not in user allow-list", cookie.domain) + return False + if not self.set_ok_countrycode_domain(cookie, request): + debug(" country-code second level domain %s", cookie.domain) + return False + if cookie.domain_specified: + req_host, erhn = eff_request_host_lc(request) + domain = cookie.domain + if domain.startswith("."): + undotted_domain = domain[1:] + else: + undotted_domain = domain + embedded_dots = (undotted_domain.find(".") >= 0) + if not embedded_dots and domain != ".local": + debug(" non-local domain %s contains no embedded dot", + domain) + return False + if cookie.version == 0: + if (not erhn.endswith(domain) and + (not erhn.startswith(".") and + not ("."+erhn).endswith(domain))): + debug(" effective request-host %s (even with added " + "initial dot) does not end end with %s", + erhn, domain) + return False + if (cookie.version > 0 or + (self.strict_ns_domain & self.DomainRFC2965Match)): + if not domain_match(erhn, domain): + debug(" effective request-host %s does not domain-match " + "%s", erhn, domain) + return False + if (cookie.version > 0 or + (self.strict_ns_domain & self.DomainStrictNoDots)): + 
host_prefix = req_host[:-len(domain)] + if (host_prefix.find(".") >= 0 and + not IPV4_RE.search(req_host)): + debug(" host prefix %s for domain %s contains a dot", + host_prefix, domain) + return False + return True + + def set_ok_port(self, cookie, request): + if cookie.port_specified: + req_port = request_port(request) + if req_port is None: + req_port = "80" + else: + req_port = str(req_port) + for p in cookie.port.split(","): + try: + int(p) + except ValueError: + debug(" bad port %s (not numeric)", p) + return False + if p == req_port: + break + else: + debug(" request port (%s) not found in %s", + req_port, cookie.port) + return False + return True + + def return_ok(self, cookie, request): + """ + If you override return_ok, be sure to call this method. If it returns + false, so should your subclass (assuming your subclass wants to be more + strict about which cookies to return). + + """ + # Path has already been checked by path_return_ok, and domain blocking + # done by domain_return_ok. + debug(" - checking cookie %s", cookie) + + for n in ("version", "verifiability", "secure", "expires", "port", + "domain"): + fn_name = "return_ok_"+n + fn = getattr(self, fn_name) + if not fn(cookie, request): + return False + return True + + def return_ok_version(self, cookie, request): + if cookie.version > 0 and not self.rfc2965: + debug(" RFC 2965 cookies are switched off") + return False + elif cookie.version == 0 and not self.netscape: + debug(" Netscape cookies are switched off") + return False + return True + + def return_ok_verifiability(self, cookie, request): + if request_is_unverifiable(request) and is_third_party(request): + if cookie.version > 0 and self.strict_rfc2965_unverifiable: + debug(" third-party RFC 2965 cookie during unverifiable " + "transaction") + return False + elif cookie.version == 0 and self.strict_ns_unverifiable: + debug(" third-party Netscape cookie during unverifiable " + "transaction") + return False + return True + + def return_ok_secure(self, cookie, request): + if cookie.secure and request.get_type() != "https": + debug(" secure cookie with non-secure request") + return False + return True + + def return_ok_expires(self, cookie, request): + if cookie.is_expired(self._now): + debug(" cookie expired") + return False + return True + + def return_ok_port(self, cookie, request): + if cookie.port: + req_port = request_port(request) + if req_port is None: + req_port = "80" + for p in cookie.port.split(","): + if p == req_port: + break + else: + debug(" request port %s does not match cookie port %s", + req_port, cookie.port) + return False + return True + + def return_ok_domain(self, cookie, request): + req_host, erhn = eff_request_host_lc(request) + domain = cookie.domain + + # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't + if (cookie.version == 0 and + (self.strict_ns_domain & self.DomainStrictNonDomain) and + not cookie.domain_specified and domain != erhn): + debug(" cookie with unspecified domain does not string-compare " + "equal to request domain") + return False + + if cookie.version > 0 and not domain_match(erhn, domain): + debug(" effective request-host name %s does not domain-match " + "RFC 2965 cookie domain %s", erhn, domain) + return False + if cookie.version == 0 and not ("."+erhn).endswith(domain): + debug(" request-host %s does not match Netscape cookie domain " + "%s", req_host, domain) + return False + return True + + def domain_return_ok(self, domain, request): + # Liberal check of domain. 
This is here as an optimization to avoid + # having to load lots of MSIE cookie files unless necessary. + + # Munge req_host and erhn to always start with a dot, so as to err on + # the side of letting cookies through. + dotted_req_host, dotted_erhn = eff_request_host_lc(request) + if not dotted_req_host.startswith("."): + dotted_req_host = "."+dotted_req_host + if not dotted_erhn.startswith("."): + dotted_erhn = "."+dotted_erhn + if not (dotted_req_host.endswith(domain) or + dotted_erhn.endswith(domain)): + #debug(" request domain %s does not match cookie domain %s", + # req_host, domain) + return False + + if self.is_blocked(domain): + debug(" domain %s is in user block-list", domain) + return False + if self.is_not_allowed(domain): + debug(" domain %s is not in user allow-list", domain) + return False + + return True + + def path_return_ok(self, path, request): + debug("- checking cookie path=%s", path) + req_path = request_path(request) + if not req_path.startswith(path): + debug(" %s does not path-match %s", req_path, path) + return False + return True + + +def vals_sorted_by_key(adict): + keys = adict.keys() + keys.sort() + return map(adict.get, keys) + +class MappingIterator: + """Iterates over nested mapping, depth-first, in sorted order by key.""" + def __init__(self, mapping): + self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack + + def __iter__(self): return self + + def next(self): + # this is hairy because of lack of generators + while 1: + try: + vals, i, prev_item = self._s.pop() + except IndexError: + raise StopIteration() + if i < len(vals): + item = vals[i] + i = i + 1 + self._s.append((vals, i, prev_item)) + try: + item.items + except AttributeError: + # non-mapping + break + else: + # mapping + self._s.append((vals_sorted_by_key(item), 0, item)) + continue + return item + + +# Used as second parameter to dict.get method, to distinguish absent +# dict key from one with a None value. +class Absent: pass + +class CookieJar: + """Collection of HTTP cookies. + + You may not need to know about this class: try mechanize.urlopen(). + + The major methods are extract_cookies and add_cookie_header; these are all + you are likely to need. + + CookieJar supports the iterator protocol: + + for cookie in cookiejar: + # do something with cookie + + Methods: + + add_cookie_header(request) + extract_cookies(response, request) + get_policy() + set_policy(policy) + cookies_for_request(request) + make_cookies(response, request) + set_cookie_if_ok(cookie, request) + set_cookie(cookie) + clear_session_cookies() + clear_expired_cookies() + clear(domain=None, path=None, name=None) + + Public attributes + + policy: CookiePolicy object + + """ + + non_word_re = re.compile(r"\W") + quote_re = re.compile(r"([\"\\])") + strict_domain_re = re.compile(r"\.?[^.]*") + domain_re = re.compile(r"[^.]*") + dots_re = re.compile(r"^\.+") + + def __init__(self, policy=None): + """ + See CookieJar.__doc__ for argument documentation. 
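+
+        Typical use (an illustrative sketch added here, assuming the
+        mechanize package is importable; not part of the original
+        docstring):
+
+         import mechanize
+         jar = mechanize.CookieJar()
+         opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(jar))
+         response = opener.open("http://example.com/")
+         for cookie in jar:
+             print cookie.name, cookie.value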
+ + """ + if policy is None: + policy = DefaultCookiePolicy() + self._policy = policy + + self._cookies = {} + + # for __getitem__ iteration in pre-2.2 Pythons + self._prev_getitem_index = 0 + + def get_policy(self): + return self._policy + + def set_policy(self, policy): + self._policy = policy + + def _cookies_for_domain(self, domain, request): + cookies = [] + if not self._policy.domain_return_ok(domain, request): + return [] + debug("Checking %s for cookies to return", domain) + cookies_by_path = self._cookies[domain] + for path in cookies_by_path.keys(): + if not self._policy.path_return_ok(path, request): + continue + cookies_by_name = cookies_by_path[path] + for cookie in cookies_by_name.values(): + if not self._policy.return_ok(cookie, request): + debug(" not returning cookie") + continue + debug(" it's a match") + cookies.append(cookie) + return cookies + + def cookies_for_request(self, request): + """Return a list of cookies to be returned to server. + + The returned list of cookie instances is sorted in the order they + should appear in the Cookie: header for return to the server. + + See add_cookie_header.__doc__ for the interface required of the + request argument. + + New in version 0.1.10 + + """ + self._policy._now = self._now = int(time.time()) + cookies = self._cookies_for_request(request) + # add cookies in order of most specific (i.e. longest) path first + def decreasing_size(a, b): return cmp(len(b.path), len(a.path)) + cookies.sort(decreasing_size) + return cookies + + def _cookies_for_request(self, request): + """Return a list of cookies to be returned to server.""" + # this method still exists (alongside cookies_for_request) because it + # is part of an implied protected interface for subclasses of cookiejar + # XXX document that implied interface, or provide another way of + # implementing cookiejars than subclassing + cookies = [] + for domain in self._cookies.keys(): + cookies.extend(self._cookies_for_domain(domain, request)) + return cookies + + def _cookie_attrs(self, cookies): + """Return a list of cookie-attributes to be returned to server. + + The $Version attribute is also added when appropriate (currently only + once per request). + + >>> jar = CookieJar() + >>> ns_cookie = Cookie(0, "foo", '"bar"', None, False, + ... "example.com", False, False, + ... "/", False, False, None, True, + ... None, None, {}) + >>> jar._cookie_attrs([ns_cookie]) + ['foo="bar"'] + >>> rfc2965_cookie = Cookie(1, "foo", "bar", None, False, + ... ".example.com", True, False, + ... "/", False, False, None, True, + ... None, None, {}) + >>> jar._cookie_attrs([rfc2965_cookie]) + ['$Version=1', 'foo=bar', '$Domain="example.com"'] + + """ + version_set = False + + attrs = [] + for cookie in cookies: + # set version of Cookie header + # XXX + # What should it be if multiple matching Set-Cookie headers have + # different versions themselves? + # Answer: there is no answer; was supposed to be settled by + # RFC 2965 errata, but that may never appear... 
+ version = cookie.version + if not version_set: + version_set = True + if version > 0: + attrs.append("$Version=%s" % version) + + # quote cookie value if necessary + # (not for Netscape protocol, which already has any quotes + # intact, due to the poorly-specified Netscape Cookie: syntax) + if ((cookie.value is not None) and + self.non_word_re.search(cookie.value) and version > 0): + value = self.quote_re.sub(r"\\\1", cookie.value) + else: + value = cookie.value + + # add cookie-attributes to be returned in Cookie header + if cookie.value is None: + attrs.append(cookie.name) + else: + attrs.append("%s=%s" % (cookie.name, value)) + if version > 0: + if cookie.path_specified: + attrs.append('$Path="%s"' % cookie.path) + if cookie.domain.startswith("."): + domain = cookie.domain + if (not cookie.domain_initial_dot and + domain.startswith(".")): + domain = domain[1:] + attrs.append('$Domain="%s"' % domain) + if cookie.port is not None: + p = "$Port" + if cookie.port_specified: + p = p + ('="%s"' % cookie.port) + attrs.append(p) + + return attrs + + def add_cookie_header(self, request): + """Add correct Cookie: header to request (mechanize.Request object). + + The Cookie2 header is also added unless policy.hide_cookie2 is true. + + The request object (usually a mechanize.Request instance) must support + the methods get_full_url, get_host, is_unverifiable, get_type, + has_header, get_header, header_items and add_unredirected_header, as + documented by urllib2. + """ + debug("add_cookie_header") + cookies = self.cookies_for_request(request) + + attrs = self._cookie_attrs(cookies) + if attrs: + if not request.has_header("Cookie"): + request.add_unredirected_header("Cookie", "; ".join(attrs)) + + # if necessary, advertise that we know RFC 2965 + if self._policy.rfc2965 and not self._policy.hide_cookie2: + for cookie in cookies: + if cookie.version != 1 and not request.has_header("Cookie2"): + request.add_unredirected_header("Cookie2", '$Version="1"') + break + + self.clear_expired_cookies() + + def _normalized_cookie_tuples(self, attrs_set): + """Return list of tuples containing normalised cookie information. + + attrs_set is the list of lists of key,value pairs extracted from + the Set-Cookie or Set-Cookie2 headers. + + Tuples are name, value, standard, rest, where name and value are the + cookie name and value, standard is a dictionary containing the standard + cookie-attributes (discard, secure, version, expires or max-age, + domain, path and port) and rest is a dictionary containing the rest of + the cookie-attributes. + + """ + cookie_tuples = [] + + boolean_attrs = "discard", "secure" + value_attrs = ("version", + "expires", "max-age", + "domain", "path", "port", + "comment", "commenturl") + + for cookie_attrs in attrs_set: + name, value = cookie_attrs[0] + + # Build dictionary of standard cookie-attributes (standard) and + # dictionary of other cookie-attributes (rest). + + # Note: expiry time is normalised to seconds since epoch. V0 + # cookies should have the Expires cookie-attribute, and V1 cookies + # should have Max-Age, but since V1 includes RFC 2109 cookies (and + # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we + # accept either (but prefer Max-Age). 
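+            # For example (added note): given both
+            #   expires=Wed, 09 Jun 2021 10:18:14 GMT; max-age=3600
+            # the Expires value is skipped and the stored "expires" key
+            # becomes self._now + 3600 (seconds since epoch).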
+ max_age_set = False + + bad_cookie = False + + standard = {} + rest = {} + for k, v in cookie_attrs[1:]: + lc = k.lower() + # don't lose case distinction for unknown fields + if lc in value_attrs or lc in boolean_attrs: + k = lc + if k in boolean_attrs and v is None: + # boolean cookie-attribute is present, but has no value + # (like "discard", rather than "port=80") + v = True + if standard.has_key(k): + # only first value is significant + continue + if k == "domain": + if v is None: + debug(" missing value for domain attribute") + bad_cookie = True + break + # RFC 2965 section 3.3.3 + v = v.lower() + if k == "expires": + if max_age_set: + # Prefer max-age to expires (like Mozilla) + continue + if v is None: + debug(" missing or invalid value for expires " + "attribute: treating as session cookie") + continue + if k == "max-age": + max_age_set = True + if v is None: + debug(" missing value for max-age attribute") + bad_cookie = True + break + try: + v = int(v) + except ValueError: + debug(" missing or invalid (non-numeric) value for " + "max-age attribute") + bad_cookie = True + break + # convert RFC 2965 Max-Age to seconds since epoch + # XXX Strictly you're supposed to follow RFC 2616 + # age-calculation rules. Remember that zero Max-Age is a + # is a request to discard (old and new) cookie, though. + k = "expires" + v = self._now + v + if (k in value_attrs) or (k in boolean_attrs): + if (v is None and + k not in ["port", "comment", "commenturl"]): + debug(" missing value for %s attribute" % k) + bad_cookie = True + break + standard[k] = v + else: + rest[k] = v + + if bad_cookie: + continue + + cookie_tuples.append((name, value, standard, rest)) + + return cookie_tuples + + def _cookie_from_cookie_tuple(self, tup, request): + # standard is dict of standard cookie-attributes, rest is dict of the + # rest of them + name, value, standard, rest = tup + + domain = standard.get("domain", Absent) + path = standard.get("path", Absent) + port = standard.get("port", Absent) + expires = standard.get("expires", Absent) + + # set the easy defaults + version = standard.get("version", None) + if version is not None: + try: + version = int(version) + except ValueError: + return None # invalid version, ignore cookie + secure = standard.get("secure", False) + # (discard is also set if expires is Absent) + discard = standard.get("discard", False) + comment = standard.get("comment", None) + comment_url = standard.get("commenturl", None) + + # set default path + if path is not Absent and path != "": + path_specified = True + path = escape_path(path) + else: + path_specified = False + path = request_path(request) + i = path.rfind("/") + if i != -1: + if version == 0: + # Netscape spec parts company from reality here + path = path[:i] + else: + path = path[:i+1] + if len(path) == 0: path = "/" + + # set default domain + domain_specified = domain is not Absent + # but first we have to remember whether it starts with a dot + domain_initial_dot = False + if domain_specified: + domain_initial_dot = bool(domain.startswith(".")) + if domain is Absent: + req_host, erhn = eff_request_host_lc(request) + domain = erhn + elif not domain.startswith("."): + domain = "."+domain + + # set default port + port_specified = False + if port is not Absent: + if port is None: + # Port attr present, but has no value: default to request port. + # Cookie should then only be sent back on that port. + port = request_port(request) + else: + port_specified = True + port = re.sub(r"\s+", "", port) + else: + # No port attr present. 
Cookie can be sent back on any port. + port = None + + # set default expires and discard + if expires is Absent: + expires = None + discard = True + + return Cookie(version, + name, value, + port, port_specified, + domain, domain_specified, domain_initial_dot, + path, path_specified, + secure, + expires, + discard, + comment, + comment_url, + rest) + + def _cookies_from_attrs_set(self, attrs_set, request): + cookie_tuples = self._normalized_cookie_tuples(attrs_set) + + cookies = [] + for tup in cookie_tuples: + cookie = self._cookie_from_cookie_tuple(tup, request) + if cookie: cookies.append(cookie) + return cookies + + def _process_rfc2109_cookies(self, cookies): + if self._policy.rfc2109_as_netscape is None: + rfc2109_as_netscape = not self._policy.rfc2965 + else: + rfc2109_as_netscape = self._policy.rfc2109_as_netscape + for cookie in cookies: + if cookie.version == 1: + cookie.rfc2109 = True + if rfc2109_as_netscape: + # treat 2109 cookies as Netscape cookies rather than + # as RFC2965 cookies + cookie.version = 0 + + def _make_cookies(self, response, request): + # get cookie-attributes for RFC 2965 and Netscape protocols + headers = response.info() + rfc2965_hdrs = headers.getheaders("Set-Cookie2") + ns_hdrs = headers.getheaders("Set-Cookie") + + rfc2965 = self._policy.rfc2965 + netscape = self._policy.netscape + + if ((not rfc2965_hdrs and not ns_hdrs) or + (not ns_hdrs and not rfc2965) or + (not rfc2965_hdrs and not netscape) or + (not netscape and not rfc2965)): + return [] # no relevant cookie headers: quick exit + + try: + cookies = self._cookies_from_attrs_set( + split_header_words(rfc2965_hdrs), request) + except: + reraise_unmasked_exceptions() + cookies = [] + + if ns_hdrs and netscape: + try: + # RFC 2109 and Netscape cookies + ns_cookies = self._cookies_from_attrs_set( + parse_ns_headers(ns_hdrs), request) + except: + reraise_unmasked_exceptions() + ns_cookies = [] + self._process_rfc2109_cookies(ns_cookies) + + # Look for Netscape cookies (from Set-Cookie headers) that match + # corresponding RFC 2965 cookies (from Set-Cookie2 headers). + # For each match, keep the RFC 2965 cookie and ignore the Netscape + # cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are + # bundled in with the Netscape cookies for this purpose, which is + # reasonable behaviour. + if rfc2965: + lookup = {} + for cookie in cookies: + lookup[(cookie.domain, cookie.path, cookie.name)] = None + + def no_matching_rfc2965(ns_cookie, lookup=lookup): + key = ns_cookie.domain, ns_cookie.path, ns_cookie.name + return not lookup.has_key(key) + ns_cookies = filter(no_matching_rfc2965, ns_cookies) + + if ns_cookies: + cookies.extend(ns_cookies) + + return cookies + + def make_cookies(self, response, request): + """Return sequence of Cookie objects extracted from response object. + + See extract_cookies.__doc__ for the interface required of the + response and request arguments. + + """ + self._policy._now = self._now = int(time.time()) + return [cookie for cookie in self._make_cookies(response, request) + if cookie.expires is None or not cookie.expires <= self._now] + + def set_cookie_if_ok(self, cookie, request): + """Set a cookie if policy says it's OK to do so. + + cookie: mechanize.Cookie instance + request: see extract_cookies.__doc__ for the required interface + + """ + self._policy._now = self._now = int(time.time()) + + if self._policy.set_ok(cookie, request): + self.set_cookie(cookie) + + def set_cookie(self, cookie): + """Set a cookie, without checking whether or not it should be set. 
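+
+        An illustrative sketch (editor's note, not part of the original
+        docstring), using the positional Cookie constructor arguments in the
+        order shown in _cookie_from_cookie_tuple above:
+
+            jar = CookieJar()
+            jar.set_cookie(Cookie(
+                0, "sid", "abc",               # version, name, value
+                None, False,                   # port, port_specified
+                "example.com", False, False,   # domain, specified, initial dot
+                "/", False,                    # path, path_specified
+                False,                         # secure
+                None, True,                    # expires, discard (session)
+                None, None, {}))               # comment, comment_url, rest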
+
+        cookie: mechanize.Cookie instance
+        """
+        c = self._cookies
+        if not c.has_key(cookie.domain): c[cookie.domain] = {}
+        c2 = c[cookie.domain]
+        if not c2.has_key(cookie.path): c2[cookie.path] = {}
+        c3 = c2[cookie.path]
+        c3[cookie.name] = cookie
+
+    def extract_cookies(self, response, request):
+        """Extract cookies from response, where allowable given the request.
+
+        Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
+        object passed as argument.  Any of these headers that are found are
+        used to update the state of the object (subject to the policy.set_ok
+        method's approval).
+
+        The response object (usually the result of a call to
+        mechanize.urlopen, or similar) should support an info method, which
+        returns a mimetools.Message object (in fact, the 'mimetools.Message
+        object' may be any object that provides a getheaders method).
+
+        The request object (usually a mechanize.Request instance) must support
+        the methods get_full_url, get_type, get_host, and is_unverifiable, as
+        documented by mechanize, and the port attribute (the port number).  The
+        request is used to set default values for cookie-attributes as well as
+        for checking that the cookie is OK to be set.
+
+        """
+        debug("extract_cookies: %s", response.info())
+        self._policy._now = self._now = int(time.time())
+
+        for cookie in self._make_cookies(response, request):
+            if cookie.expires is not None and cookie.expires <= self._now:
+                # Expiry date in the past is a request to delete the cookie.
+                # This can't be in DefaultCookiePolicy, because it can't
+                # delete cookies there.
+                try:
+                    self.clear(cookie.domain, cookie.path, cookie.name)
+                except KeyError:
+                    pass
+                debug("Expiring cookie, domain='%s', path='%s', name='%s'",
+                      cookie.domain, cookie.path, cookie.name)
+            elif self._policy.set_ok(cookie, request):
+                debug(" setting cookie: %s", cookie)
+                self.set_cookie(cookie)
+
+    def clear(self, domain=None, path=None, name=None):
+        """Clear some cookies.
+
+        Invoking this method without arguments will clear all cookies.  If
+        given a single argument, only cookies belonging to that domain will be
+        removed.  If given two arguments, cookies belonging to the specified
+        path within that domain are removed.  If given three arguments, then
+        the cookie with the specified name, path and domain is removed.
+
+        Raises KeyError if no matching cookie exists.
+
+        """
+        if name is not None:
+            if (domain is None) or (path is None):
+                raise ValueError(
+                    "domain and path must be given to remove a cookie by name")
+            del self._cookies[domain][path][name]
+        elif path is not None:
+            if domain is None:
+                raise ValueError(
+                    "domain must be given to remove cookies by path")
+            del self._cookies[domain][path]
+        elif domain is not None:
+            del self._cookies[domain]
+        else:
+            self._cookies = {}
+
+    def clear_session_cookies(self):
+        """Discard all session cookies.
+
+        Discards all cookies held by the object which had either no Max-Age or
+        Expires cookie-attribute or an explicit Discard cookie-attribute, or
+        which otherwise have ended up with a true discard attribute.  For
+        interactive browsers, the end of a session usually corresponds to
+        closing the browser window.
+
+        Note that the save method won't save session cookies anyway, unless you
+        ask otherwise by passing a true ignore_discard argument.
+
+        """
+        for cookie in self:
+            if cookie.discard:
+                self.clear(cookie.domain, cookie.path, cookie.name)
+
+    def clear_expired_cookies(self):
+        """Discard all expired cookies.
+ + You probably don't need to call this method: expired cookies are never + sent back to the server (provided you're using DefaultCookiePolicy), + this method is called by CookieJar itself every so often, and the save + method won't save expired cookies anyway (unless you ask otherwise by + passing a true ignore_expires argument). + + """ + now = time.time() + for cookie in self: + if cookie.is_expired(now): + self.clear(cookie.domain, cookie.path, cookie.name) + + def __getitem__(self, i): + if i == 0: + self._getitem_iterator = self.__iter__() + elif self._prev_getitem_index != i-1: raise IndexError( + "CookieJar.__getitem__ only supports sequential iteration") + self._prev_getitem_index = i + try: + return self._getitem_iterator.next() + except StopIteration: + raise IndexError() + + def __iter__(self): + return MappingIterator(self._cookies) + + def __len__(self): + """Return number of contained cookies.""" + i = 0 + for cookie in self: i = i + 1 + return i + + def __repr__(self): + r = [] + for cookie in self: r.append(repr(cookie)) + return "<%s[%s]>" % (self.__class__, ", ".join(r)) + + def __str__(self): + r = [] + for cookie in self: r.append(str(cookie)) + return "<%s[%s]>" % (self.__class__, ", ".join(r)) + + +class LoadError(Exception): pass + +class FileCookieJar(CookieJar): + """CookieJar that can be loaded from and saved to a file. + + Additional methods + + save(filename=None, ignore_discard=False, ignore_expires=False) + load(filename=None, ignore_discard=False, ignore_expires=False) + revert(filename=None, ignore_discard=False, ignore_expires=False) + + Additional public attributes + + filename: filename for loading and saving cookies + + Additional public readable attributes + + delayload: request that cookies are lazily loaded from disk; this is only + a hint since this only affects performance, not behaviour (unless the + cookies on disk are changing); a CookieJar object may ignore it (in fact, + only MSIECookieJar lazily loads cookies at the moment) + + """ + + def __init__(self, filename=None, delayload=False, policy=None): + """ + See FileCookieJar.__doc__ for argument documentation. + + Cookies are NOT loaded from the named file until either the load or + revert method is called. + + """ + CookieJar.__init__(self, policy) + if filename is not None and not isstringlike(filename): + raise ValueError("filename must be string-like") + self.filename = filename + self.delayload = bool(delayload) + + def save(self, filename=None, ignore_discard=False, ignore_expires=False): + """Save cookies to a file. + + filename: name of file in which to save cookies + ignore_discard: save even cookies set to be discarded + ignore_expires: save even cookies that have expired + + The file is overwritten if it already exists, thus wiping all its + cookies. Saved cookies can be restored later using the load or revert + methods. If filename is not specified, self.filename is used; if + self.filename is None, ValueError is raised. + + """ + raise NotImplementedError() + + def load(self, filename=None, ignore_discard=False, ignore_expires=False): + """Load cookies from a file. + + Old cookies are kept unless overwritten by newly loaded ones. + + Arguments are as for .save(). + + If filename is not specified, self.filename is used; if self.filename + is None, ValueError is raised. The named file must be in the format + understood by the class, or LoadError will be raised. 
This format will + be identical to that written by the save method, unless the load format + is not sufficiently well understood (as is the case for MSIECookieJar). + + """ + if filename is None: + if self.filename is not None: filename = self.filename + else: raise ValueError(MISSING_FILENAME_TEXT) + + f = open(filename) + try: + self._really_load(f, filename, ignore_discard, ignore_expires) + finally: + f.close() + + def revert(self, filename=None, + ignore_discard=False, ignore_expires=False): + """Clear all cookies and reload cookies from a saved file. + + Raises LoadError (or IOError) if reversion is not successful; the + object's state will not be altered if this happens. + + """ + if filename is None: + if self.filename is not None: filename = self.filename + else: raise ValueError(MISSING_FILENAME_TEXT) + + old_state = copy.deepcopy(self._cookies) + self._cookies = {} + try: + self.load(filename, ignore_discard, ignore_expires) + except (LoadError, IOError): + self._cookies = old_state + raise diff --git a/plugin.video.alfa/lib/mechanize/_debug.py b/plugin.video.alfa/lib/mechanize/_debug.py new file mode 100755 index 00000000..c17a06ce --- /dev/null +++ b/plugin.video.alfa/lib/mechanize/_debug.py @@ -0,0 +1,28 @@ +import logging + +from _response import response_seek_wrapper +from _urllib2_fork import BaseHandler + + +class HTTPResponseDebugProcessor(BaseHandler): + handler_order = 900 # before redirections, after everything else + + def http_response(self, request, response): + if not hasattr(response, "seek"): + response = response_seek_wrapper(response) + info = logging.getLogger("mechanize.http_responses").info + try: + info(response.read()) + finally: + response.seek(0) + info("*****************************************************") + return response + + https_response = http_response + +class HTTPRedirectDebugProcessor(BaseHandler): + def http_request(self, request): + if hasattr(request, "redirect_dict"): + info = logging.getLogger("mechanize.http_redirects").info + info("redirecting to %s", request.get_full_url()) + return request diff --git a/plugin.video.alfa/lib/mechanize/_firefox3cookiejar.py b/plugin.video.alfa/lib/mechanize/_firefox3cookiejar.py new file mode 100755 index 00000000..83fcd21a --- /dev/null +++ b/plugin.video.alfa/lib/mechanize/_firefox3cookiejar.py @@ -0,0 +1,248 @@ +"""Firefox 3 "cookies.sqlite" cookie persistence. + +Copyright 2008 John J Lee + +This code is free software; you can redistribute it and/or modify it +under the terms of the BSD or ZPL 2.1 licenses (see the file +COPYING.txt included with the distribution). + +""" + +import logging +import time + +from _clientcookie import CookieJar, Cookie, MappingIterator +from _util import isstringlike, experimental +debug = logging.getLogger("mechanize.cookies").debug + + +class Firefox3CookieJar(CookieJar): + + """Firefox 3 cookie jar. + + The cookies are stored in Firefox 3's "cookies.sqlite" format. + + Constructor arguments: + + filename: filename of cookies.sqlite (typically found at the top level + of a firefox profile directory) + autoconnect: as a convenience, connect to the SQLite cookies database at + Firefox3CookieJar construction time (default True) + policy: an object satisfying the mechanize.CookiePolicy interface + + Note that this is NOT a FileCookieJar, and there are no .load(), + .save() or .restore() methods. The database is in sync with the + cookiejar object's state after each public method call. 
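+
+    An illustrative usage sketch (editor's note, not part of the original
+    docstring; the profile path is hypothetical):
+
+        jar = Firefox3CookieJar("/path/to/profile/cookies.sqlite")
+        for cookie in jar:            # yields session, then persistent cookies
+            print cookie.domain, cookie.name
+        jar.close()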
+ + Following Firefox's own behaviour, session cookies are never saved to + the database. + + The file is created, and an sqlite database written to it, if it does + not already exist. The moz_cookies database table is created if it does + not already exist. + """ + + # XXX + # handle DatabaseError exceptions + # add a FileCookieJar (explicit .save() / .revert() / .load() methods) + + def __init__(self, filename, autoconnect=True, policy=None): + experimental("Firefox3CookieJar is experimental code") + CookieJar.__init__(self, policy) + if filename is not None and not isstringlike(filename): + raise ValueError("filename must be string-like") + self.filename = filename + self._conn = None + if autoconnect: + self.connect() + + def connect(self): + import sqlite3 # not available in Python 2.4 stdlib + self._conn = sqlite3.connect(self.filename) + self._conn.isolation_level = "DEFERRED" + self._create_table_if_necessary() + + def close(self): + self._conn.close() + + def _transaction(self, func): + try: + cur = self._conn.cursor() + try: + result = func(cur) + finally: + cur.close() + except: + self._conn.rollback() + raise + else: + self._conn.commit() + return result + + def _execute(self, query, params=()): + return self._transaction(lambda cur: cur.execute(query, params)) + + def _query(self, query, params=()): + # XXX should we bother with a transaction? + cur = self._conn.cursor() + try: + cur.execute(query, params) + return cur.fetchall() + finally: + cur.close() + + def _create_table_if_necessary(self): + self._execute("""\ +CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT, + value TEXT, host TEXT, path TEXT,expiry INTEGER, + lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""") + + def _cookie_from_row(self, row): + (pk, name, value, domain, path, expires, + last_accessed, secure, http_only) = row + + version = 0 + domain = domain.encode("ascii", "ignore") + path = path.encode("ascii", "ignore") + name = name.encode("ascii", "ignore") + value = value.encode("ascii", "ignore") + secure = bool(secure) + + # last_accessed isn't a cookie attribute, so isn't added to rest + rest = {} + if http_only: + rest["HttpOnly"] = None + + if name == "": + name = value + value = None + + initial_dot = domain.startswith(".") + domain_specified = initial_dot + + discard = False + if expires == "": + expires = None + discard = True + + return Cookie(version, name, value, + None, False, + domain, domain_specified, initial_dot, + path, False, + secure, + expires, + discard, + None, + None, + rest) + + def clear(self, domain=None, path=None, name=None): + CookieJar.clear(self, domain, path, name) + where_parts = [] + sql_params = [] + if domain is not None: + where_parts.append("host = ?") + sql_params.append(domain) + if path is not None: + where_parts.append("path = ?") + sql_params.append(path) + if name is not None: + where_parts.append("name = ?") + sql_params.append(name) + where = " AND ".join(where_parts) + if where: + where = " WHERE " + where + def clear(cur): + cur.execute("DELETE FROM moz_cookies%s" % where, + tuple(sql_params)) + self._transaction(clear) + + def _row_from_cookie(self, cookie, cur): + expires = cookie.expires + if cookie.discard: + expires = "" + + domain = unicode(cookie.domain) + path = unicode(cookie.path) + name = unicode(cookie.name) + value = unicode(cookie.value) + secure = bool(int(cookie.secure)) + + if value is None: + value = name + name = "" + + last_accessed = int(time.time()) + http_only = cookie.has_nonstandard_attr("HttpOnly") 
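+        # Editor's note (not in the original source): the tuple assembled
+        # below follows the moz_cookies column order from the CREATE TABLE
+        # statement above, i.e.
+        #     (id, name, value, host, path, expiry,
+        #      lastAccessed, isSecure, isHttpOnly)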
+
+        query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
+        pk = query.fetchone()[0]
+        if pk is None:
+            pk = 1
+
+        return (pk, name, value, domain, path, expires,
+                last_accessed, secure, http_only)
+
+    def set_cookie(self, cookie):
+        if cookie.discard:
+            CookieJar.set_cookie(self, cookie)
+            return
+
+        def set_cookie(cur):
+            # XXX
+            # is this RFC 2965-correct?
+            # could this do an UPDATE instead?
+            row = self._row_from_cookie(cookie, cur)
+            name, unused, domain, path = row[1:5]
+            cur.execute("""\
+DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
+                        (domain, path, name))
+            cur.execute("""\
+INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
+""", row)
+        self._transaction(set_cookie)
+
+    def __iter__(self):
+        # session (non-persistent) cookies
+        for cookie in MappingIterator(self._cookies):
+            yield cookie
+        # persistent cookies
+        for row in self._query("""\
+SELECT * FROM moz_cookies ORDER BY name, path, host"""):
+            yield self._cookie_from_row(row)
+
+    def _cookies_for_request(self, request):
+        session_cookies = CookieJar._cookies_for_request(self, request)
+        def get_cookies(cur):
+            query = cur.execute("SELECT host from moz_cookies")
+            domains = [row[0] for row in query.fetchall()]
+            cookies = []
+            for domain in domains:
+                cookies += self._persistent_cookies_for_domain(domain,
+                                                               request, cur)
+            return cookies
+        persistent_cookies = self._transaction(get_cookies)
+        return session_cookies + persistent_cookies
+
+    def _persistent_cookies_for_domain(self, domain, request, cur):
+        cookies = []
+        if not self._policy.domain_return_ok(domain, request):
+            return []
+        debug("Checking %s for cookies to return", domain)
+        query = cur.execute("""\
+SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
+                            (domain,))
+        cookies = [self._cookie_from_row(row) for row in query.fetchall()]
+        last_path = None
+        r = []
+        for cookie in cookies:
+            if (cookie.path != last_path and
+                not self._policy.path_return_ok(cookie.path, request)):
+                last_path = cookie.path
+                continue
+            if not self._policy.return_ok(cookie, request):
+                debug("   not returning cookie")
+                continue
+            debug("   it's a match")
+            r.append(cookie)
+        return r
diff --git a/plugin.video.alfa/lib/mechanize/_form.py b/plugin.video.alfa/lib/mechanize/_form.py
new file mode 100755
index 00000000..ed2b13b4
--- /dev/null
+++ b/plugin.video.alfa/lib/mechanize/_form.py
@@ -0,0 +1,3280 @@
+"""HTML form handling for web clients.
+
+HTML form handling for web clients: useful for parsing HTML forms, filling them
+in and returning the completed forms to the server.  This code developed from a
+port of Gisle Aas' Perl module HTML::Form, from the libwww-perl library, but
+the interface is not the same.
+
+The most useful docstring is the one for HTMLForm.
+
+RFC 1866: HTML 2.0
+RFC 1867: Form-based File Upload in HTML
+RFC 2388: Returning Values from Forms: multipart/form-data
+HTML 3.2 Specification, W3C Recommendation 14 January 1997 (for ISINDEX)
+HTML 4.01 Specification, W3C Recommendation 24 December 1999
+
+
+Copyright 2002-2007 John J. Lee
+Copyright 2005 Gary Poster
+Copyright 2005 Zope Corporation
+Copyright 1998-2000 Gisle Aas.
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the BSD or ZPL 2.1 licenses (see the file
+COPYING.txt included with the distribution).
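+
+An illustrative usage sketch (editor's note, not part of the original
+docstring; the URL and control name are hypothetical):
+
+    import mechanize
+    response = mechanize.urlopen("http://example.com/login")
+    forms = mechanize.ParseResponse(response)
+    form = forms[0]
+    form["user"] = "me"            # assign to a control named "user"
+    request = form.click()         # mechanize.Request for the submission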
+
+"""
+
+# TODO:
+# Clean up post the merge into mechanize
+#  * Remove code that was duplicated in ClientForm and mechanize
+#  * Remove weird import stuff
+#  * Remove pre-Python 2.4 compatibility cruft
+#  * Clean up tests
+#  * Later release: Remove the ClientForm 0.1 backwards-compatibility switch
+# Remove parser testing hack
+# Clean action URI
+# Switch to unicode throughout
+# See Wichert Akkerman's 2004-01-22 message to c.l.py.
+# Apply recommendations from google code project CURLIES
+# Apply recommendations from HTML 5 spec
+# Add charset parameter to Content-type headers?  How to find value??
+# Functional tests to add:
+#  Single and multiple file upload
+#  File upload with missing name (check standards)
+# mailto: submission & enctype text/plain??
+
+# Replace by_label etc. with moniker / selector concept.  Allows, e.g., a
+# choice between selection by value / id / label / element contents.  Or
+# choice between matching labels exactly or by substring.  etc.
+
+
+__all__ = ['AmbiguityError', 'CheckboxControl', 'Control',
+           'ControlNotFoundError', 'FileControl', 'FormParser', 'HTMLForm',
+           'HiddenControl', 'IgnoreControl', 'ImageControl', 'IsindexControl',
+           'Item', 'ItemCountError', 'ItemNotFoundError', 'Label',
+           'ListControl', 'LocateError', 'Missing', 'ParseError', 'ParseFile',
+           'ParseFileEx', 'ParseResponse', 'ParseResponseEx','PasswordControl',
+           'RadioControl', 'ScalarControl', 'SelectControl',
+           'SubmitButtonControl', 'SubmitControl', 'TextControl',
+           'TextareaControl', 'XHTMLCompatibleFormParser']
+
+import HTMLParser
+from cStringIO import StringIO
+import inspect
+import logging
+import random
+import re
+import sys
+import urllib
+import urlparse
+import warnings
+
+import _beautifulsoup
+import _request
+
+# from Python itself, for backwards compatibility of raised exceptions
+import sgmllib
+# bundled copy of sgmllib
+import _sgmllib_copy
+
+
+VERSION = "0.2.11"
+
+CHUNK = 1024  # size of chunks fed to parser, in bytes
+
+DEFAULT_ENCODING = "latin-1"
+
+_logger = logging.getLogger("mechanize.forms")
+OPTIMIZATION_HACK = True
+
+def debug(msg, *args, **kwds):
+    if OPTIMIZATION_HACK:
+        return
+
+    caller_name = inspect.stack()[1][3]
+    extended_msg = '%%s %s' % msg
+    extended_args = (caller_name,)+args
+    _logger.debug(extended_msg, *extended_args, **kwds)
+
+def _show_debug_messages():
+    global OPTIMIZATION_HACK
+    OPTIMIZATION_HACK = False
+    _logger.setLevel(logging.DEBUG)
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setLevel(logging.DEBUG)
+    _logger.addHandler(handler)
+
+
+def deprecation(message, stack_offset=0):
+    warnings.warn(message, DeprecationWarning, stacklevel=3+stack_offset)
+
+
+class Missing: pass
+
+_compress_re = re.compile(r"\s+")
+def compress_text(text): return _compress_re.sub(" ", text.strip())
+
+def normalize_line_endings(text):
+    return re.sub(r"(?:(?<!\r)\n)|(?:\r(?!\n))", "\r\n", text)
+
+
+class MimeWriter:
+
+    """Generic MIME writer.
+
+    Usage:
+
+    f = <open the output file>
+    w = MimeWriter(f)
+    ...call w.addheader(key, value) 0 or more times...
+
+    followed by either:
+
+    f = w.startbody(content_type)
+    ...call f.write(data) for body data...
+
+    or:
+
+    w.startmultipartbody(subtype)
+    for each part:
+        subwriter = w.nextpart()
+        ...use the subwriter's methods to create the subpart...
+    w.lastpart()
+
+    The subwriter is another MimeWriter instance, and should be
+    treated in the same way as the toplevel MimeWriter.  This way,
+    writing recursive body parts is easy.
+
+    Warning: don't forget to call lastpart()!
+
+    XXX There should be more state so calls made in the wrong order
+    are detected.
+ + Some special cases: + + - startbody() just returns the file passed to the constructor; + but don't use this knowledge, as it may be changed. + + - startmultipartbody() actually returns a file as well; + this can be used to write the initial 'if you can read this your + mailer is not MIME-aware' message. + + - If you call flushheaders(), the headers accumulated so far are + written out (and forgotten); this is useful if you don't need a + body part at all, e.g. for a subpart of type message/rfc822 + that's (mis)used to store some header-like information. + + - Passing a keyword argument 'prefix=' to addheader(), + start*body() affects where the header is inserted; 0 means + append at the end, 1 means insert at the start; default is + append for addheader(), but insert for start*body(), which use + it to determine where the Content-type header goes. + + """ + + def __init__(self, fp, http_hdrs=None): + self._http_hdrs = http_hdrs + self._fp = fp + self._headers = [] + self._boundary = [] + self._first_part = True + + def addheader(self, key, value, prefix=0, + add_to_http_hdrs=0): + """ + prefix is ignored if add_to_http_hdrs is true. + """ + lines = value.split("\r\n") + while lines and not lines[-1]: del lines[-1] + while lines and not lines[0]: del lines[0] + if add_to_http_hdrs: + value = "".join(lines) + # 2.2 urllib2 doesn't normalize header case + self._http_hdrs.append((key.capitalize(), value)) + else: + for i in range(1, len(lines)): + lines[i] = " " + lines[i].strip() + value = "\r\n".join(lines) + "\r\n" + line = key.title() + ": " + value + if prefix: + self._headers.insert(0, line) + else: + self._headers.append(line) + + def flushheaders(self): + self._fp.writelines(self._headers) + self._headers = [] + + def startbody(self, ctype=None, plist=[], prefix=1, + add_to_http_hdrs=0, content_type=1): + """ + prefix is ignored if add_to_http_hdrs is true. 
+ """ + if content_type and ctype: + for name, value in plist: + ctype = ctype + ';\r\n %s=%s' % (name, value) + self.addheader("Content-Type", ctype, prefix=prefix, + add_to_http_hdrs=add_to_http_hdrs) + self.flushheaders() + if not add_to_http_hdrs: self._fp.write("\r\n") + self._first_part = True + return self._fp + + def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1, + add_to_http_hdrs=0, content_type=1): + boundary = boundary or choose_boundary() + self._boundary.append(boundary) + return self.startbody("multipart/" + subtype, + [("boundary", boundary)] + plist, + prefix=prefix, + add_to_http_hdrs=add_to_http_hdrs, + content_type=content_type) + + def nextpart(self): + boundary = self._boundary[-1] + if self._first_part: + self._first_part = False + else: + self._fp.write("\r\n") + self._fp.write("--" + boundary + "\r\n") + return self.__class__(self._fp) + + def lastpart(self): + if self._first_part: + self.nextpart() + boundary = self._boundary.pop() + self._fp.write("\r\n--" + boundary + "--\r\n") + + +class LocateError(ValueError): pass +class AmbiguityError(LocateError): pass +class ControlNotFoundError(LocateError): pass +class ItemNotFoundError(LocateError): pass + +class ItemCountError(ValueError): pass + +# for backwards compatibility, ParseError derives from exceptions that were +# raised by versions of ClientForm <= 0.2.5 +# TODO: move to _html +class ParseError(sgmllib.SGMLParseError, + HTMLParser.HTMLParseError): + + def __init__(self, *args, **kwds): + Exception.__init__(self, *args, **kwds) + + def __str__(self): + return Exception.__str__(self) + + +class _AbstractFormParser: + """forms attribute contains HTMLForm instances on completion.""" + # thanks to Moshe Zadka for an example of sgmllib/htmllib usage + def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING): + if entitydefs is None: + entitydefs = get_entitydefs() + self._entitydefs = entitydefs + self._encoding = encoding + + self.base = None + self.forms = [] + self.labels = [] + self._current_label = None + self._current_form = None + self._select = None + self._optgroup = None + self._option = None + self._textarea = None + + # forms[0] will contain all controls that are outside of any form + # self._global_form is an alias for self.forms[0] + self._global_form = None + self.start_form([]) + self.end_form() + self._current_form = self._global_form = self.forms[0] + + def do_base(self, attrs): + debug("%s", attrs) + for key, value in attrs: + if key == "href": + self.base = self.unescape_attr_if_required(value) + + def end_body(self): + debug("") + if self._current_label is not None: + self.end_label() + if self._current_form is not self._global_form: + self.end_form() + + def start_form(self, attrs): + debug("%s", attrs) + if self._current_form is not self._global_form: + raise ParseError("nested FORMs") + name = None + action = None + enctype = "application/x-www-form-urlencoded" + method = "GET" + d = {} + for key, value in attrs: + if key == "name": + name = self.unescape_attr_if_required(value) + elif key == "action": + action = self.unescape_attr_if_required(value) + elif key == "method": + method = self.unescape_attr_if_required(value.upper()) + elif key == "enctype": + enctype = self.unescape_attr_if_required(value.lower()) + d[key] = self.unescape_attr_if_required(value) + controls = [] + self._current_form = (name, action, method, enctype), d, controls + + def end_form(self): + debug("") + if self._current_label is not None: + self.end_label() + if self._current_form is 
self._global_form: + raise ParseError("end of FORM before start") + self.forms.append(self._current_form) + self._current_form = self._global_form + + def start_select(self, attrs): + debug("%s", attrs) + if self._select is not None: + raise ParseError("nested SELECTs") + if self._textarea is not None: + raise ParseError("SELECT inside TEXTAREA") + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + + self._select = d + self._add_label(d) + + self._append_select_control({"__select": d}) + + def end_select(self): + debug("") + if self._select is None: + raise ParseError("end of SELECT before start") + + if self._option is not None: + self._end_option() + + self._select = None + + def start_optgroup(self, attrs): + debug("%s", attrs) + if self._select is None: + raise ParseError("OPTGROUP outside of SELECT") + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + + self._optgroup = d + + def end_optgroup(self): + debug("") + if self._optgroup is None: + raise ParseError("end of OPTGROUP before start") + self._optgroup = None + + def _start_option(self, attrs): + debug("%s", attrs) + if self._select is None: + raise ParseError("OPTION outside of SELECT") + if self._option is not None: + self._end_option() + + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + + self._option = {} + self._option.update(d) + if (self._optgroup and self._optgroup.has_key("disabled") and + not self._option.has_key("disabled")): + self._option["disabled"] = None + + def _end_option(self): + debug("") + if self._option is None: + raise ParseError("end of OPTION before start") + + contents = self._option.get("contents", "").strip() + self._option["contents"] = contents + if not self._option.has_key("value"): + self._option["value"] = contents + if not self._option.has_key("label"): + self._option["label"] = contents + # stuff dict of SELECT HTML attrs into a special private key + # (gets deleted again later) + self._option["__select"] = self._select + self._append_select_control(self._option) + self._option = None + + def _append_select_control(self, attrs): + debug("%s", attrs) + controls = self._current_form[2] + name = self._select.get("name") + controls.append(("select", name, attrs)) + + def start_textarea(self, attrs): + debug("%s", attrs) + if self._textarea is not None: + raise ParseError("nested TEXTAREAs") + if self._select is not None: + raise ParseError("TEXTAREA inside SELECT") + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + self._add_label(d) + + self._textarea = d + + def end_textarea(self): + debug("") + if self._textarea is None: + raise ParseError("end of TEXTAREA before start") + controls = self._current_form[2] + name = self._textarea.get("name") + controls.append(("textarea", name, self._textarea)) + self._textarea = None + + def start_label(self, attrs): + debug("%s", attrs) + if self._current_label: + self.end_label() + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + taken = bool(d.get("for")) # empty id is invalid + d["__text"] = "" + d["__taken"] = taken + if taken: + self.labels.append(d) + self._current_label = d + + def end_label(self): + debug("") + label = self._current_label + if label is None: + # something is ugly in the HTML, but we're ignoring it + return + self._current_label = None + # if it is staying around, it is True in all cases + del label["__taken"] + + def _add_label(self, d): + #debug("%s", d) + if self._current_label is 
not None: + if not self._current_label["__taken"]: + self._current_label["__taken"] = True + d["__label"] = self._current_label + + def handle_data(self, data): + debug("%s", data) + + if self._option is not None: + # self._option is a dictionary of the OPTION element's HTML + # attributes, but it has two special keys, one of which is the + # special "contents" key contains text between OPTION tags (the + # other is the "__select" key: see the end_option method) + map = self._option + key = "contents" + elif self._textarea is not None: + map = self._textarea + key = "value" + data = normalize_line_endings(data) + # not if within option or textarea + elif self._current_label is not None: + map = self._current_label + key = "__text" + else: + return + + if data and not map.has_key(key): + # according to + # http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.1 line break + # immediately after start tags or immediately before end tags must + # be ignored, but real browsers only ignore a line break after a + # start tag, so we'll do that. + if data[0:2] == "\r\n": + data = data[2:] + elif data[0:1] in ["\n", "\r"]: + data = data[1:] + map[key] = data + else: + map[key] = map[key] + data + + def do_button(self, attrs): + debug("%s", attrs) + d = {} + d["type"] = "submit" # default + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + controls = self._current_form[2] + + type = d["type"] + name = d.get("name") + # we don't want to lose information, so use a type string that + # doesn't clash with INPUT TYPE={SUBMIT,RESET,BUTTON} + # e.g. type for BUTTON/RESET is "resetbutton" + # (type for INPUT/RESET is "reset") + type = type+"button" + self._add_label(d) + controls.append((type, name, d)) + + def do_input(self, attrs): + debug("%s", attrs) + d = {} + d["type"] = "text" # default + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + controls = self._current_form[2] + + type = d["type"] + name = d.get("name") + self._add_label(d) + controls.append((type, name, d)) + + def do_isindex(self, attrs): + debug("%s", attrs) + d = {} + for key, val in attrs: + d[key] = self.unescape_attr_if_required(val) + controls = self._current_form[2] + + self._add_label(d) + # isindex doesn't have type or name HTML attributes + controls.append(("isindex", None, d)) + + def handle_entityref(self, name): + #debug("%s", name) + self.handle_data(unescape( + '&%s;' % name, self._entitydefs, self._encoding)) + + def handle_charref(self, name): + #debug("%s", name) + self.handle_data(unescape_charref(name, self._encoding)) + + def unescape_attr(self, name): + #debug("%s", name) + return unescape(name, self._entitydefs, self._encoding) + + def unescape_attrs(self, attrs): + #debug("%s", attrs) + escaped_attrs = {} + for key, val in attrs.items(): + try: + val.items + except AttributeError: + escaped_attrs[key] = self.unescape_attr(val) + else: + # e.g. "__select" -- yuck! + escaped_attrs[key] = self.unescape_attrs(val) + return escaped_attrs + + def unknown_entityref(self, ref): self.handle_data("&%s;" % ref) + def unknown_charref(self, ref): self.handle_data("&#%s;" % ref) + + +class XHTMLCompatibleFormParser(_AbstractFormParser, HTMLParser.HTMLParser): + """Good for XHTML, bad for tolerance of incorrect HTML.""" + # thanks to Michael Howitz for this! 
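+
+    # Editor's note (an illustrative sketch, not in the original source):
+    # this parser can be selected via the form_parser_class argument of the
+    # Parse* functions, e.g.
+    #     forms = ParseResponse(response,
+    #                           form_parser_class=XHTMLCompatibleFormParser)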
+ def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING): + HTMLParser.HTMLParser.__init__(self) + _AbstractFormParser.__init__(self, entitydefs, encoding) + + def feed(self, data): + try: + HTMLParser.HTMLParser.feed(self, data) + except HTMLParser.HTMLParseError, exc: + raise ParseError(exc) + + def start_option(self, attrs): + _AbstractFormParser._start_option(self, attrs) + + def end_option(self): + _AbstractFormParser._end_option(self) + + def handle_starttag(self, tag, attrs): + try: + method = getattr(self, "start_" + tag) + except AttributeError: + try: + method = getattr(self, "do_" + tag) + except AttributeError: + pass # unknown tag + else: + method(attrs) + else: + method(attrs) + + def handle_endtag(self, tag): + try: + method = getattr(self, "end_" + tag) + except AttributeError: + pass # unknown tag + else: + method() + + def unescape(self, name): + # Use the entitydefs passed into constructor, not + # HTMLParser.HTMLParser's entitydefs. + return self.unescape_attr(name) + + def unescape_attr_if_required(self, name): + return name # HTMLParser.HTMLParser already did it + def unescape_attrs_if_required(self, attrs): + return attrs # ditto + + def close(self): + HTMLParser.HTMLParser.close(self) + self.end_body() + + +class _AbstractSgmllibParser(_AbstractFormParser): + + def do_option(self, attrs): + _AbstractFormParser._start_option(self, attrs) + + # we override this attr to decode hex charrefs + entity_or_charref = re.compile( + '&(?:([a-zA-Z][-.a-zA-Z0-9]*)|#(x?[0-9a-fA-F]+))(;?)') + def convert_entityref(self, name): + return unescape("&%s;" % name, self._entitydefs, self._encoding) + def convert_charref(self, name): + return unescape_charref("%s" % name, self._encoding) + def unescape_attr_if_required(self, name): + return name # sgmllib already did it + def unescape_attrs_if_required(self, attrs): + return attrs # ditto + + +class FormParser(_AbstractSgmllibParser, _sgmllib_copy.SGMLParser): + """Good for tolerance of incorrect HTML, bad for XHTML.""" + def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING): + _sgmllib_copy.SGMLParser.__init__(self) + _AbstractFormParser.__init__(self, entitydefs, encoding) + + def feed(self, data): + try: + _sgmllib_copy.SGMLParser.feed(self, data) + except _sgmllib_copy.SGMLParseError, exc: + raise ParseError(exc) + + def close(self): + _sgmllib_copy.SGMLParser.close(self) + self.end_body() + + +class _AbstractBSFormParser(_AbstractSgmllibParser): + + bs_base_class = None + + def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING): + _AbstractFormParser.__init__(self, entitydefs, encoding) + self.bs_base_class.__init__(self) + + def handle_data(self, data): + _AbstractFormParser.handle_data(self, data) + self.bs_base_class.handle_data(self, data) + + def feed(self, data): + try: + self.bs_base_class.feed(self, data) + except _sgmllib_copy.SGMLParseError, exc: + raise ParseError(exc) + + def close(self): + self.bs_base_class.close(self) + self.end_body() + + +class RobustFormParser(_AbstractBSFormParser, _beautifulsoup.BeautifulSoup): + + """Tries to be highly tolerant of incorrect HTML.""" + + bs_base_class = _beautifulsoup.BeautifulSoup + + +class NestingRobustFormParser(_AbstractBSFormParser, + _beautifulsoup.ICantBelieveItsBeautifulSoup): + + """Tries to be highly tolerant of incorrect HTML. + + Different from RobustFormParser in that it more often guesses nesting + above missing end tags (see BeautifulSoup docs). 
+ """ + + bs_base_class = _beautifulsoup.ICantBelieveItsBeautifulSoup + + +#FormParser = XHTMLCompatibleFormParser # testing hack +#FormParser = RobustFormParser # testing hack + + +def ParseResponseEx(response, + select_default=False, + form_parser_class=FormParser, + request_class=_request.Request, + entitydefs=None, + encoding=DEFAULT_ENCODING, + + # private + _urljoin=urlparse.urljoin, + _urlparse=urlparse.urlparse, + _urlunparse=urlparse.urlunparse, + ): + """Identical to ParseResponse, except that: + + 1. The returned list contains an extra item. The first form in the list + contains all controls not contained in any FORM element. + + 2. The arguments ignore_errors and backwards_compat have been removed. + + 3. Backwards-compatibility mode (backwards_compat=True) is not available. + """ + return _ParseFileEx(response, response.geturl(), + select_default, + False, + form_parser_class, + request_class, + entitydefs, + False, + encoding, + _urljoin=_urljoin, + _urlparse=_urlparse, + _urlunparse=_urlunparse, + ) + +def ParseFileEx(file, base_uri, + select_default=False, + form_parser_class=FormParser, + request_class=_request.Request, + entitydefs=None, + encoding=DEFAULT_ENCODING, + + # private + _urljoin=urlparse.urljoin, + _urlparse=urlparse.urlparse, + _urlunparse=urlparse.urlunparse, + ): + """Identical to ParseFile, except that: + + 1. The returned list contains an extra item. The first form in the list + contains all controls not contained in any FORM element. + + 2. The arguments ignore_errors and backwards_compat have been removed. + + 3. Backwards-compatibility mode (backwards_compat=True) is not available. + """ + return _ParseFileEx(file, base_uri, + select_default, + False, + form_parser_class, + request_class, + entitydefs, + False, + encoding, + _urljoin=_urljoin, + _urlparse=_urlparse, + _urlunparse=_urlunparse, + ) + +def ParseString(text, base_uri, *args, **kwds): + fh = StringIO(text) + return ParseFileEx(fh, base_uri, *args, **kwds) + +def ParseResponse(response, *args, **kwds): + """Parse HTTP response and return a list of HTMLForm instances. + + The return value of mechanize.urlopen can be conveniently passed to this + function as the response parameter. + + mechanize.ParseError is raised on parse errors. + + response: file-like object (supporting read() method) with a method + geturl(), returning the URI of the HTTP response + select_default: for multiple-selection SELECT controls and RADIO controls, + pick the first item as the default if none are selected in the HTML + form_parser_class: class to instantiate and use to pass + request_class: class to return from .click() method (default is + mechanize.Request) + entitydefs: mapping like {"&": "&", ...} containing HTML entity + definitions (a sensible default is used) + encoding: character encoding used for encoding numeric character references + when matching link text. mechanize does not attempt to find the encoding + in a META HTTP-EQUIV attribute in the document itself (mechanize, for + example, does do that and will pass the correct value to mechanize using + this parameter). + + backwards_compat: boolean that determines whether the returned HTMLForm + objects are backwards-compatible with old code. If backwards_compat is + true: + + - ClientForm 0.1 code will continue to work as before. + + - Label searches that do not specify a nr (number or count) will always + get the first match, even if other controls match. 
If + backwards_compat is False, label searches that have ambiguous results + will raise an AmbiguityError. + + - Item label matching is done by strict string comparison rather than + substring matching. + + - De-selecting individual list items is allowed even if the Item is + disabled. + + The backwards_compat argument will be removed in a future release. + + Pass a true value for select_default if you want the behaviour specified by + RFC 1866 (the HTML 2.0 standard), which is to select the first item in a + RADIO or multiple-selection SELECT control if none were selected in the + HTML. Most browsers (including Microsoft Internet Explorer (IE) and + Netscape Navigator) instead leave all items unselected in these cases. The + W3C HTML 4.0 standard leaves this behaviour undefined in the case of + multiple-selection SELECT controls, but insists that at least one RADIO + button should be checked at all times, in contradiction to browser + behaviour. + + There is a choice of parsers. mechanize.XHTMLCompatibleFormParser (uses + HTMLParser.HTMLParser) works best for XHTML, mechanize.FormParser (uses + bundled copy of sgmllib.SGMLParser) (the default) works better for ordinary + grubby HTML. Note that HTMLParser is only available in Python 2.2 and + later. You can pass your own class in here as a hack to work around bad + HTML, but at your own risk: there is no well-defined interface. + + """ + return _ParseFileEx(response, response.geturl(), *args, **kwds)[1:] + +def ParseFile(file, base_uri, *args, **kwds): + """Parse HTML and return a list of HTMLForm instances. + + mechanize.ParseError is raised on parse errors. + + file: file-like object (supporting read() method) containing HTML with zero + or more forms to be parsed + base_uri: the URI of the document (note that the base URI used to submit + the form will be that given in the BASE element if present, not that of + the document) + + For the other arguments and further details, see ParseResponse.__doc__. 
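+
+    An illustrative usage sketch (editor's note, not part of the original
+    docstring; the file name and URI are hypothetical):
+
+        forms = ParseFile(open("page.html"), "http://example.com/page.html")
+        for form in forms:
+            print form.name, form.action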
+ + """ + return _ParseFileEx(file, base_uri, *args, **kwds)[1:] + +def _ParseFileEx(file, base_uri, + select_default=False, + ignore_errors=False, + form_parser_class=FormParser, + request_class=_request.Request, + entitydefs=None, + backwards_compat=True, + encoding=DEFAULT_ENCODING, + _urljoin=urlparse.urljoin, + _urlparse=urlparse.urlparse, + _urlunparse=urlparse.urlunparse, + ): + if backwards_compat: + deprecation("operating in backwards-compatibility mode", 1) + fp = form_parser_class(entitydefs, encoding) + while 1: + data = file.read(CHUNK) + try: + fp.feed(data) + except ParseError, e: + e.base_uri = base_uri + raise + if len(data) != CHUNK: break + fp.close() + if fp.base is not None: + # HTML BASE element takes precedence over document URI + base_uri = fp.base + labels = [] # Label(label) for label in fp.labels] + id_to_labels = {} + for l in fp.labels: + label = Label(l) + labels.append(label) + for_id = l["for"] + coll = id_to_labels.get(for_id) + if coll is None: + id_to_labels[for_id] = [label] + else: + coll.append(label) + forms = [] + for (name, action, method, enctype), attrs, controls in fp.forms: + if action is None: + action = base_uri + else: + action = _urljoin(base_uri, action) + # would be nice to make HTMLForm class (form builder) pluggable + form = HTMLForm( + action, method, enctype, name, attrs, request_class, + forms, labels, id_to_labels, backwards_compat) + form._urlparse = _urlparse + form._urlunparse = _urlunparse + for ii in range(len(controls)): + type, name, attrs = controls[ii] + # index=ii*10 allows ImageControl to return multiple ordered pairs + form.new_control( + type, name, attrs, select_default=select_default, index=ii*10) + forms.append(form) + for form in forms: + form.fixup() + return forms + + +class Label: + def __init__(self, attrs): + self.id = attrs.get("for") + self._text = attrs.get("__text").strip() + self._ctext = compress_text(self._text) + self.attrs = attrs + self._backwards_compat = False # maintained by HTMLForm + + def __getattr__(self, name): + if name == "text": + if self._backwards_compat: + return self._text + else: + return self._ctext + return getattr(Label, name) + + def __setattr__(self, name, value): + if name == "text": + # don't see any need for this, so make it read-only + raise AttributeError("text attribute is read-only") + self.__dict__[name] = value + + def __str__(self): + return "" % (self.id, self.text) + + +def _get_label(attrs): + text = attrs.get("__label") + if text is not None: + return Label(text) + else: + return None + +class Control: + """An HTML form control. + + An HTMLForm contains a sequence of Controls. The Controls in an HTMLForm + are accessed using the HTMLForm.find_control method or the + HTMLForm.controls attribute. + + Control instances are usually constructed using the ParseFile / + ParseResponse functions. If you use those functions, you can ignore the + rest of this paragraph. A Control is only properly initialised after the + fixup method has been called. In fact, this is only strictly necessary for + ListControl instances. This is necessary because ListControls are built up + from ListControls each containing only a single item, and their initial + value(s) can only be known after the sequence is complete. + + The types and values that are acceptable for assignment to the value + attribute are defined by subclasses. + + If the disabled attribute is true, this represents the state typically + represented by browsers by 'greying out' a control. 
If the disabled + attribute is true, the Control will raise AttributeError if an attempt is + made to change its value. In addition, the control will not be considered + 'successful' as defined by the W3C HTML 4 standard -- ie. it will + contribute no data to the return value of the HTMLForm.click* methods. To + enable a control, set the disabled attribute to a false value. + + If the readonly attribute is true, the Control will raise AttributeError if + an attempt is made to change its value. To make a control writable, set + the readonly attribute to a false value. + + All controls have the disabled and readonly attributes, not only those that + may have the HTML attributes of the same names. + + On assignment to the value attribute, the following exceptions are raised: + TypeError, AttributeError (if the value attribute should not be assigned + to, because the control is disabled, for example) and ValueError. + + If the name or value attributes are None, or the value is an empty list, or + if the control is disabled, the control is not successful. + + Public attributes: + + type: string describing type of control (see the keys of the + HTMLForm.type2class dictionary for the allowable values) (readonly) + name: name of control (readonly) + value: current value of control (subclasses may allow a single value, a + sequence of values, or either) + disabled: disabled state + readonly: readonly state + id: value of id HTML attribute + + """ + def __init__(self, type, name, attrs, index=None): + """ + type: string describing type of control (see the keys of the + HTMLForm.type2class dictionary for the allowable values) + name: control name + attrs: HTML attributes of control's HTML element + + """ + raise NotImplementedError() + + def add_to_form(self, form): + self._form = form + form.controls.append(self) + + def fixup(self): + pass + + def is_of_kind(self, kind): + raise NotImplementedError() + + def clear(self): + raise NotImplementedError() + + def __getattr__(self, name): raise NotImplementedError() + def __setattr__(self, name, value): raise NotImplementedError() + + def pairs(self): + """Return list of (key, value) pairs suitable for passing to urlencode. + """ + return [(k, v) for (i, k, v) in self._totally_ordered_pairs()] + + def _totally_ordered_pairs(self): + """Return list of (key, value, index) tuples. + + Like pairs, but allows preserving correct ordering even where several + controls are involved. + + """ + raise NotImplementedError() + + def _write_mime_data(self, mw, name, value): + """Write data for a subitem of this control to a MimeWriter.""" + # called by HTMLForm + mw2 = mw.nextpart() + mw2.addheader("Content-Disposition", + 'form-data; name="%s"' % name, 1) + f = mw2.startbody(prefix=0) + f.write(value) + + def __str__(self): + raise NotImplementedError() + + def get_labels(self): + """Return all labels (Label instances) for this control. + + If the control was surrounded by a