# -*- coding: utf-8 -*-
# Kodi video-addon channel for www.txibitsoft.com (torrent listings).
# NOTE(review): formatting reconstructed from an extraction-mangled source;
# all runtime strings are kept as found.
import os import re import unicodedata import urllib import xbmc import xbmcgui from core import config from core import httptools from core import logger from core import scrapertools from core.item import Item from core.scrapertools import decodeHtmlentities as dhe
# Kodi action-id constants (used by the TextBox2 dialog's onAction below).
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5

# API keys for TheMovieDB and fanart.tv metadata lookups.
api_key = "2e2160006592024ba87ccdf78c28f49f"
api_fankey = "dffe90fba4d02c199ae7a9e71330c987"

host = "http://www.txibitsoft.com/"


def mainlist(item):
    """Build the channel's root menu: movie categories, series and search."""
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="[COLOR white][B]Peliculas[/B][/COLOR]", action="peliculas",
                         url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Otras%20Peliculas%27&subcategoria=peliculas&pagina=1",
                         thumbnail="http://imgur.com/v6iC6Er.jpg", fanart="http://imgur.com/tJUbfeC.jpg"))
    # Header-only entry (empty action): the entries below act as its sub-menu.
    itemlist.append(Item(channel=item.channel, title="[COLOR orange][B]Alta Calidad[/B][/COLOR]", action="", url="",
                         thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg"))
    itemlist.append(Item(channel=item.channel, title="    [COLOR white][B]1080[/B][/COLOR]", action="peliculas",
                         url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias='Cine%20Alta%20Definicion%20HD'&subcategoria=1080p&pagina=1",
                         thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg"))
    itemlist.append(Item(channel=item.channel, title="    [COLOR white][B]720[/B][/COLOR]", action="peliculas",
                         url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Peliculas%20x264%20MKV%27&pagina=1",
                         thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg"))
    itemlist.append(Item(channel=item.channel, title="    [COLOR white][B]4k[/B][/COLOR]", action="peliculas",
                         url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Cine%20Alta%20Definicion%20HD%27&subcategoria=4KULTRAHD&pagina=1",
                         thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg"))
    # NOTE(review): dead assignment in the original — `title` is reassigned
    # before its only use further down; kept verbatim.
    title = "[COLOR white][B]Series[/B][/COLOR]"
    itemlist.append(
        Item(channel=item.channel, title="    [COLOR white][B]BdRemux[/B][/COLOR]", action="peliculas",
             url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Cine%20Alta%20Definicion%20HD%27&subcategoria=BdRemux%201080p&pagina=1",
             thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg"))
    itemlist.append(
        Item(channel=item.channel, title="    [COLOR white][B]FullBluRay[/B][/COLOR]", action="peliculas",
             url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias=%27Cine%20Alta%20Definicion%20HD%27&subcategoria=FULLBluRay&pagina=1",
             thumbnail="http://imgur.com/KXhvWIc.jpg", fanart="http://imgur.com/4kTqOKE.jpg"))
    itemlist.append(Item(channel=item.channel, title="[COLOR white][B]Series[/B][/COLOR]", action="peliculas",
                         url="http://www.txibitsoft.com/torrents.php?procesar=1&categorias='Series'&pagina=1",
                         thumbnail="http://imgur.com/qTqX9nU.jpg", fanart="http://imgur.com/rwjtkYj.jpg"))
    title = "[COLOR white][B]Buscar...[/B][/COLOR]"
    itemlist.append(
        Item(channel=item.channel, title=title, action="search", url="",
             fanart="http://imgur.com/wmkgcCC.jpg", thumbnail="http://imgur.com/b9xCys8.png"))
    return itemlist


def search(item, texto):
    """Global-search entry point: build the query URL and delegate to buscador().

    `texto` is the user-typed query; spaces become '+' for the site's GET API.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://www.txibitsoft.com/torrents.php?procesar=1&texto=%s" % (texto)
    item.extra = "1"
    try:
        return buscador(item)
    # The exception is swallowed on purpose so one failing channel does not
    # break the addon's global search.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def buscador(item):
    """Parse a search-results page of the site into a list of Items."""
    logger.info()
    itemlist = []
    # Download the page and flatten whitespace so the regexes below can
    # match across what were multiple HTML lines.
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |&", "", data)
    item.url = re.sub(r"&", "", item.url)
    # 
corrige la falta de imagen data = re.sub(r'

', data) #

Imagen de Presentación

Step Up All In MicroHD 1080p AC3 5.1-Castellano-AC3 5.1 Ingles Subs

19-12-2014

Subido por: TorrentEstrenos en Peliculas MICROHD
Descargas 46

Descargar
patron = '
' patron += '.*?href=".*?>(\d)') if float(next_page) > float(item.extra): if next_page: url = item.url + "&pagina=" + next_page title = "siguiente>>" title = title.replace(title, "[COLOR orange]" + title + "[/COLOR]") extra = next_page itemlist.append(Item(channel=item.channel, action="buscador", title=title, url=url, thumbnail="http://s18.postimg.org/4l9172cqx/tbsiguiente.png", fanart="http://s21.postimg.org/w0lgvyud3/tbfanartgeneral2.jpg", extra=extra, folder=True)) except: pass return itemlist def peliculas(item): logger.info() itemlist = [] # Descar
.*?>(.*?)') except: year = "" if sinopsis == "": sinopsis = scrapertools.find_single_match(data, '
(.*?)
') sinopsis = sinopsis.replace("

", "\n") sinopsis = re.sub(r"\(FILMAFFINITY\)
", "", sinopsis) try: rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') except: rating_filma = "Sin puntuacion" print "lobeznito" print rating_filma critica = "" patron = '
(.*?)
.*?itemprop="author">(.*?)\s*(.*?)h="ID.*?.*?TV Series') except: pass try: imdb_id = scrapertools.get_match(subdata_imdb, '
(.*?)<') except: ratintg_tvdb = "" try: rating = scrapertools.get_match(data, '"vote_average":(.*?),') except: rating = "Sin puntuación" id_scraper = id_tmdb + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + rating + "|" + status # +"|"+emision posterdb = scrapertools.find_single_match(data_tmdb, '"poster_path":(.*?)","popularity"') if "null" in posterdb: posterdb = item.thumbnail else: posterdb = re.sub(r'\\|"', '', posterdb) posterdb = "https://image.tmdb.org/t/p/original" + posterdb if "null" in fan: fanart = "http://imgur.com/21Oty9A.jpg" else: fanart = "https://image.tmdb.org/t/p/original" + fan item.extra = fanart url = "http://api.themoviedb.org/3/tv/" + id_tmdb + "/images?api_key=" + api_key data = httptools.downloadpage(url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' matches = re.compile(patron, re.DOTALL).findall(data) if len(matches) == 0: patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"' matches = re.compile(patron, re.DOTALL).findall(data) if len(matches) == 0: fanart_info = item.extra fanart_3 = "" fanart_2 = item.extra for fanart_info, fanart_3, fanart_2 in matches: fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3 fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 if fanart == "http://imgur.com/21Oty9A.jpg": fanart = fanart_info url = "http://webservice.fanart.tv/v3/tv/" + id + "?api_key=" + api_fankey data = httptools.downloadpage(url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron = '"clearlogo":.*?"url": "([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(data) if '"tvbanner"' in data: tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') tfv = tvbanner elif '"tvposter"' in data: tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') tfv = 
tvposter else: tfv = posterdb if '"tvthumb"' in data: tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') if '"hdtvlogo"' in data: hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') if '"hdclearart"' in data: hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') if len(matches) == 0: if '"hdtvlogo"' in data: if "showbackground" in data: if '"hdclearart"' in data: thumbnail = hdtvlogo extra = hdtvclear + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb else: thumbnail = hdtvlogo extra = thumbnail + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", thumbnail=thumbnail, fanart=item.extra, category=category, extra=extra, show=show, folder=True)) else: if '"hdclearart"' in data: thumbnail = hdtvlogo extra = hdtvclear + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb else: thumbnail = hdtvlogo extra = thumbnail + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, show=show, category=category, folder=True)) else: extra = "" + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", thumbnail=posterdb, fanart=fanart, extra=extra, show=show, category=category, folder=True)) for logo in matches: if '"hdtvlogo"' in data: thumbnail = hdtvlogo elif not '"hdtvlogo"' in data: if '"clearlogo"' in data: thumbnail = logo else: thumbnail = item.thumbnail if 
'"clearart"' in data: clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') if "showbackground" in data: extra = clear + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, show=show, category=category, folder=True)) else: extra = clear + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, show=show, category=category, folder=True)) if "showbackground" in data: if '"clearart"' in data: clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') extra = clear + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb else: extra = logo + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, show=show, category=category, folder=True)) if not '"clearart"' in data and not '"showbackground"' in data: if '"hdclearart"' in data: extra = hdtvclear + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb else: extra = thumbnail + "|" + year show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", thumbnail=thumbnail, fanart=item.extra, extra=extra, show=show, category=category, folder=True)) title_info = "[COLOR turquoise]Info[/COLOR]" if not 
"series" in item.url: thumbnail = posterdb title_info = "[COLOR khaki]Info[/COLOR]" if "series" in item.url: title_info = "[COLOR skyblue]Info[/COLOR]" if '"tvposter"' in data: thumbnail = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') else: thumbnail = posterdb if "tvbanner" in data: category = tvbanner else: category = show if '"tvthumb"' in data: plot = item.plot + "|" + tvthumb else: plot = item.plot + "|" + item.thumbnail if '"tvbanner"' in data: plot = plot + "|" + tvbanner elif '"tvthumb"' in data: plot = plot + "|" + tvthumb else: plot = plot + "|" + item.thumbnail else: if '"moviethumb"' in data: plot = item.plot + "|" + thumb else: plot = item.plot + "|" + posterdb if '"moviebanner"' in data: plot = plot + "|" + banner else: if '"hdmovieclearart"' in data: plot = plot + "|" + clear else: plot = plot + "|" + posterdb id = id_scraper extra = extra + "|" + id + "|" + title.encode('utf8') itemlist.append( Item(channel=item.channel, action="info", title=title_info, plot=plot, url=item.url, thumbnail=thumbnail, fanart=fanart_info, extra=extra, category=category, show=show, viewmode="movie_with_plot", folder=False)) return itemlist def findvideos(item): logger.info() itemlist = [] if not "serie" in item.url: thumbnail = item.category else: thumbnail = item.show.split("|")[4] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron = '
.*?' patron += 'alt="([^<]+)".*?' patron += '

= 5 and int(check_rat_tmdba) < 8: rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10: rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" else: rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" print "lolaymaue" except: rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]" if "10." in rating: rating = re.sub(r'10\.\d+', '10', rating) try: check_rat_filma = scrapertools.get_match(rating_filma, '(\d)') print "paco" print check_rat_filma if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8: print "dios" print check_rat_filma rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]" elif int(check_rat_filma) >= 8: print check_rat_filma rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]" else: rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" print "rojo??" print check_rat_filma except: rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]" try: if not "serie" in item.url: url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ 1] + "?api_key=" + api_key + "&append_to_response=credits&language=es" data_plot = scrapertools.cache_page(url_plot) plot, tagline = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",.*?"tagline":(".*?")') if plot == "": plot = item.show.split("|")[2] plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" plot = re.sub(r"\\", "", plot) else: plot = item.show.split("|")[2] plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]" plot = re.sub(r"\\", "", plot) if item.extra.split("|")[7] != "": tagline = item.extra.split("|")[7] # tagline= re.sub(r',','.',tagline) else: tagline = "" except: title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" plot = "Esta pelicula no tiene informacion..." 
plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" info = "" if "serie" in item.url: check2 = "serie" icon = "http://s6.postimg.org/hzcjag975/tvdb.png" foto = item.show.split("|")[1] if item.extra.split("|")[5] != "": critica = item.extra.split("|")[5] else: critica = "Esta serie no tiene críticas..." photo = item.extra.split("|")[0].replace(" ", "%20") try: tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" except: tagline = "" else: critica = item.extra.split("|")[5] if "%20" in critica: critica = "No hay críticas" icon = "http://imgur.com/SenkyxF.png" photo = item.extra.split("|")[0].replace(" ", "%20") foto = item.show.split("|")[1] if foto == item.thumbnail: foto = "" try: if tagline == "\"\"": tagline = " " except: tagline = " " tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]" check2 = "pelicula" # Tambien te puede interesar peliculas = [] if "serie" in item.url: url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[ 5] + "/recommendations?api_key=" + api_key + "&language=es" data_tpi = scrapertools.cachePage(url_tpi) tpi = scrapertools.find_multiple_matches(data_tpi, 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),"popularity"') else: url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[ 1] + "/recommendations?api_key=" + api_key + "&language=es" data_tpi = scrapertools.cachePage(url_tpi) tpi = scrapertools.find_multiple_matches(data_tpi, 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),"popularity"') for idp, peli, thumb in tpi: thumb = re.sub(r'"|}', '', thumb) if "null" in thumb: thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png" else: thumb = "https://image.tmdb.org/t/p/original" + thumb peliculas.append([idp, peli, thumb]) check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow") infoLabels = {'title': title, 'plot': plot, 
'thumbnail': photo, 'fanart': foto, 'tagline': tagline, 'rating': rating} item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma, critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/jProvTt.png") from channels import infoplus infoplus.start(item_info, peliculas) def info_capitulos(item): logger.info() url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" + item.extra.split("|")[ 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es" if "/0" in url: url = url.replace("/0", "/") data = httptools.downloadpage(url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"' matches = re.compile(patron, re.DOTALL).findall(data) if len(matches) == 0: url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[ 2] + "/" + item.extra.split("|")[3] + "/es.xml" if "/0" in url: url = url.replace("/0", "/") data = httptools.downloadpage(url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron = '.*?([^<]+).*?(.*?).*?(.*?)' matches = re.compile(patron, re.DOTALL).findall(data) if len(matches) == 0: title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" plot = "Este capitulo no tiene informacion..." 
plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" rating = "" else: for name_epi, info, rating in matches: if "episodes" in data: foto = scrapertools.get_match(data, '.*?(.*?)') fanart = "http://thetvdb.com/banners/" + foto else: fanart = "http://imgur.com/ZiEAVOD.png" plot = info plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" title = name_epi.upper() title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" image = fanart foto = item.extra.split("|")[0] if not ".png" in foto: foto = "http://imgur.com/PXJEqBn.png" foto = re.sub(r'\(.*?\)|" "|" "', '', foto) foto = re.sub(r' ', '', foto) try: check_rating = scrapertools.get_match(rating, '(\d+).') if int(check_rating) >= 5 and int(check_rating) < 8: rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" elif int(check_rating) >= 8 and int(check_rating) < 10: rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" elif int(check_rating) == 10: rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" else: rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" except: rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" if "10." in rating: rating = re.sub(r'10\.\d+', '10', rating) else: for name_epi, info, fanart, rating in matches: if info == "" or info == "\\": info = "Sin informacion del capítulo aún..." 
plot = info plot = re.sub(r'/n', '', plot) plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]" title = name_epi.upper() title = "[COLOR bisque][B]" + title + "[/B][/COLOR]" image = fanart image = re.sub(r'"|}', '', image) if "null" in image: image = "http://imgur.com/ZiEAVOD.png" else: image = "https://image.tmdb.org/t/p/original" + image foto = item.extra.split("|")[0] if not ".png" in foto: foto = "http://imgur.com/PXJEqBn.png" foto = re.sub(r'\(.*?\)|" "|" "', '', foto) foto = re.sub(r' ', '', foto) try: check_rating = scrapertools.get_match(rating, '(\d+).') if int(check_rating) >= 5 and int(check_rating) < 8: rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]" elif int(check_rating) >= 8 and int(check_rating) < 10: rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]" elif int(check_rating) == 10: rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]" else: rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" except: rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]" if "10." 
in rating: rating = re.sub(r'10\.\d+', '10', rating)
    # Tail of info_capitulos(): show the episode-info dialog built above.
    # (The head of the function sits on the preceding, extraction-mangled lines.)
    ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
    ventana.doModal()


class TextBox2(xbmcgui.WindowDialog):
    """ Create a skinned textbox window """

    def __init__(self, *args, **kwargs):
        # Dialog content is passed via keyword arguments; missing keys yield None.
        self.getTitle = kwargs.get('title')
        self.getPlot = kwargs.get('plot')
        self.getThumbnail = kwargs.get('thumbnail')
        self.getFanart = kwargs.get('fanart')
        self.getRating = kwargs.get('rating')
        # Fixed-position controls laid out for a 1280x720 skin canvas
        # (coordinates are x, y, width, height).
        self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/H2hMPTP.jpg')
        self.title = xbmcgui.ControlTextBox(120, 60, 430, 50)
        self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45)
        self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100)
        self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail)
        self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)
        # Each control is added and given open/close animations.
        self.addControl(self.background)
        self.background.setAnimations(
            [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',),
             ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)])
        self.addControl(self.thumbnail)
        self.thumbnail.setAnimations(
            [('conditional',
              'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',),
             ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)])
        self.addControl(self.plot)
        self.plot.setAnimations(
            [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',),
             ('conditional',
              'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',),
             ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)])
        self.addControl(self.fanart)
        self.fanart.setAnimations(
            [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',),
             ('conditional',
              'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',),
             ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)])
        self.addControl(self.title)
        self.title.setText(self.getTitle)
        self.title.setAnimations(
            [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',),
             ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)])
        self.addControl(self.rating)
        self.rating.setText(self.getRating)
        self.rating.setAnimations(
            [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',),
             ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)])
        xbmc.sleep(200)
        try:
            # autoScroll is only available on newer Kodi versions; the except
            # branch tells the user to upgrade instead of crashing.
            self.plot.autoScroll(7000, 6000, 30000)
        except:
            xbmc.executebuiltin(
                'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
        self.plot.setText(self.getPlot)

    def get(self):
        # Show the window non-modally.
        self.show()

    def onAction(self, action):
        # Close on back/previous-menu, swipe-left, and two raw action ids
        # (110 / 92) observed from remotes.
        if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
            self.close()


def test():
    # Channel self-test hook required by the framework; nothing to probe here.
    return True


def browser(url):
    """Fetch `url` with a mechanize Browser (works around Bing search blocks)."""
    import mechanize
    # We use a mechanize Browser to avoid problems with the Bing search.
    br = mechanize.Browser()
    # Browser options
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follows refresh 0 but not hangs on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    # Want debugging messages?
    # br.set_debug_http(True)
    # br.set_debug_redirects(True)
    # br.set_debug_responses(True)
    # User-Agent (this is cheating, ok?)
br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] # Open some site, let's pick a random one, the first that pops in mind r = br.open(url) response = r.read() print response if "img,divreturn" in response: r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) print "prooooxy" response = r.read() return response def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): i = 0 while i < len(text): m = match(text, i) s = m.group(m.lastindex) i = m.end() if m.lastindex == 2: yield "s" yield text[i:i + int(s)] i = i + int(s) else: yield s def decode_item(next, token): if token == "i": # integer: "i" value "e" data = int(next()) if next() != "e": raise ValueError elif token == "s": # string: "s" value (virtual tokens) data = next() elif token == "l" or token == "d": # container: "l" (or "d") values "e" data = [] tok = next() while tok != "e": data.append(decode_item(next, tok)) tok = next() if token == "d": data = dict(zip(data[0::2], data[1::2])) else: raise ValueError return data def decode(text): try: src = tokenize(text) data = decode_item(src.next, src.next()) for token in src: # look for more tokens raise SyntaxError("trailing junk") except (AttributeError, ValueError, StopIteration): try: data = data except: data = src return data def convert_size(size): import math if (size == 0): return '0B' size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size, 1024))) p = math.pow(1024, i) s = round(size / p, 2) return '%s %s' % (s, size_name[i])