# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Channel "peliculasgratis" (Kodi / pelisalacarta-style addon, Python 2).
#
# NOTE(review): this file was recovered from a mangled copy in which all
# newlines were collapsed and the HTML tags inside the scraping regexes were
# stripped out of the string literals.  The module has been re-formatted and
# the surviving string fragments re-joined verbatim; every pattern or region
# that could NOT be fully recovered is marked with TODO(review)/NOTE(review)
# and must be restored from version control before the scrapers work again.
# ---------------------------------------------------------------------------
import os
import re
import urllib
import urlparse

import xbmc
import xbmcgui

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from core.scrapertools import decodeHtmlentities as dhe
from platformcode import config, logger

# Kodi GUI action / control ids used by the channel's custom dialogs.
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5

host = "http://peliculasgratis.biz"

# Quality tag scraped from the site -> coloured label shown in the listing.
CALIDADES = {"micro1080p": "[COLOR plum]Micro1080p[/COLOR]",
             "dvds": "[COLOR lime]Dvds[/COLOR]",
             "hdrip": "[COLOR dodgerblue]Hdrip[/COLOR]",
             "dvdrip": "[COLOR crimson]Dvdrip[/COLOR]",
             "hdts": "[COLOR aqua]Hdts[/COLOR]",
             "bluray-line": "[COLOR lightslategray]Bluray-line[/COLOR]",
             "hdtv-rip": "[COLOR black]Hdtv-rip[/COLOR]",
             "micro720p": "[COLOR yellow]Micro720p[/COLOR]",
             "ts-hq": "[COLOR mediumspringgreen]Ts-Hq[/COLOR]",
             "camrip": "[COLOR royalblue]Camp-Rip[/COLOR]",
             "webs": "[COLOR lightsalmon]Webs[/COLOR]",
             "hd": "[COLOR mediumseagreen]HD[/COLOR]"}

# Language tag -> coloured label (long form).
IDIOMAS = {"castellano": "[COLOR yellow]Castellano[/COLOR]",
           "latino": "[COLOR orange]Latino[/COLOR]",
           "vose": "[COLOR lightsalmon]Subtitulada[/COLOR]",
           "vo": "[COLOR crimson]Ingles[/COLOR]",
           "en": "[COLOR crimson]Ingles[/COLOR]"}

# Language tag -> coloured label (short form).
IDIOMASP = {"es": "[COLOR yellow]CAST[/COLOR]",
            "la": "[COLOR orange]LAT[/COLOR]",
            "vs": "[COLOR lightsalmon]SUB[/COLOR]",
            "vo": "[COLOR crimson]Ingles[/COLOR]",
            "en": "[COLOR crimson]INGL[/COLOR]"}


def browser(url):
    """Fetch *url* with mechanize (used for Bing searches, avoiding bans).

    Falls back to an SSL proxy when Bing serves its bot-check page.
    Returns the raw response body as a string.
    """
    import mechanize

    br = mechanize.Browser()
    # Browser options.
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follows refresh 0 but does not hang on refresh > 0.
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    r = br.open(url)
    response = r.read()
    logger.debug(response)  # was a bare py2 `print`; logger keeps the trace
    if "img,divreturn" in response:
        # Bing served its interstitial page: retry through the proxy.
        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
        logger.debug("prooooxy")
        response = r.read()
    return response


# TMDb / fanart.tv API keys.
api_key = "2e2160006592024ba87ccdf78c28f49f"
api_fankey = "dffe90fba4d02c199ae7a9e71330c987"


def mainlist(item):
    """Build the channel's root menu (films, series, search entries)."""
    logger.info()
    itemlist = []
    itemlist.append(item.clone(title="[COLOR lightskyblue][B]Películas[/B][/COLOR]",
                               action="scraper", url=host,
                               thumbnail="http://imgur.com/fN2p6qH.png",
                               fanart="http://imgur.com/b8OuBR2.jpg",
                               contentType="movie"))
    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Más vistas[/B][/COLOR]",
                                       action="scraper",
                                       url="http://peliculasgratis.biz/catalogue?order=most_viewed",
                                       thumbnail="http://imgur.com/fN2p6qH.png",
                                       fanart="http://imgur.com/b8OuBR2.jpg",
                                       contentType="movie"))
    # NOTE(review): ".png.png" double extension reproduced from the recovered
    # source — confirm against the original before "fixing" it.
    itemlist.append(itemlist[-1].clone(title=" [COLOR lightskyblue][B]Recomendadas[/B][/COLOR]",
                                       action="scraper",
                                       url="http://peliculasgratis.biz/catalogue?order=most_rated",
                                       thumbnail="http://imgur.com/fN2p6qH.png.png",
                                       fanart="http://imgur.com/b8OuBR2.jpg",
                                       contentType="movie"))
    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Actualizadas[/B][/COLOR]",
                                       action="scraper",
                                       url="http://peliculasgratis.biz/catalogue?",
                                       thumbnail="http://imgur.com/fN2p6qH.png",
                                       fanart="http://imgur.com/b8OuBR2.jpg",
                                       contentType="movie"))
    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Series[/B][/COLOR]",
                                       action="scraper",
                                       url="http://peliculasgratis.biz/lista-de-series",
                                       thumbnail="http://imgur.com/Jia27Uc.png",
                                       fanart="http://imgur.com/b8OuBR2.jpg",
                                       contentType="tvshow"))
    # Non-clickable "Buscar" header (empty action), then the two real entries.
    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Buscar[/B][/COLOR]",
                                       action="", url="",
                                       thumbnail="http://imgur.com/mwTwfN7.png",
                                       fanart="http://imgur.com/b8OuBR2.jpg"))
    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Buscar Película[/B][/COLOR]",
                                       action="search", url="",
                                       thumbnail="http://imgur.com/mwTwfN7.png",
                                       fanart="http://imgur.com/b8OuBR2.jpg",
                                       contentType="movie"))
    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Buscar Serie[/B][/COLOR]",
                                       action="search", url="",
                                       thumbnail="http://imgur.com/mwTwfN7.png",
                                       fanart="http://imgur.com/b8OuBR2.jpg",
                                       contentType="tvshow"))
    return itemlist


def search(item, texto):
    """Run a site search for *texto* and return the scraped results."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://peliculasgratis.biz/search/%s" % texto
    try:
        return scraper(item)
    # Deliberately broad: the global search must not break if one channel
    # fails, so log the traceback and return an empty list.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def scraper(item):
    """List films or series from a catalogue/search page, with pagination.

    TODO(review): the HTML tags inside every pattern below were stripped
    when this file was recovered; the surviving fragments are re-joined
    here but will NOT match the live markup until restored from VCS (the
    tv-branch pattern visibly lost two of its six capture groups).
    """
    logger.info()
    itemlist = []
    # Download and flatten the page (last alternative was likely &nbsp;).
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    bloque_enlaces = scrapertools.find_single_match(data, '(.*?)<\/i>Anuncios')
    if item.contentType != "movie":
        matches = scrapertools.find_multiple_matches(
            bloque_enlaces,
            '(.*?)<\/a>.*?(completa)">([^"]+)<\/a><\/h3>(.*?)<\/span>')
    else:
        matches = scrapertools.find_multiple_matches(
            bloque_enlaces,
            '([^<]+)<.*?(.*?)<\/a>.*?title[^<]+>([^<]+)<\/a><\/h3>(.*?)<')
    # Fix: check_year is consulted after the loop for pagination; without a
    # default it raised NameError whenever the page yielded no matches.
    check_year = ""
    for url, thumb, quality, check_idioma, title, check_year in matches:
        logger.debug('check_idioma: %s' % check_idioma)
        title_fan = title
        title_item = "[COLOR cornflowerblue][B]" + title + "[/B][/COLOR]"
        if item.contentType != "movie":
            idiomas = ''
        else:
            if quality == "ts":
                quality = "ts-hq"  # normalise bare "ts" before the lookup
            quality = CALIDADES.get(quality, quality)
            # TODO(review): language-flag pattern entirely lost in recovery.
            idiomas = scrapertools.find_multiple_matches(check_idioma, '')
        itemlist.append(
            Item(channel=item.channel, title=title,
                 url=urlparse.urljoin(host, url), action="fanart",
                 thumbnail=thumb, fanart="http://imgur.com/nqmJozd.jpg",
                 extra=title_fan + "|" + title_item + "|" + check_year.strip(),
                 contentType=item.contentType, folder=True,
                 language=idiomas))

    ## Pagination — only offered when the last result carried a year.
    if check_year:
        next_page = scrapertools.find_single_match(
            data, 'href="([^"]+)" title="Siguiente página">')
        if len(next_page) > 0:
            url = next_page
            if "http" not in url:
                url = urlparse.urljoin(host, url)
            itemlist.append(
                Item(channel=item.channel, action="scraper",
                     title="[COLOR floralwhite][B]Siguiente[/B][/COLOR]",
                     url=url, thumbnail="http://imgur.com/jhRFAmk.png",
                     fanart="http://imgur.com/nqmJozd.jpg", extra=item.extra,
                     contentType=item.contentType, folder=True))
    return itemlist


def fanart(item):
    """Resolve artwork/metadata for one title; emit the play + "Info" items.

    NOTE(review): the middle of this function — the Bing "site:imdb.com"
    lookup via browser(), the IMDb-id extraction and the TMDb query that
    filled ``imagenes``/``posterdb``/ratings — was destroyed when the
    source was recovered.  Placeholder defaults below keep the function
    importable and runnable; restore the lost region from version control.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

    # --- year / synopsis scraped from the detail page ----------------------
    year = item.extra.split("|")[2]
    if not year.isdigit():
        try:
            year = scrapertools.find_single_match(data, '[^<]+<\/span>(\d+)<')
        except:
            year = ""
    if item.contentType != "movie":
        tipo_ps = "tv"
    else:
        tipo_ps = "movie"
    title = item.extra.split("|")[0]
    fulltitle = title
    if "El infiltrado" in title:
        # Site title differs from the international one the lookups need.
        title = "The Night Manager"
    # TODO(review): the original-title pattern and the synopsis pattern were
    # fused during recovery; only the tail of the second one survived.
    title_o = scrapertools.find_single_match(data, '')
    sinopsis = scrapertools.find_single_match(data, '(.*?)<\/div>')
    if sinopsis == "":
        try:
            sinopsis = scrapertools.find_single_match(data, 'sinopsis\'>(.*?)<\/div>')
        except:
            sinopsis = ""
    if "Miniserie" in sinopsis:
        tipo_ps = "tv"
        year = scrapertools.find_single_match(sinopsis, 'de TV \((\d+)\)')
    if year == "":
        if item.contentType != "movie":
            try:
                year = scrapertools.find_single_match(
                    data, 'Estreno:<\/strong>(\d+)<\/span>')
            except:
                year = ""
        else:
            # TODO(review): truncated pattern (fragments re-joined).
            year = scrapertools.find_single_match(data, 'A.*?(\d+)')
    if year == "":
        try:
            year = scrapertools.find_single_match(data, 'Estreno.*?\d+/\d+/(\d+)')
        except:
            try:
                # TODO(review): truncated pattern (fragments re-joined).
                year = scrapertools.find_single_match(data, '.*?Año.*?(\d\d\d\d)')
            except:
                year = ""

    # --- RECOVERY GAP ------------------------------------------------------
    # Lost code performed, per the surviving fragments: a Bing query
    #   "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (title_imdb, year)
    # through browser(), stripped the proxy prefix, extracted
    #   subdata_imdb ~ '&nbsp;&#0183;&#32;(.*?)h="ID...'
    # distinguished "TV Series" hits, took imdb_id with
    # scrapertools.get_match(), then queried TMDb to fill the variables
    # below.  Placeholders only — restore from VCS.
    title_imdb = title            # TODO(review) confirm original normalisation
    imdb_id = ""
    id = ""                       # TMDb id
    id_tvdb = ""
    rating = ""
    rating_filma = ""
    critica = ""
    tagline = ""
    posterdb = item.thumbnail
    fanart = item.fanart
    imagenes = []
    # -----------------------------------------------------------------------

    # Distribute the TMDb backdrops over the info/extra fanart slots,
    # degrading gracefully when fewer images are available.
    if len(imagenes) >= 5:
        fanart_info = imagenes[1]
        fanart_2 = imagenes[2]
        fanart_3 = imagenes[3]
        fanart_4 = imagenes[4]
        if fanart == item.fanart:
            fanart = fanart_info
    elif len(imagenes) == 4:
        fanart_info = imagenes[1]
        fanart_2 = imagenes[2]
        fanart_3 = imagenes[3]
        fanart_4 = imagenes[1]
        if fanart == item.fanart:
            fanart = fanart_info
    elif len(imagenes) == 3:
        fanart_info = imagenes[1]
        fanart_2 = imagenes[2]
        fanart_3 = imagenes[1]
        fanart_4 = imagenes[0]
        if fanart == item.fanart:
            fanart = fanart_info
    elif len(imagenes) == 2:
        fanart_info = imagenes[1]
        fanart_2 = imagenes[0]
        fanart_3 = imagenes[1]
        fanart_4 = imagenes[1]
        if fanart == item.fanart:
            fanart = fanart_info
    else:
        fanart_info = fanart
        fanart_2 = fanart
        fanart_3 = fanart
        fanart_4 = fanart

    # fanart.tv artwork (fanartv is defined elsewhere in this file).
    images_fanarttv = fanartv(item, id_tvdb, id)
    if item.contentType != "movie":
        url = item.url + "/episodios"
        action = "findvideos_series"
        if images_fanarttv:
            try:
                thumbnail_art = images_fanarttv.get("hdtvlogo")[0].get("url")
            except:
                try:
                    thumbnail_art = images_fanarttv.get("clearlogo")[0].get("url")
                except:
                    thumbnail_art = posterdb
            if images_fanarttv.get("tvbanner"):
                tvf = images_fanarttv.get("tvbanner")[0].get("url")
            elif images_fanarttv.get("tvthumb"):
                tvf = images_fanarttv.get("tvthumb")[0].get("url")
            elif images_fanarttv.get("tvposter"):
                tvf = images_fanarttv.get("tvposter")[0].get("url")
            else:
                tvf = posterdb
            if images_fanarttv.get("tvthumb"):
                thumb_info = images_fanarttv.get("tvthumb")[0].get("url")
            else:
                thumb_info = thumbnail_art
            if images_fanarttv.get("hdclearart"):
                tiw = images_fanarttv.get("hdclearart")[0].get("url")
            elif images_fanarttv.get("characterart"):
                tiw = images_fanarttv.get("characterart")[0].get("url")
            elif images_fanarttv.get("hdtvlogo"):
                tiw = images_fanarttv.get("hdtvlogo")[0].get("url")
            else:
                tiw = ""
        else:
            tiw = ""
            tvf = thumbnail_info = thumbnail_art = posterdb
    else:
        url = item.url
        action = "findvideos"
        if images_fanarttv:
            if images_fanarttv.get("hdmovielogo"):
                thumbnail_art = images_fanarttv.get("hdmovielogo")[0].get("url")
            elif images_fanarttv.get("moviethumb"):
                thumbnail_art = images_fanarttv.get("moviethumb")[0].get("url")
            elif images_fanarttv.get("moviebanner"):
                thumbnail_art = images_fanarttv.get("moviebanner")[0].get("url")
            else:
                thumbnail_art = posterdb
            if images_fanarttv.get("moviedisc"):
                tvf = images_fanarttv.get("moviedisc")[0].get("url")
            elif images_fanarttv.get("hdmovielogo"):
                tvf = images_fanarttv.get("hdmovielogo")[0].get("url")
            else:
                tvf = posterdb
            if images_fanarttv.get("hdmovieclearart"):
                tiw = images_fanarttv.get("hdmovieclearart")[0].get("url")
            elif images_fanarttv.get("hdmovielogo"):
                tiw = images_fanarttv.get("hdmovielogo")[0].get("url")
            else:
                tiw = ""
        else:
            tiw = ""
            tvf = thumbnail_art = posterdb

    # extra layout: fanart2|fanart3|fanart4|tmdb_id|tvf|tvdb_id|tiw|rating|tipo
    extra = str(fanart_2) + "|" + str(fanart_3) + "|" + str(fanart_4) + "|" + \
        str(id) + "|" + str(tvf) + "|" + str(id_tvdb) + "|" + str(tiw) + \
        "|" + str(rating) + "|" + tipo_ps
    itemlist.append(
        Item(channel=item.channel, title=item.title, url=url, action=action,
             thumbnail=thumbnail_art, fanart=fanart, extra=extra,
             contentType=item.contentType, fulltitle=fulltitle, folder=True))

    title_info = "[COLOR powderblue][B]Info[/B][/COLOR]"
    extra = str(rating) + "|" + str(rating_filma) + "|" + str(id) + "|" + \
        str(item.title) + "|" + str(id_tvdb) + "|" + str(tagline) + "|" + \
        str(sinopsis) + "|" + str(critica) + "|" + str(thumbnail_art) + \
        "|" + str(fanart_4)
    itemlist.append(Item(channel=item.channel, action="info", title=title_info,
                         url=item.url, thumbnail=posterdb, fanart=fanart_info,
                         extra=extra, contentType=item.contentType,
                         folder=False))
    return itemlist


def findvideos_series(item):
    """List one show's seasons/episodes from the "/episodios" page."""
    logger.info()
    itemlist = []
    fanart = ""
    check_temp = []
    data = httptools.downloadpage(item.url).data
    if item.contentType != "movie":
        # extra[3] = TMDb id, extra[8] = "tv"/"movie" (set by fanart()).
        itmdb = tmdb.Tmdb(id_Tmdb=item.extra.split("|")[3],
                          tipo=item.extra.split("|")[8])
        season = itmdb.result.get("seasons")
    check = "no"
    # TODO(review): both patterns lost their HTML tags during recovery;
    # surviving fragments re-joined verbatim.
    try:
        temp, bloque_enlaces = scrapertools.find_single_match(
            data, 'Temporada (\d+)(.*?)Temporada')
    except:
        if "no se agregaron" in data:
            temp = bloque_enlaces = ""
        else:
            temp, bloque_enlaces = scrapertools.find_single_match(
                data, 'Temporada (\d+)(.*?)    ')

    if temp != "":
        # Season header row: poster from TMDb, backdrop from fanart.tv.
        thumbnail = ""
        if season:
            for detail in season:
                if str(detail["season_number"]) == temp:
                    if detail["poster_path"]:
                        thumbnail = "https://image.tmdb.org/t/p/original" + detail["poster_path"]
        images_fanarttv = fanartv(item, item.extra.split("|")[5],
                                  item.extra.split("|")[3])
        if images_fanarttv:
            season_f = images_fanarttv.get("showbackground")
            if season_f:
                for detail in season_f:
                    if str(detail["season"]) == temp:
                        if detail["url"]:
                            fanart = detail["url"]
        if fanart == "":
            fanart = item.extra.split("|")[0]
        if thumbnail == "":
            thumbnail = item.thumbnail
        itemlist.append(Item(channel=item.channel,
                             title="[COLOR darkturquoise]Temporada[/COLOR] " +
                                   "[COLOR beige]" + temp + "[/COLOR]",
                             url="", action="", thumbnail=thumbnail,
                             fanart=fanart, extra="",
                             contentType=item.contentType, folder=False))

    capitulos = scrapertools.find_multiple_matches(
        bloque_enlaces, 'href="([^"]+)".*?Episodio (\d+) - ([^<]+)')
    for url, epi, title in capitulos:
        if epi == "1":
            # A second "Episodio 1" means the next season started: emit a
            # new season header before its episodes.
            if epi in str(check_temp):
                temp = int(temp) + 1
                thumbnail = ""
                if season:
                    for detail in season:
                        if detail["season_number"] == temp:
                            if detail["poster_path"]:
                                thumbnail = "https://image.tmdb.org/t/p/original" + detail["poster_path"]
                images_fanarttv = fanartv(item, item.extra.split("|")[5],
                                          item.extra.split("|")[3])
                if images_fanarttv:
                    season_f = images_fanarttv.get("showbackground")
                    if season_f:
                        for detail in season_f:
                            if detail["season"] == temp:
                                if detail["url"]:
                                    fanart = detail["url"]
                if fanart == "":
                    fanart = item.extra.split("|")[0]
                if thumbnail == "":
                    thumbnail = item.thumbnail
                itemlist.append(Item(channel=item.channel,
                                     title="[COLOR darkturquoise]Temporada[/COLOR] " +
                                           "[COLOR beige]" + str(temp) + "[/COLOR]",
                                     url="", action="", thumbnail=thumbnail,
                                     fanart=fanart, extra="",
                                     contentType=item.contentType,
                                     folder=False))
            check_temp.append([epi])
        itemlist.append(Item(channel=item.channel,
                             title=" [COLOR cyan]Episodio[/COLOR] " +
                                   "[COLOR darkcyan]" + epi + "[/COLOR]" +
                                   " - " + "[COLOR cadetblue]" + title + "[/COLOR]",
                             url=url, action="findvideos",
                             thumbnail=item.extra.split("|")[4],
                             fanart=item.extra.split("|")[0], extra="",
                             contentType=item.contentType, folder=True))
        title_info = " Info"
        title_info = "[COLOR steelblue]" + title_info + "[/COLOR]"
        itemlist.append(Item(channel=item.channel, action="info_capitulos",
                             title=title_info, url=item.url,
                             thumbnail=item.extra.split("|")[6],
                             fanart=item.extra.split("|")[1],
                             extra=item.extra + "|" + str(temp) + "|" + epi,
                             folder=False))
    return itemlist


def findvideos(item):
    """Resolve playable links for an episode/film.

    NOTE(review): this function was truncated mid-string at the end of the
    recovered source — only the opening fragment survived.  The body below
    keeps the surviving statements and returns an empty list; restore the
    link-extraction / server-detection logic from version control.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if item.extra != "dd" and item.extra != "descarga":
        if item.contentType != "movie":
            # TODO(review): original pattern lost mid-string.
            bloque_links = scrapertools.find_single_match(data, '')
    return itemlist