diff --git a/mediaserver/platformcode/template/js/protocol.js b/mediaserver/platformcode/template/js/protocol.js
index 089a0c3d..4b4fa6d9 100644
--- a/mediaserver/platformcode/template/js/protocol.js
+++ b/mediaserver/platformcode/template/js/protocol.js
@@ -285,6 +285,7 @@ function get_response(data) {
             else {
                 keypress = "";
             };
+            if (!data.items[x].value) data.items[x].value = "";
             itemlist[data.items[x].category].push(replace_list(html.config.text, {
                 "item_color": data.items[x].color,
                 "item_label": data.items[x].label,
diff --git a/plugin.video.alfa/channels/cinetux.py b/plugin.video.alfa/channels/cinetux.py
index 348cdaf9..7f732c30 100644
--- a/plugin.video.alfa/channels/cinetux.py
+++ b/plugin.video.alfa/channels/cinetux.py
@@ -314,15 +314,11 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
             url = scrapertools.find_single_match(bloque1, patron)
             if "goo.gl" in url:
                 url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
-            if "www.cinetux.me" in url:
-                server = scrapertools.find_single_match(url, "player/(.*?)\.")
-            else:
-                server = servertools.get_server_from_url(url)
-            matches.append([url, server, "", language.strip(), t_tipo])
+            matches.append([url, "", "", language.strip(), t_tipo])
     bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
     bloque2 = bloque2.replace("\t", "").replace("\r", "")
     patron = '(?s)optn" href="([^"]+)'
-    patron += '.*?title="([^"]+)'
+    patron += '.*?title="([^\.]+)'
     patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
     patron += '.*?src="[^>]+"?/>([^<]+)'
     patron += '.*?/span>([^<]+)'
@@ -336,7 +332,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
         scrapedtipo = match[4]
         if t_tipo.upper() not in scrapedtipo.upper():
             continue
-        title = " Mirror en " + scrapedserver.split(".")[0] + " (" + scrapedlanguage + ")"
+        title = " Mirror en %s (" + scrapedlanguage + ")"
         if len(scrapedcalidad.strip()) > 0:
             title += " (Calidad " + scrapedcalidad.strip() + ")"
 
@@ -357,6 +353,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
         title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
         lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
                                         filtro=True))
+    lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server.capitalize())
     return lista_enlaces
 
 
@@ -368,7 +365,6 @@ def play(item):
         data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
         id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
         item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
-        itemlist = servertools.find_video_items(data=item.url)
     elif "links" in item.url or "www.cinetux.me" in item.url:
         data = httptools.downloadpage(item.url).data
         scrapedurl = scrapertools.find_single_match(data, '
diff --git a/plugin.video.alfa/channels/torrentlocura.py b/plugin.video.alfa/channels/torrentlocura.py
--- a/plugin.video.alfa/channels/torrentlocura.py
+++ b/plugin.video.alfa/channels/torrentlocura.py
-    # Follows refresh 0 but not hangs on refresh > 0
-    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
-    # Want debugging messages?
-    # br.set_debug_http(True)
-    # br.set_debug_redirects(True)
-    # br.set_debug_responses(True)
-
-    # User-Agent (this is cheating, ok?)
-    # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
-    # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
-    # Open some site, let's pick a random one, the first that pops in mind
-    r = br.open(url)
-    response = r.read()
-    print response
-    if "img,divreturn" in response:
-        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
-        print "prooooxy"
-        response = r.read()
-
-    return response
-
-
-api_key = "2e2160006592024ba87ccdf78c28f49f"
-api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
+host = "http://torrentlocura.com/"
 
 
 def mainlist(item):
     logger.info()
-    itemlist = []
-    itemlist.append(item.clone(title="[COLOR crimson][B]Películas[/B][/COLOR]", action="scraper",
-                               url="http://torrentlocura.com/peliculas/", thumbnail="http://imgur.com/RfZjMBi.png",
-                               fanart="http://imgur.com/V7QZLAL.jpg", contentType="movie"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B] Películas HD[/B][/COLOR]", action="scraper",
-                                       url="http://torrentlocura.com/peliculas-hd/",
-                                       thumbnail="http://imgur.com/RfZjMBi.png", fanart="http://imgur.com/V7QZLAL.jpg",
-                                       contentType="movie"))
-    itemlist.append(itemlist[-1].clone(title=" [COLOR crimson][B]Estrenos[/B][/COLOR]", action="scraper",
-                                       url="http://torrentlocura.com/estrenos-de-cine/",
-                                       thumbnail="http://imgur.com/RfZjMBi.png", fanart="http://imgur.com/V7QZLAL.jpg",
-                                       contentType="movie"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B] Películas 3D[/B][/COLOR]", action="scraper",
-                                       url="http://torrentlocura.com/peliculas-3d/",
-                                       thumbnail="http://imgur.com/RfZjMBi.png", fanart="http://imgur.com/V7QZLAL.jpg",
-                                       contentType="movie"))
+
+    thumb_movie = get_thumb("channels_movie.png")
+    thumb_tvshow = get_thumb("channels_tvshow.png")
+    thumb_anime = get_thumb("channels_anime.png")
+    thumb_search = get_thumb("search.png")
+
+    itemlist = list()
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
+                         thumbnail=thumb_movie, pattern="peliculas"))
     itemlist.append(
-        itemlist[-1].clone(title=" [COLOR crimson][B]Películas subtituladas[/B][/COLOR]", action="scraper",
-                           url="http://torrentlocura.com/peliculas-vo/", thumbnail="http://imgur.com/RfZjMBi.png",
-                           fanart="http://imgur.com/V7QZLAL.jpg", contentType="movie"))
+        Item(channel=item.channel, action="submenu", title="Series", url=host,
+             thumbnail=thumb_tvshow, pattern="series"))
     itemlist.append(
-        itemlist[-1].clone(title="[COLOR crimson][B] Películas Audio Latino[/B][/COLOR]", action="scraper",
-                           url="http://torrentlocura.com/peliculas-latino/", thumbnail="http://imgur.com/RfZjMBi.png",
-                           fanart="http://imgur.com/V7QZLAL.jpg", contentType="movie"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Series[/B][/COLOR]", action="scraper",
-                                       url="http://torrentlocura.com/series/", thumbnail="http://imgur.com/vX2dUYl.png",
-                                       contentType="tvshow"))
-    itemlist.append(itemlist[-1].clone(title=" [COLOR crimson][B]Series HD[/B][/COLOR]", action="scraper",
-                                       url="http://torrentlocura.com/series-hd/",
-                                       thumbnail="http://imgur.com/vX2dUYl.png", fanart="http://imgur.com/V7QZLAL.jpg",
-                                       contentType="tvshow"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR crimson][B]Buscar[/B][/COLOR]", action="search", url="",
-                                       thumbnail="http://imgur.com/rSttk79.png",
fanart="http://imgur.com/V7QZLAL.jpg")) + Item(channel=item.channel, action="anime", title="Anime", url=host, + thumbnail=thumb_anime, pattern="anime")) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", + thumbnail=thumb_search)) return itemlist def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = "http://torrentlocura.com/buscar" - item.extra = urllib.urlencode({'q': texto}) - item.contentType != "movie" + logger.info("search:" + texto) + # texto = texto.replace(" ", "+") + try: - return buscador(item) + item.post = "q=%s" % texto + item.pattern = "buscar-list" + itemlist = listado2(item) + + return itemlist + # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: import sys @@ -120,1079 +53,309 @@ def search(item, texto): return [] -def buscador(item): +def anime(item): logger.info() itemlist = [] - data = httptools.downloadpage(item.url, post=item.extra, ).data - data = unicode(data, "latin1").encode("utf8") - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - check_item = [] - bloque_enlaces = scrapertools.find_single_match(data, 'Resultados(.*?)end .page-box') - result_0 = scrapertools.find_multiple_matches(bloque_enlaces, - 'a href="([^"]+)" title="Descargar (.*?) ([^<]+)">') - for url, tipo, title, thumb in result_0: - try: - year = scrapertools.find_single_match(title, '(\d\d\d\d)') - except: - year = "" - if tipo == "Serie": - contentType = "tv" - title = re.sub(r'-.*', '', title) - title_check = title.strip() - else: - contentType = "movie" - # tipo="Pelicula" - title = re.sub(r'de Cine', 'Screener', title) - title = title.replace("RIP", "HdRip") - title_check = (title + " " + tipo).strip() - if "pc" in tipo or "PC" in tipo or "XBOX" in tipo or "Nintendo" in tipo or "Windows" in tipo or "varios" in url or "juego" in url: - continue + title = "Anime" + url = host + "anime" + itemlist.append(item.clone(channel=item.channel, action="listado", title=title, url=url, + pattern="pelilist")) + itemlist.append( + item.clone(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, + thumbnail=item.thumbnail[:-4] + "_az.png", pattern="pelilist")) - if title_check in str(check_item): + return itemlist + + +def submenu(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) + # data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + pattern = '
  • .*?' % (host, item.pattern) + data = scrapertools.get_match(data, pattern) + + pattern = '([^>]+)' + matches = re.compile(pattern, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.strip() + url = scrapedurl + + if item.pattern in title.lower(): + itemlist.append(item.clone(channel=item.channel, action="listado", title=title, url=url, + pattern="pelilist")) + itemlist.append( + item.clone(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, + thumbnail=item.thumbnail[:-4] + "_az.png", pattern="pelilist")) + + return itemlist + + +def alfabeto(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) + # data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + pattern = '' + data = scrapertools.get_match(data, pattern) + + patron = ']+>([^>]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle.upper() + url = scrapedurl + + itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, pattern=item.pattern)) + + return itemlist + + +def listado(item): + logger.info() + itemlist = [] + + data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + + # logger.debug("data %s " % data) + next_page = scrapertools.find_single_match(data, '