From e934885a000ff1f41cc025a122cef836c8fab924 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Wed, 21 Feb 2018 17:50:13 -0500
Subject: [PATCH] cuelgame: fix buscador

---
 plugin.video.alfa/channels/cuelgame.py | 1180 +-----------------------
 1 file changed, 15 insertions(+), 1165 deletions(-)

diff --git a/plugin.video.alfa/channels/cuelgame.py b/plugin.video.alfa/channels/cuelgame.py
index 7e323deb..a6b4b641 100755
--- a/plugin.video.alfa/channels/cuelgame.py
+++ b/plugin.video.alfa/channels/cuelgame.py
@@ -1,55 +1,24 @@
 # -*- coding: utf-8 -*-

 import re
-import unicodedata
 import urlparse

-import xbmc
-import xbmcgui
 from core import scrapertools, httptools
 from core.item import Item
 from core.scrapertools import decodeHtmlentities as dhe
 from platformcode import logger

-ACTION_SHOW_FULLSCREEN = 36
-ACTION_GESTURE_SWIPE_LEFT = 511
-ACTION_SELECT_ITEM = 7
-ACTION_PREVIOUS_MENU = 10
-ACTION_MOVE_LEFT = 1
-ACTION_MOVE_RIGHT = 2
-ACTION_MOVE_DOWN = 4
-ACTION_MOVE_UP = 3
-OPTION_PANEL = 6
-OPTIONS_OK = 5
-
-api_key = "2e2160006592024ba87ccdf78c28f49f"
-api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
-

 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Cine[/COLOR]", action="scraper",
+    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Videos[/COLOR]", action="scraper",
                          url="http://cuelgame.net/?category=4",
                          thumbnail="http://img5a.flixcart.com/image/poster/q/t/d/vintage-camera-collage-sr148-medium-400x400-imadkbnrnbpggqyz.jpeg",
                          fanart="http://imgur.com/7frGoPL.jpg"))
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Series[/COLOR]", action="scraper",
-                         url="http://cuelgame.net/?category=8", thumbnail="http://imgur.com/OjP42lL.jpg",
-                         fanart="http://imgur.com/Xm49VbL.jpg"))
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]TV[/COLOR]", action="scraper",
-                         url="http://cuelgame.net/?category=67", thumbnail="http://imgur.com/C4VDnTo.png",
-                         fanart="http://imgur.com/LDoJrAf.jpg"))
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Documentales[/COLOR]", action="scraper",
-                         url="http://cuelgame.net/?category=68", thumbnail="http://imgur.com/nofNYjy.jpg",
-                         fanart="http://imgur.com/upB1jL8.jpg"))
-    itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Música[/COLOR]", action="scraper",
-                         url="http://cuelgame.net/?category=13", thumbnail="http://imgur.com/DPrOlme.jpg",
-                         fanart="http://imgur.com/FxM6xGY.jpg"))
-
     itemlist.append(Item(channel=item.channel, title="[COLOR forestgreen]Buscar[/COLOR]", action="search", url="",
                          thumbnail="http://images2.alphacoders.com/846/84682.jpg",
                          fanart="http://imgur.com/1sIHN1r.jpg"))
-
     return itemlist


@@ -71,41 +40,18 @@ def search(item, texto):
 def scraper(item):
     logger.info()
     itemlist = []
-    check_search = item.url
     # Descarga la página
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|CET", "", data)
-
-    # corrige la falta de imagen
-    # data = re.sub(r"","",data)
-
-    '''
-    En un futuro no lejano, en el que el planeta Tierra sufre una creciente desertización, Jacq Vaucan (Antonio Banderas), un agente de seguros de una compañía de robótica, investiga un caso en apariencia rutinario cuando descubre algo que podría tener consecuencias decisivas para el futuro de la humanidad. Banderas produce y protagoniza este thriller futurista, que especula sobre lo que ocurriría si la inteligencia artificial superase a la humana.| Más info. en comentarios.
-    '''
-    # id_torrent = scrapertools.get_match(item.url,"(\d+)-")
-    patron = '([^<]+)<.*?p>.*?title="meta.*?href=".*?amp;(.*?)"'
-    '''
-    patron += '([^<]+).*?'
-    patron += '([^<]+)<.*?p>'
-    patron += '.*?class="counter">.*?([^<]+)'
+    patron += '(.*?)class="lazy".*?'
+    patron += 'news-content">([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle, check_thumb, scrapedplot in matches:
         scrapedtitle = re.sub(r'\.', ' ', scrapedtitle)
-        try:
-            scrapedthumbnail = scrapertools.get_match(check_thumb, "
Más info. en comentarios.", "", scrapedplot)
-        scrapedplot = re.sub(r"<.*?>", "", scrapedplot).strip()
-
+        if scrapedurl.startswith("ed2k:"):
+            continue
         scrapedtitle = "[COLOR greenyellow]" + scrapedtitle + "[/COLOR]"
-
-        extra = title_fan + "|" + year + "|" + scrapedplot + "|" + scrapedurl + "|" + item.url
-
-        if "category=4" in item.url or "category=8" in item.url:
-
-            itemlist.append(Item(channel=item.channel, title=scrapedtitle, url="", action="fanart", server="torrent",
-                                 thumbnail=scrapedthumbnail, extra=extra, folder=True))
-        else:
-
-            itemlist.append(
-                Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent",
-                     thumbnail=scrapedthumbnail, folder=False))
-
+        itemlist.append(
+            Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play", server="torrent",
+                 thumbnail=scrapedthumbnail, folder=False))
     # Extrae el paginador
-
-
     patronvideos = 'siguiente »'
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-    scrapertools.printMatches(matches)
-
+    matches = scrapertools.find_multiple_matches(data, patronvideos)
     if len(matches) > 0:
         # corrige "&" para la paginación
         next_page = matches[0].replace("amp;", "")
-
-        if "search" in check_search:
-            scrapedurl = urlparse.urljoin(check_search, next_page)
-        else:
-            scrapedurl = urlparse.urljoin(item.url, next_page)
+        scrapedurl = urlparse.urljoin(item.url, next_page)
         itemlist.append(Item(channel=item.channel, action="scraper", title="Página siguiente >>", url=scrapedurl,
                              thumbnail="http://imgur.com/ycPgVVO.png", folder=True))
-
     return itemlist
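A minimal standalone sketch (not part of the patch) of the flow the reworked scraper() follows: pull url, title, thumbnail block and plot with one regex, skip ed2k links, then build the "Página siguiente" URL by stripping the "amp;" artifact and joining it against the current page. The SAMPLE markup and the simplified pattern below are assumptions for illustration only; the real cuelgame.net HTML and the patched patron differ.

# -*- coding: utf-8 -*-
# Hypothetical sketch, Python 2 (the channel itself imports the Python 2 urlparse module).
import re
import urlparse

# Made-up markup that only mimics the shape the new patron expects.
SAMPLE = ('<h3 class="title"><a href="/story/pelicula-ejemplo">Pelicula.Ejemplo.720p</a></h3>'
          '<img class="lazy" src="/thumbs/ejemplo.jpg">'
          '<div class="news-content">Sinopsis de ejemplo</div>'
          '<a href="/?category=4&amp;page=2">siguiente &raquo;</a>')

# Four capture groups, unpacked in the same order as the patched for-loop.
patron = 'href="([^"]+)">([^<]+)<.*?(.*?)class="lazy".*?news-content">([^<]+)'
for scrapedurl, scrapedtitle, check_thumb, scrapedplot in re.findall(patron, SAMPLE, re.DOTALL):
    if scrapedurl.startswith("ed2k:"):  # same skip the patch adds
        continue
    scrapedtitle = re.sub(r'\.', ' ', scrapedtitle)
    print(scrapedtitle + " | " + scrapedurl + " | " + scrapedplot)

# Pagination: drop the "amp;" artifact and resolve against the current page URL,
# which is what the simplified scraper() now does unconditionally.
next_page = re.findall('href="([^"]+)">siguiente', SAMPLE)[0].replace("amp;", "")
print(urlparse.urljoin("http://cuelgame.net/?category=4", next_page))

Run with Python 2; expected output is the cleaned entry ("Pelicula Ejemplo 720p | /story/pelicula-ejemplo | Sinopsis de ejemplo") followed by http://cuelgame.net/?category=4&page=2.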
-def fanart(item):
-    logger.info()
-    itemlist = []
-
-    check_sp = item.extra.split("|")[4]
-    title_fan = item.extra.split("|")[0]
-    fulltitle = item.title
-    fulltitle = re.sub(r"720p|1080.*", "", fulltitle)
-    title_fan = re.sub(r"H264.*|Netflix.*|Mitos Griegos|HDTV.*|\d\d\d\d", "", title_fan).strip()
-    item.title = title_fan.upper()
-    item.title = "[COLOR springgreen][B]" + item.title + "[/B][/COLOR]"
-    title = title_fan.replace(' ', '%20')
-    title = ''.join((c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if
-                     unicodedata.category(c) != 'Mn')).encode("ascii", "ignore")
-    item.url = item.extra.split("|")[3]
-
-    try:
-        sinopsis = item.extra.split("|")[2]
-    except:
-        sinopsis = ""
-
-    if "category=4" in check_sp:
-        id_tmdb = ""
-        # filmafinity
-        year = item.extra.split("|")[1]
-
-        url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(
-            title, year)
-        data = httptools.downloadpage(url).data
-
-        url_filmaf = scrapertools.find_single_match(data, '