# -*- coding: utf-8 -*- import re from channelselector import get_thumb from core import httptools from core import scrapertools from core import servertools from core.item import Item from platformcode import config, logger from core import tmdb host = 'http://newpct1.com/' def mainlist(item): logger.info() itemlist = [] thumb_pelis=get_thumb("channels_movie.png") thumb_series=get_thumb("channels_tvshow.png") thumb_search = get_thumb("search.png") itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host, extra="peliculas", thumbnail=thumb_pelis )) itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", thumbnail=thumb_series)) itemlist.append( Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search)) return itemlist def submenu(item): logger.info() itemlist = [] data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") patron = '
  • .*?' data = scrapertools.get_match(data, patron) patron = '([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: title = scrapedtitle.strip() url = scrapedurl itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist")) itemlist.append( Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist")) return itemlist def alfabeto(item): logger.info() itemlist = [] data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") patron = '' data = scrapertools.get_match(data, patron) patron = ']+>([^>]+)' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: title = scrapedtitle.upper() url = scrapedurl itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra)) return itemlist def listado(item): logger.info() itemlist = [] url_next_page ='' data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") #logger.debug(data) logger.debug('item.modo: %s'%item.modo) logger.debug('item.extra: %s'%item.extra) if item.modo != 'next' or item.modo =='': logger.debug('item.title: %s'% item.title) patron = '' logger.debug("patron=" + patron) fichas = scrapertools.get_match(data, patron) page_extra = item.extra else: fichas = data page_extra = item.extra patron = '
# NOTE(review): this whole physical line belongs to listado() but was mangled:
# the file was collapsed onto one line and the regex literal opened just above
# lost its HTML content, so the line starts mid-expression ("  • 30:" is the
# stripped remains of the pattern plus an "if len(matches) >"-style test and
# the 30-items-per-page pagination logic). Kept byte-identical below — restore
# this region from upstream history before running.
  • 30: url_next_page = item.url matches = matches[:30] next_page = 'b' modo = 'continue' else: matches = matches[30:] next_page = 'a' patron_next_page = 'Next<\/a>' matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data) modo = 'continue' if len(matches_next_page) > 0: url_next_page = matches_next_page[0] modo = 'next' for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches: url = scrapedurl title = scrapedtitle thumbnail = scrapedthumbnail action = "findvideos" extra = "" year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})') if "1.com/series" in url: action = "episodios" extra = "serie" title = scrapertools.find_single_match(title, '([^-]+)') title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "", 1).strip() else: title = title.replace("Descargar", "", 1).strip() if title.endswith("gratis"): title = title[:-7] show = title if item.extra != "buscar-list": title = title + ' ' + calidad context = "" context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/") if context_title: try: context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series", "tvshow") context_title = context_title[1].replace("-", " ") if re.search('\d{4}', context_title[-4:]): context_title = context_title[:-4] elif re.search('\(\d{4}\)', context_title[-6:]): context_title = context_title[:-6] except: context_title = show logger.debug('contxt title: %s'%context_title) logger.debug('year: %s' % year) logger.debug('context: %s' % context) if not 'array' in title: new_item = Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, extra = extra, show = context_title, contentTitle=context_title, contentType=context, context=["buscar_trailer"], infoLabels= {'year':year}) if year: tmdb.set_infoLabels_item(new_item, seekTmdb = True) itemlist.append(new_item) if url_next_page: 
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=url_next_page, next_page=next_page, folder=True, text_color='yellow', text_bold=True, modo = modo, plot = extra, extra = page_extra)) return itemlist def listado2(item): logger.info() itemlist = [] data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") list_chars = [["ñ", "ñ"]] for el in list_chars: data = re.sub(r"%s" % el[0], el[1], data) try: get, post = scrapertools.find_single_match(data, '' patron_ver = '
# NOTE(review): still inside listado2() — this physical line is corrupted
# (the patron_ver/patron_descargar patterns lost their HTML tag content and
# the trailing literal is cut mid-string at the line break). Kept
# byte-identical below; restore this region from upstream history.
    ]+>.*?' match_ver = scrapertools.find_single_match(data, patron_ver) match_descargar = scrapertools.find_single_match(data, patron_descargar) patron = '
# NOTE(review): this physical line appears to be the body of the episodios()
# function (its "def episodios(item):" header and the episode-list loop head
# were eaten by the mangling). The named regex groups are corrupted —
# "(?P.*?)" should be "(?P<name>...)" — and the collapsed "#" comments
# ("# old style", "# logger.debug(...)", "# order list") now comment out all
# code that follows them on this single line. Kept byte-identical below;
# restore from upstream history.
    \d+)?)<.+?]+>(?P.*?)\s*Calidad\s*]+>" \ "[\[]\s*(?P.*?)\s*[\]]" r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] if match["episode2"]: multi = True title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), str(match["episode2"]).zfill(2), match["lang"], match["quality"]) else: multi = False title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), match["lang"], match["quality"]) else: # old style pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+)(?P\d{2})(?:_(?P\d+)" \ "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" r = re.compile(pattern) match = [m.groupdict() for m in r.finditer(info)][0] # logger.debug("data %s" % match) str_lang = "" if match["lang"] is not None: str_lang = "[%s]" % match["lang"] if match["season2"] and match["episode2"]: multi = True if match["season"] == match["season2"]: title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"], match["episode2"], str_lang, match["quality"]) else: title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], match["season2"], match["episode2"], str_lang, match["quality"]) else: title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang, match["quality"]) multi = False season = match['season'] episode = match['episode'] itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, quality=item.quality, multi=multi, contentSeason=season, contentEpisodeNumber=episode, infoLabels = infoLabels)) # order list tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) if len(itemlist) > 1: itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios")) return itemlist def search(item, texto): logger.info("search:" + 
texto) # texto = texto.replace(" ", "+") try: item.post = "q=%s" % texto item.pattern = "buscar-list" itemlist = listado2(item) return itemlist # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: import sys for line in sys.exc_info(): logger.error("%s" % line) return []