diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py
index 22b39433..84a69863 100644
--- a/plugin.video.alfa/channels/newpct1.py
+++ b/plugin.video.alfa/channels/newpct1.py
@@ -1,470 +1,89 @@
 # -*- coding: utf-8 -*-
 import re
+import urllib
+import urlparse
-from channelselector import get_thumb
-from core import httptools
 from core import scrapertools
-from core import servertools
 from core.item import Item
-from platformcode import config, logger
-from core import tmdb
+from platformcode import logger
+from core import httptools
+
+Host='http://descargas2020.com'
-host = 'http://newpct1.com/'
 
 
 def mainlist(item):
     logger.info()
     itemlist = []
-
-    thumb_pelis=get_thumb("channels_movie.png")
-    thumb_series=get_thumb("channels_tvshow.png")
-    thumb_search = get_thumb("search.png")
-
-    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=host,
-                         extra="peliculas", thumbnail=thumb_pelis))
-
-    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
-                         thumbnail=thumb_series))
-    itemlist.append(
-        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search))
-
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=Host+"/peliculas/"))
+    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=Host+"/series/"))
+    #itemlist.append(Item(channel=item.channel, action="listado", title="Anime", url=Host+"/anime/",
+    #                     viewmode="movie_with_plot"))
+    #itemlist.append(
+    #    Item(channel=item.channel, action="listado", title="Documentales", url=Host+"/documentales/",
+    #         viewmode="movie_with_plot"))
+    #itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
     return itemlist
 
 
 def submenu(item):
     logger.info()
     itemlist = []
-    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
-    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-
-    patron = '<li><a href="[^"]+">.*?'
-    data = scrapertools.get_match(data, patron)
-
-    patron = '<a href="([^"]+)">([^>]+)</a>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '<li><a href="' + item.url + '">(.+?)<\/ul>'  #Filtrado por url
+    data_cat = scrapertools.find_single_match(data, patron)
+    patron_cat = '<li><a href="([^"]+)" title="([^"]+)"><\/li>'
+    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
     for scrapedurl, scrapedtitle in matches:
-        title = scrapedtitle.strip()
-        url = scrapedurl
-
-        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
-        itemlist.append(
-            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))
-
-    return itemlist
-
-
-def alfabeto(item):
-    logger.info()
-    itemlist = []
-
-    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
-    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-
-    patron = '<ul class="alfabeto"(.*?)</ul>'
-    data = scrapertools.get_match(data, patron)
-
-    patron = '<a href="([^"]+)"[^>]+>([^>]+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl, scrapedtitle in matches:
-        title = scrapedtitle.upper()
-        url = scrapedurl
-
-        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))
-
+        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="listado"))
     return itemlist
 
 
 def listado(item):
     logger.info()
     itemlist = []
-    url_next_page = ''
-
-    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
-    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-    #logger.debug(data)
-    logger.debug('item.modo: %s' % item.modo)
-    logger.debug('item.extra: %s' % item.extra)
-
-    if item.modo != 'next' or item.modo == '':
-        logger.debug('item.title: %s' % item.title)
-        patron = '<ul class="' + item.extra + '"(.*?)</ul>'
-        logger.debug("patron=" + patron)
-        fichas = scrapertools.get_match(data, patron)
-        page_extra = item.extra
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron_data = '<ul class="pelilist"(.+?)</ul>'
+    data_listado = scrapertools.find_single_match(data, patron_data)
+    logger.info("sadas"+data_listado)
+    patron_listado = '<li><a href="([^"]+)" title="[^"]+"'
-    else:
-        fichas = data
-        page_extra = item.extra
-
-    patron = '<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?<span(.*?)</span'
-    matches = re.compile(patron, re.DOTALL).findall(fichas)
-
-    if len(matches) > 30:
-        url_next_page = item.url
-        matches = matches[:30]
-        next_page = 'b'
-        modo = 'continue'
-    else:
-        matches = matches[30:]
-        next_page = 'a'
-        patron_next_page = '<a href="([^"]+)">Next<\/a>'
-        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
-        modo = 'continue'
-        if len(matches_next_page) > 0:
-            url_next_page = matches_next_page[0]
-            modo = 'next'
-
-    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
-        url = scrapedurl
-        title = scrapedtitle
-        thumbnail = scrapedthumbnail
-        action = "findvideos"
-        extra = ""
-        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
-        if "1.com/series" in url:
-            action = "episodios"
-            extra = "serie"
-
-
-            title = scrapertools.find_single_match(title, '([^-]+)')
-            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
-                                                                                                   1).strip()
+    patron_listado += '><img src="([^"]+)"><h2>'
+    patron_listado += '(.+?)<\/h2><span>(.+?)<\/span><\/a><\/li>'
+    logger.info("sasssss"+patron_listado)
+    matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedquality in matches:
+        if 'Serie' in item.title:
+            action = "episodios"
         else:
-            title = title.replace("Descargar", "", 1).strip()
-            if title.endswith("gratis"): title = title[:-7]
-
-        show = title
-        if item.extra != "buscar-list":
-            title = title + ' ' + calidad
-
-        context = ""
-        context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
-        if context_title:
-            try:
-                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
-                                                                                                          "tvshow")
-                context_title = context_title[1].replace("-", " ")
-                if re.search('\d{4}', context_title[-4:]):
-                    context_title = context_title[:-4]
-                elif re.search('\(\d{4}\)', context_title[-6:]):
-                    context_title = context_title[:-6]
-
-            except:
-                context_title = show
-        logger.debug('contxt title: %s' % context_title)
-        logger.debug('year: %s' % year)
-
-        logger.debug('context: %s' % context)
-        if not 'array' in title:
-            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
-                                 extra=extra,
-                                 show=context_title, contentTitle=context_title, contentType=context,
-                                 context=["buscar_trailer"], infoLabels={'year': year}))
-
-    tmdb.set_infoLabels(itemlist, True)
-
-    if url_next_page:
-        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
-                             url=url_next_page, next_page=next_page, folder=True,
-                             text_color='yellow', text_bold=True, modo=modo, plot=extra,
-                             extra=page_extra))
+            action = "findvideos"
+        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action=action,
+                                   quality=scrapedquality, show=scrapedtitle))
+    # Página siguiente
+    patron_pag = '<li><a href="([^"]+)" class="next">'
    \d+)?)<.+?]+>(?P.*?)\s*Calidad\s*]+>" \ - "[\[]\s*(?P.*?)\s*[\]]" - r = re.compile(pattern) - match = [m.groupdict() for m in r.finditer(info)][0] - - if match["episode2"]: - multi = True - title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), - str(match["episode2"]).zfill(2), match["lang"], - match["quality"]) - else: - multi = False - title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2), - match["lang"], match["quality"]) - - else: # old style - pattern = "\[(?P.*?)\].*?\[Cap.(?P\d+)(?P\d{2})(?:_(?P\d+)" \ - "(?P\d{2}))?.*?\].*?(?:\[(?P.*?)\])?" - - r = re.compile(pattern) - match = [m.groupdict() for m in r.finditer(info)][0] - # logger.debug("data %s" % match) - - str_lang = "" - if match["lang"] is not None: - str_lang = "[%s]" % match["lang"] - - if match["season2"] and match["episode2"]: - multi = True - if match["season"] == match["season2"]: - - title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"], - match["episode2"], str_lang, match["quality"]) - else: - title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], - match["season2"], match["episode2"], str_lang, - match["quality"]) - else: - title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang, - match["quality"]) - multi = False - - season = match['season'] - episode = match['episode'] - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb, - quality=item.quality, multi=multi, contentSeason=season, - contentEpisodeNumber=episode, infoLabels = infoLabels)) - - # order list - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) - if len(itemlist) > 1: - itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) - - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append( - item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios")) - - return itemlist - -def search(item, texto): - logger.info("search:" + texto) - # texto = texto.replace(" ", "+") - - try: - item.post = "q=%s" % texto - item.pattern = "buscar-list" - itemlist = listado2(item) - - return itemlist - - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - -def newest(categoria): - logger.info() - itemlist = [] - item = Item() - try: - item.extra = 'pelilist' - if categoria == 'torrent': - item.url = host+'peliculas/' - - itemlist = listado(item) - if itemlist[-1].title == ">> Página siguiente": - itemlist.pop() - item.url = host+'series/' - itemlist.extend(listado(item)) - if itemlist[-1].title == ">> Página siguiente": - itemlist.pop() - - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - return itemlist + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + patron_data='
      (.+?)
    ' + data_listado = scrapertools.find_single_match(data, patron_data) + patron = '.+?.+?