+(.*?)
+
+    patron = 'openTorrent.*?title=".*?class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
+
+    # scrape the torrent link
+    url = scrapertools.find_single_match(data, patron)
+    logger.debug("urltorrent: " + url + " Title: " + title + " Caratula: " + caratula)
+    if url != "":
+        itemlist.append(
+            Item(channel=item.channel, action="play", server="torrent", title="[torrent] - " + title, fulltitle=title,
+                 url=url, thumbnail=caratula, plot=item.plot, folder=False))
+
+    # scrape "watch videos" / "download videos": single link and multiple links
+
+    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "(")
+    data = data.replace(
+        'javascript:;" onClick="popup("http://www.descargas2020.com/d20/library/include/ajax/get_modallinks.php?links=', "")
+
+    logger.debug("matar %s" % data)
+
+    # Old server-scraping scheme used by Newpct1. It does not work with descargas2020, so it is replaced by this more common one
+    #patron_descargar = ']+>.*?'
+    #patron_ver = ']+>.*?'
+
+    #match_ver = scrapertools.find_single_match(data, patron_ver)
+    #match_descargar = scrapertools.find_single_match(data, patron_descargar)
+
+    #patron = '
+\d+)?)<.+?]+>(?P<lang>.*?)\s*Calidad\s*]+>" \
+              "[\[]\s*(?P<quality>.*?)\s*[\]]"
+        r = re.compile(pattern)
+        match = [m.groupdict() for m in r.finditer(info)][0]
+
+        if match["episode2"]:
+            multi = True
+            title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
+                                                str(match["episode2"]).zfill(2), match["lang"],
+                                                match["quality"])
+        else:
+            multi = False
+            title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
+                                             match["lang"], match["quality"])
+
+    else:  # old style
+        pattern = "\[(?P<lang>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
+                  "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<quality>.*?)\])?"
+
+        r = re.compile(pattern)
+        match = [m.groupdict() for m in r.finditer(info)][0]
+        # logger.debug("data %s" % match)
+
+        str_lang = ""
+        if match["lang"] is not None:
+            str_lang = "[%s]" % match["lang"]
+
+        if match["season2"] and match["episode2"]:
+            multi = True
+            if match["season"] == match["season2"]:
+
+                title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
+                                                  match["episode2"], str_lang, match["quality"])
+            else:
+                title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
+                                                     match["season2"], match["episode2"], str_lang,
+                                                     match["quality"])
+        else:
+            title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
+                                           match["quality"])
+            multi = False
+
+        season = match['season']
+        episode = match['episode']
+    itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
+                         quality=item.quality, multi=multi, contentSeason=season,
+                         contentEpisodeNumber=episode, infoLabels = infoLabels))
+
+    # order list
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
+    if len(itemlist) > 1:
+        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
+
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(
+            item.clone(title="[COLOR orange][B]Añadir esta serie a la videoteca[/B][/COLOR]", action="add_serie_to_library", extra="episodios"))
+
+    return itemlist
 
 
 def search(item, texto):
     logger.info("search:" + texto)
@@ -178,40 +469,22 @@ def search(item, texto):
         logger.error("%s" % line)
         return []
-
-
-def findvideos(item):
-    logger.info()
-    itemlist = []
-    new_item = []
-    data = httptools.downloadpage(item.url).data
-    itemlist = servertools.find_video_items(data = data)
-    url = scrapertools.find_single_match(data, 'location.href = "([^"]+)"')
-    new_item.append(Item(url = url, title = "Torrent", server = "torrent", action = "play"))
-    if url != '':
-        itemlist.extend(new_item)
-    for it in itemlist:
-        it.channel = item.channel
-    return itemlist
-
-
 def newest(categoria):
     logger.info()
     itemlist = []
     item = Item()
     try:
+        item.extra = 'pelilist'
         if categoria == 'torrent':
-            item.url = Host+'/peliculas-hd/'
-            action = listado(item)
-        if categoria == '4k':
-            item.url = Host + '/buscar'
-            item.post = 'q=4k'
-            item.pattern = 'buscar-list'
-            action = listado2(item)
+            item.url = host+'peliculas/'
 
-            itemlist = action
-            if itemlist[-1].title == "[COLOR cyan]Página Siguiente >>[/COLOR]":
-                itemlist.pop()
+            itemlist = listado(item)
+            if itemlist[-1].title == ">> Página siguiente":
+                itemlist.pop()
+            item.url = host+'series/'
+            itemlist.extend(listado(item))
+            if itemlist[-1].title == ">> Página siguiente":
+                itemlist.pop()
 
     # The exception is caught so the new-releases listing is not interrupted if a channel fails
     except:
@@ -220,4 +493,4 @@ def newest(categoria):
         logger.error("{0}".format(line))
         return []
 
-    return itemlist
\ No newline at end of file
+    return itemlist
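The episode parsing added above relies entirely on named regex groups, with the season digits glued to two episode digits inside the "Cap." tag. A minimal standalone sketch of the old-style branch, using a made-up title string:

# -*- coding: utf-8 -*-
import re

# Old-style tag: season digits glued to two episode digits, optionally
# followed by "_" and the end of a range, e.g. [Cap.0104_0106].
pattern = r"\[(?P<lang>.*?)\].*?\[Cap\.(?P<season>\d+)(?P<episode>\d{2})" \
          r"(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<quality>.*?)\])?"

info = "Serie Ejemplo [Español][Cap.0104_0106][HDTV]"  # made-up sample title
match = [m.groupdict() for m in re.finditer(pattern, info)][0]
print("%s %s %s %s" % (match["season"], match["episode"],
                       match["episode2"], match["quality"]))  # 01 04 06 HDTV

When the range groups are present the channel sets multi=True and emits a span title; the later sorted() call casts contentSeason and contentEpisodeNumber to int, so single- and double-digit values order correctly.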
diff --git a/plugin.video.alfa/channels/mejortorrent.py b/plugin.video.alfa/channels/mejortorrent.py
index af1add1f..f085f160 100755
--- a/plugin.video.alfa/channels/mejortorrent.py
+++ b/plugin.video.alfa/channels/mejortorrent.py
@@ -12,7 +12,7 @@ from core.item import Item
 from core.tmdb import Tmdb
 from platformcode import logger
 
-host = "https://mejortorrent.website"
+host = "http://www.mejortorrent.com"
 
 
 def mainlist(item):
@@ -40,8 +40,8 @@ def mainlist(item):
                          thumbnail=thumb_series_hd))
     itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico",
                          url= host + "/torrents-de-series.html", thumbnail=thumb_series_az))
-    itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
-                         url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
+    #itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
+    #                     url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
     itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar))
 
     return itemlist
@@ -235,6 +235,8 @@ def episodios(item):
     tmdb_title = re.sub(r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]', '', item.title).strip()
     logger.debug('tmdb_title=' + tmdb_title)
+    #logger.debug(matches)
+    #logger.debug(data)
 
     if item.extra == "series":
         oTmdb = Tmdb(texto_buscado=tmdb_title.strip(), tipo='tv', idioma_busqueda="es")
@@ -248,8 +250,8 @@
         #import web_pdb; web_pdb.set_trace()
         title = scrapedtitle + " (" + fecha + ")"
         patron = ""
-
-        url = "https://mejortorrent.website"+scrapertools.find_single_match(data,patron)
+
+        url = host + scrapertools.find_single_match(data,patron)
         # "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
         post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
         logger.debug("post=" + post)
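The POST body assembled in the hunk above is plain form encoding; a Python 2 sketch (urllib.urlencode, which lives in urllib.parse on Python 3) using approximations of the example values quoted in the code comment:

# -*- coding: utf-8 -*-
import urllib

# Field values approximating the example comment above; normally they are
# scraped per episode from the listing page.
post = urllib.urlencode({"episodios[1]": "11744", "total_capis": "5",
                         "tabla": "series", "titulo": "Sea Patrol - 2a Temporada"})
print(post)  # e.g. episodios%5B1%5D=11744&tabla=series&... (key order may vary)

urlencode takes care of the bracketed key and the spaces, which matches the shape quoted in the hunk's comment.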
@@ -319,11 +321,11 @@ def show_movie_info(item):
     logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")
 
     torrent_data = httptools.downloadpage(url).data
-    link = scrapertools.get_match(torrent_data, "")
+    link = scrapertools.get_match(torrent_data, "")
     link = urlparse.urljoin(url, link)
     logger.debug("link=" + link)
     itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
-                         thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
+                         thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False, extra="pelicula"))
 
     return itemlist
@@ -334,29 +336,29 @@ def play(item):
     itemlist = []
 
     if item.extra == "pelicula":
-        #itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url,
-        #                     thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
-        data = httptools.downloadpage(item.url).data
-        logger.debug("data=" + data)
-        #url https://mejortorrent.website/peli-descargar-torrent-16443-Thor-Ragnarok.html
-        patron = "https://mejortorrent.website/peli-descargar-torrent-((.*?))-"
-        newid = scrapertools.find_single_match(item.url, patron)
         #params = dict(urlparse.parse_qsl(item.extra))
-        patron = "https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=" + newid[0] + "&link_bajar=1"
-        #https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=16443&link_bajar=1
         #link=scrapertools.find_single_match(data,patron)
         #data = httptools.downloadpage(link).data
-        data = httptools.downloadpage(patron).data
-        patron = "Pincha "
-        link = "https://mejortorrent.website" + scrapertools.find_single_match(data, patron)
-        logger.info("link=" + link)
-        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
-                             thumbnail=item.thumbnail, plot=item.plot, folder=False))
+        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url,
+                             thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
+        #data = httptools.downloadpage(item.url).data
+        #logger.debug("data=" + data)
+        #url http://www.mejortorrent.com/peli-descargar-torrent-16443-Thor-Ragnarok.html
+        #patron = host + "/peli-descargar-torrent-((.*?))-"
+        #newid = scrapertools.find_single_match(item.url, patron)
         #params = dict(urlparse.parse_qsl(item.extra))
+        #patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=" + newid[0] + "&link_bajar=1"
+        #http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=16443&link_bajar=1
         #link=scrapertools.find_single_match(data,patron)
         #data = httptools.downloadpage(link).data
+        #data = httptools.downloadpage(patron).data
+        #patron = "Pincha "
+        #link = host + scrapertools.find_single_match(data, patron)
+        #logger.info("link=" + link)
+        #itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
+        #                     thumbnail=item.thumbnail, plot=item.plot, folder=False))
 
     else:
         #data = httptools.downloadpage(item.url, post=item.extra).data
@@ -364,14 +366,14 @@
         logger.debug("data=" + data)
 
         params = dict(urlparse.parse_qsl(item.extra))
-        patron = "https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id
+        patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id
         #link=scrapertools.find_single_match(data,patron)
         #data = httptools.downloadpage(link).data
         data = httptools.downloadpage(patron).data
 
         patron = "Pincha "
-        link = "https://mejortorrent.website" + scrapertools.find_single_match(data, patron)
+        link = host + scrapertools.find_single_match(data, patron)
         logger.info("link=" + link)
         itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                              thumbnail=item.thumbnail, plot=item.plot, folder=False))
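In the series branch of play() above, item.extra carries a query string packed earlier by episodios(); it is unpacked with urlparse.parse_qsl and used to build the download-counter URL. A sketch with illustrative values (note the hunk reuses the name patron for what is actually a URL, not a regex):

# -*- coding: utf-8 -*-
import urlparse  # Python 2 module; urllib.parse in Python 3

host = "http://www.mejortorrent.com"
extra = "total_capis=5&tabla=series&titulo=Sea+Patrol"  # illustrative
item_id = "11744"                                       # illustrative

# Rebuild a dict from the query string and assemble the counter URL.
params = dict(urlparse.parse_qsl(extra))
patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item_id
print(patron)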
title="Películas", url=host, - pattern="peliculas", thumbnail=get_thumb('movies', auto=True))) + extra="peliculas", thumbnail=thumb_pelis )) + + itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series", + thumbnail=thumb_series)) + + itemlist.append(Item(channel=item.channel, action="submenu", title="Documentales", url=host, extra="varios", + thumbnail=thumb_series)) itemlist.append( - Item(channel=item.channel, action="submenu", title="Series", url=host, - pattern="series", thumbnail=get_thumb('tvshows', auto=True))) - itemlist.append( - Item(channel=item.channel, action="anime", title="Anime", url=host, - pattern="anime", thumbnail=get_thumb('anime', auto=True))) - itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", - thumbnail=get_thumb('search', auto=True))) + Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar", thumbnail=thumb_search)) return itemlist - -def search(item, texto): - logger.info("search:" + texto) - # texto = texto.replace(" ", "+") - - try: - item.post = "q=%s" % texto - item.pattern = "buscar-list" - itemlist = listado2(item) - - return itemlist - - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def anime(item): - logger.info() - itemlist = [] - title = "Anime" - url = host + "anime" - itemlist.append(item.clone(channel=item.channel, action="listado", title=title, url=url, - pattern="pelilist")) - itemlist.append( - item.clone(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, - thumbnail=item.thumbnail[:-4] + "_az.png", pattern="pelilist")) - - return itemlist - - def submenu(item): logger.info() itemlist = [] - data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) - # data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - pattern = '
 def submenu(item):
     logger.info()
     itemlist = []
 
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-    # data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
+    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
 
-    pattern = '  • .*?      (.*?)    ' % (host, item.pattern)
-    data = scrapertools.get_match(data, pattern)
+    #patron = '  • .*?      (.*?)    '
+    patron = '  • .*?      (.*?)    ' #Filtered by url
+    data = scrapertools.get_match(data, patron)
 
-    pattern = '    ([^>]+)'
-    matches = re.compile(pattern, re.DOTALL).findall(data)
+    patron = '([^>]+)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, scrapedtitle in matches:
         title = scrapedtitle.strip()
         url = scrapedurl
-        if item.pattern in title.lower():
-            itemlist.append(item.clone(channel=item.channel, action="listado", title=title, url=url,
-                                       pattern="pelilist"))
-            itemlist.append(
-                item.clone(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url,
-                           thumbnail=item.thumbnail[:-4] + "_az.png", pattern="pelilist"))
-
-    if 'Películas' in item.title:
-        new_item = item.clone(title='Peliculas 4K', url=host+'buscar', post='q=4k', action='listado2',
-                              pattern='buscar-list')
-        itemlist.append(new_item)
-
+        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
+        itemlist.append(
+            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))
+
+    if item.extra == "peliculas":
+        itemlist.append(Item(channel=item.channel, action="listado", title="Películas 4K", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
+        itemlist.append(
+            Item(channel=item.channel, action="alfabeto", title="Películas 4K" + " [A-Z]", url=host + "peliculas-hd/4kultrahd/", extra="pelilist"))
+
     return itemlist
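The submenu scraper above (and alfabeto below) works in two stages: get_match first narrows the page to a single menu block, then a compiled findall over that block yields (url, title) pairs. A standalone sketch against hypothetical markup, since the real patterns target the site's own layout:

# -*- coding: utf-8 -*-
import re

# Hypothetical menu markup standing in for the real page.
data = ('<div id="menu"><a href="/peliculas/">Peliculas</a>'
        '<a href="/series/">Series</a></div><div id="footer">...</div>')

# Stage 1: cut the page down to the block of interest.
block = re.search('<div id="menu">(.*?)</div>', data, re.DOTALL).group(1)

# Stage 2: extract (url, title) pairs from the narrowed block.
matches = re.compile('<a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(block)
for scrapedurl, scrapedtitle in matches:
    print("%s -> %s" % (scrapedurl, scrapedtitle.strip()))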
@@ -108,11 +68,11 @@ def alfabeto(item):
     logger.info()
     itemlist = []
 
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
-    # data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
+    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
 
-    pattern = '      (.*?)    '
-    data = scrapertools.get_match(data, pattern)
+    patron = '      (.*?)    '
+    data = scrapertools.get_match(data, patron)
 
     patron = ']+>([^>]+)'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -121,7 +81,7 @@ def alfabeto(item):
         title = scrapedtitle.upper()
         url = scrapedurl
 
-        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, pattern=item.pattern))
+        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))
 
     return itemlist
 
@@ -129,73 +89,113 @@ def listado(item):
     logger.info()
     itemlist = []
+    url_next_page =''
 
-    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
+    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
+    #logger.debug(data)
+    logger.debug('item.modo: %s'%item.modo)
+    logger.debug('item.extra: %s'%item.extra)
 
-    # logger.debug("data %s " % data)
     next_page = scrapertools.find_single_match(data, '