From 4c3393249ddbee02ed671630f90ca03502c6c095 Mon Sep 17 00:00:00 2001
From: Kingbox <37674310+lopezvg@users.noreply.github.com>
Date: Fri, 4 May 2018 20:42:12 +0200
Subject: [PATCH 1/2] TmDB: Avoid returning an incorrect infoLabel
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

After infoLabels have been created for a TV series, the same information
could be returned for movies.
---
 plugin.video.alfa/core/tmdb.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/plugin.video.alfa/core/tmdb.py b/plugin.video.alfa/core/tmdb.py
index a318cc7d..1ba25685 100755
--- a/plugin.video.alfa/core/tmdb.py
+++ b/plugin.video.alfa/core/tmdb.py
@@ -404,7 +404,8 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
             otmdb = Tmdb(external_id=item.infoLabels['tvrage_id'], external_source="tvrage_id",
                          tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
 
-    if otmdb is None:
+    #if otmdb is None:
+    if not item.infoLabels['tmdb_id'] and not item.infoLabels['imdb_id'] and not item.infoLabels['tvdb_id'] and not item.infoLabels['freebase_mid'] and not item.infoLabels['freebase_id'] and not item.infoLabels['tvrage_id']:
         # The search by ID could not be done...
         # do it by title instead
         if tipo_busqueda == 'tv':
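
Note: the new condition reads as "no external ID of any kind was supplied". A
minimal, self-contained sketch of that guard (the helper name and dict access
are illustrative, not part of tmdb.py):

    EXTERNAL_ID_KEYS = ('tmdb_id', 'imdb_id', 'tvdb_id',
                        'freebase_mid', 'freebase_id', 'tvrage_id')

    def needs_title_search(info_labels):
        # True only when the item carries no external ID at all. Checking the
        # item's own IDs instead of "otmdb is None" avoids the bug where a
        # Tmdb object left over from a series lookup makes a movie item skip
        # its own search and inherit the series' infoLabels.
        return not any(info_labels.get(key) for key in EXTERNAL_ID_KEYS)
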
From b20607c8ac07d303a0750f72cdc1c68a52f88983 Mon Sep 17 00:00:00 2001
From: Kingbox <37674310+lopezvg@users.noreply.github.com>
Date: Mon, 7 May 2018 19:05:50 +0200
Subject: [PATCH 2/2] Mejor Torrent: channel restructuring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Building on the idea of the previous Mejor Torrent channel, it is
restructured to add more functionality. It is a difficult, somewhat
inconsistent site that exposes little metadata, but it has very good
content:
- Listings of movies, series and documentaries, in standard and HD quality, with and without an alphabetical index
- Search across those three content types
- Series listings, which usually return episodes, converted to seasons
- Video library support for movies, series and documentaries
- Extensive use of www.themoviedb.org for richer content information
- Detailed title handling
- Pagination in all functions
---
 plugin.video.alfa/channels/mejortorrent.json |    8 +
 plugin.video.alfa/channels/mejortorrent.py   | 1212 +++++++++++++-----
 2 files changed, 885 insertions(+), 335 deletions(-)

diff --git a/plugin.video.alfa/channels/mejortorrent.json b/plugin.video.alfa/channels/mejortorrent.json
index a64f06e1..49621d31 100755
--- a/plugin.video.alfa/channels/mejortorrent.json
+++ b/plugin.video.alfa/channels/mejortorrent.json
@@ -28,6 +28,14 @@
             "default": true,
             "enabled": true,
             "visible": true
+        },
+        {
+            "id": "include_in_newest_documentales",
+            "type": "bool",
+            "label": "Incluir en Novedades - Documentales",
+            "default": true,
+            "enabled": true,
+            "visible": true
         }
     ]
 }
\ No newline at end of file

diff --git a/plugin.video.alfa/channels/mejortorrent.py b/plugin.video.alfa/channels/mejortorrent.py
index c31662f6..424c122c 100755
--- a/plugin.video.alfa/channels/mejortorrent.py
+++ b/plugin.video.alfa/channels/mejortorrent.py
@@ -8,13 +8,13 @@ import urlparse
 from channelselector import get_thumb
 from core import httptools
 from core import scrapertools
+from core import servertools
 from core.item import Item
-from core.tmdb import Tmdb
-from platformcode import logger
+from platformcode import config, logger
+from core import tmdb
 
 host = "http://www.mejortorrent.com"
 
-
 def mainlist(item):
     logger.info()
 
@@ -28,48 +28,889 @@ def mainlist(item):
     thumb_docus = get_thumb("channels_documentary.png")
     thumb_buscar = get_thumb("search.png")
 
-    itemlist.append(Item(channel=item.channel, title="Peliculas", action="getlist",
+    itemlist.append(Item(channel=item.channel, title="Novedades", action="listado_busqueda", extra="novedades", tipo=False,
+                         url= host + "/secciones.php?sec=ultimos_torrents", thumbnail=thumb_buscar))
+    itemlist.append(Item(channel=item.channel, title="Peliculas", action="listado", extra="peliculas", tipo=False,
                          url= host + "/torrents-de-peliculas.html", thumbnail=thumb_pelis))
-    itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="getlist",
+    itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="listado", extra="peliculas", tipo=False,
                          url= host + "/torrents-de-peliculas-hd-alta-definicion.html", thumbnail=thumb_pelis_hd))
-    itemlist.append(Item(channel=item.channel, title="Series", action="getlist",
+    itemlist.append(Item(channel=item.channel, title="Películas Listado Alfabetico", action="alfabeto",
+                         url= host + "/peliculas-buscador.html" +
+                              "?campo=letra&valor&valor2=Acci%%F3n&valor3=%s&valor4=3&submit=Buscar", extra="peliculas",
+                         thumbnail=thumb_pelis))
+    itemlist.append(Item(channel=item.channel, title="Series", action="listado", extra="series", tipo=False,
                          url= host + "/torrents-de-series.html", thumbnail=thumb_series))
-    itemlist.append(Item(channel=item.channel, title="Series HD", action="getlist",
+    itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="alfabeto", extra="series",
+                         url= host + "/torrents-de-series.html", thumbnail=thumb_series_az))
+    itemlist.append(Item(channel=item.channel, title="Series HD", action="listado", extra="series", tipo=False,
                          url= host + "/torrents-de-series-hd-alta-definicion.html", thumbnail=thumb_series_hd))
-    itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico",
-                         url= host + "/torrents-de-series.html", thumbnail=thumb_series_az))
-    #itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
-    #                     url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
-    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar))
+    itemlist.append(Item(channel=item.channel, title="Series HD Listado Alfabetico", action="alfabeto", extra="series-hd",
+                         url= host + "/torrents-de-series-hd-alta-definicion.html", thumbnail=thumb_series_az))
+    itemlist.append(Item(channel=item.channel, title="Documentales", action="listado", extra="documentales", tipo=False,
+                         url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
+    itemlist.append(Item(channel=item.channel, title="Documentales Listado Alfabetico", action="alfabeto", extra="documentales", url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
+    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar, tipo=False))
 
     return itemlist
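
Note: each menu entry is a core.item.Item whose "action" field names the
channel function that will handle it (listado, alfabeto, listado_busqueda,
search), while "extra" and "tipo" tell that function which scraper variant to
use. As a rough model of the dispatch (a hypothetical mini-launcher; Alfa's
real one lives outside this file and also special-cases search, which
receives the typed text as a second argument):

    import importlib

    def run(item):
        # Hypothetical dispatcher: load the channel module and call the
        # function named by the item's action, e.g. listado(item).
        channel = importlib.import_module("channels." + item.channel)
        return getattr(channel, item.action)(item)
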
"/secciones.php?sec=descargas&ap=series_hd&func=mostrar&letra=.")) + for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']: + itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra="series", tipo=True, + url= host + "/secciones.php?sec=descargas&ap=series_hd&func=mostrar&letra=" + letra.lower())) - itemlist.append(Item(channel=item.channel, action="getlist", title="Todas", - url= host + "/series-letra..html")) + elif item.extra == "series" or item.extra == "documentales": + itemlist.append(Item(channel=item.channel, action="listado", title="Todas", extra=item.extra, tipo=True, url= host + "/" + item.extra + "-letra-..html")) + for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']: + itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra=item.extra, tipo=True, url= host + "/" + item.extra + "-letra-" + letra.lower() + ".html")) + + elif item.extra == "peliculas": + itemlist.append(Item(channel=item.channel, action="listado", title="Todas", extra=item.extra, tipo=True, url=item.url % ".")) + for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']: + itemlist.append(Item(channel=item.channel, action="listado", title=letra, extra=item.extra, tipo=True, url=item.url % letra.lower())) return itemlist +def listado(item): + logger.info() + itemlist = [] + url_next_page ='' # Controlde paginación + cnt_tot = 30 # Poner el num. máximo de items por página + + if item.category: + del item.category + if item.totalItems: + del item.totalItems + + # La url de Películas por orden Alfabético tiene un formato distinto + if item.extra == "peliculas" and item.tipo: + url = item.url.split("?") + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(url[0], post=url[1]).data) + else: + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + + # En este canal las url's y los títulos tienen diferente formato dependiendo del contenido + if item.extra == "peliculas" and item.tipo: #Desde Lista Alfabética + patron = " cnt_tot and item.extra == "documentales" and pag: + item.next_page = '' + if item.next_page != 'b': + if matches_cnt > cnt_pag + cnt_tot: + url_next_page = item.url + matches = matches[cnt_pag:cnt_pag+cnt_tot] + next_page = '' + if matches_cnt <= cnt_pag + (cnt_tot * 2): + if pag: + next_page = 'b' + modo = 'continue' + else: + matches = matches[cnt_pag:cnt_pag+cnt_tot] + next_page = 'a' + patron_next_page = " Siguiente >> <\/a>" + matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data) + modo = 'continue' + if len(matches_next_page) > 0: + url_next_page = urlparse.urljoin(item.url, matches_next_page[0]) + modo = 'next' + if item.next_page: + del item.next_page + + #logger.debug(data) + #logger.debug("PATRON1: " + patron + " / ") + #logger.debug(matches) + + # Primera pasada + # En la primera pasada se obtiene una información básica del título a partir de la url + # Para Series se obtienen la temporada y el episodio + # Se limpian algunas etiquetas del item inical. 
+
+    # First pass
+    # The first pass extracts basic title information from the URL
+    # For series it also extracts the season and episode
+    # Some leftover flags from the initial item are cleaned up
+    for scrapedurl, scrapedthumbnail in matches:
+        item_local = item.clone()
+        if item_local.tipo:
+            del item_local.tipo
+        if item_local.totalItems:
+            del item_local.totalItems
+        item_local.title = ''
+        item_local.context = "['buscar_trailer']"
+
+        item_local.title = scrapertools.get_match(scrapedurl, patron_enlace)
+        item_local.title = item_local.title.replace("-", " ")
+        item_local.url = urlparse.urljoin(item_local.url, scrapedurl)
+        item_local.thumbnail = host + urllib.quote(scrapedthumbnail)
+        item_local.contentThumbnail = item_local.thumbnail
+        item_local.infoLabels['year'] = '-'     # The year is unknown; "-" lets TMDB work it out automatically
+
+        # So the context menu shows the "add to video library" options correctly
+        if item_local.extra == "series":
+            if "/serie-episodio" in item_local.url:
+                item_local.contentType = "episode"
+            else:
+                item_local.contentType = "season"
+
+        # Work out the real series name. Look for season and episode numbers
+        if item_local.extra == "series" or item.extra == "documentales":
+            if item_local.contentType == "episode":
+                real_title = scrapertools.find_single_match(scrapedurl, patron_title_ep)
+                real_title = real_title.replace("-", " ")
+                item_local.contentSeason = scrapertools.find_single_match(scrapedurl, '.*?-(\d{1,2})x\d{1,2}.*?\.html')
+
+                # The season root page has to be looked up
+                data_epi = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item_local.url).data)
+                url = scrapertools.find_single_match(data_epi, r"
+
+    if item.next_page != 'b':
+        if matches_cnt > cnt_pag + cnt_tot:
+            url_next_page = item.url
+        matches = matches[cnt_pag:cnt_pag+cnt_tot]
+        next_page = ''
+        if matches_cnt <= cnt_pag + (cnt_tot * 2):
+            if pag:
+                next_page = 'b'
+            modo = 'continue'
+    else:
+        matches = matches[cnt_pag:cnt_pag+cnt_tot]
+        next_page = 'a'
+        patron_next_page = " Siguiente >> <\/a>"
+        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
+        modo = 'continue'
+        if len(matches_next_page) > 0:
+            url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
+            modo = 'next'
+    if item.next_page:
+        del item.next_page
+
+    if matches_cnt >= cnt_tot:
+        cnt_pag += cnt_tot
+    else:
+        cnt_pag += matches_cnt
+
+    for scrapedurl, scrapedtitle, scrapedinfo in matches:
+        # Create "item_local" and clean up some leftovers from item
+        item_local = item.clone()
+        if item_local.category:
+            del item_local.category
+        if item_local.tipo:
+            del item_local.tipo
+        if item_local.totalItems:
+            del item_local.totalItems
+        item_local.contentThumbnail = ''
+        item_local.thumbnail = ''
+        item_local.context = "['buscar_trailer']"
+        item_local.infoLabels['year'] = '-'     # The year is unknown; "-" lets TMDB work it out automatically
+
+        # Clean up the titles; extract quality, audio and language data
+        scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
+        title = scrapedtitle
+
+        title_subs = ""
+        item_local.language = ""
+        if "[subs" in title or "[Subs" in title or "[VOS" in title or "[VOSE" in title or "(V.O.S.E" in title:
+            item_local.language = "VOS"
+            title = title.replace(" [Subs. integrados]", "").replace(" [subs. Integrados]", "").replace(" [VOSE", "").replace(" (V.O.S.E)", "")
+        if "latino" in title or "Latino" in title or "rgentina" in title:
+            item_local.language = "LAT"
+            title = title.replace(" Latino", "").replace(" latino", "").replace(" Argentina", "").replace(" argentina", "")
+        title = title.replace("Castellano", "").replace("castellano", "").replace("inglés", "").replace("ingles", "").replace("Inglés", "").replace("Ingles", "")
+
+        if "3d" in title or "3D" in title:          # Save 3D info for after the TMDB lookup
+            title_subs = "[3D]"
+            title = title.replace(" [3d]", "").replace(" 3d", "").replace(" [3D]", "").replace(" 3D", "")
+        if "Temp" in title or "temp" in title:      # Save season info for after the TMDB lookup
+            title_subs = "[Temp.]"
+        if "Audio" in title or "audio" in title:    # Save audio info for after the TMDB lookup
+            title_subs = '[%s]' % scrapertools.find_single_match(title, r'(\[[a|A]udio.*?\])')
+            title = re.sub(r'\[[a|A]udio.*?\]', '', title)
+        if "[Dual" in title or "[dual" in title:
+            title_subs = "[Dual]"
+            title = re.sub(r'\[D|dual.*?\]', '', title)
+
+        if title.endswith('.'):
+            title = title[:-1]
+        title = title.replace("á", "a", 1).replace("é", "e", 1).replace("í", "i", 1).replace("ó", "o", 1).replace("ú", "u", 1).replace("ü", "u", 1)
+        if not title:
+            title = "dummy"
+        title = scrapertools.remove_htmltags(title)
+
+        if item.extra == "novedades" and ("/serie-" in scrapedurl or "/doc-" in scrapedurl):
+            item_local.quality = scrapertools.find_single_match(scrapedtitle, r'.*?\[(.*?)\]')
+        else:
+            item_local.quality = scrapertools.remove_htmltags(scrapedinfo).decode('iso-8859-1').encode('utf8')
+        item_local.quality = item_local.quality.replace("(", "").replace(")", "").replace("[", "").replace("]", "").replace("Documental", "").replace("documental", "")
+
+        item_local.url = urlparse.urljoin(item.url, scrapedurl)
+
+        # Prepare the basic information for TMDB
+        if "/serie-" in scrapedurl or "/doc-" in scrapedurl:
+            item_local.action = "episodios"
+            if "/serie-" in scrapedurl:
+                item_local.extra = "series"
+            else:
+                item_local.extra = "documentales"
+            item_local.contentType = "season"
+
+            title = re.sub(r'\[\d+.*?\]', '', title)        # Strip the quality from the title
+            item_local.contentSerieName = scrapertools.find_single_match(title, '(.*?) - \d.*?')
+            if not item_local.contentSerieName:
+                item_local.contentSerieName = title
+            if item_local.contentSerieName.endswith(' '):
+                item_local.contentSerieName = item_local.contentSerieName[:-1]
+            title = item_local.contentSerieName
+            item_local.title = title
+            item_local.infoLabels['tvshowtitle'] = item_local.contentSerieName
+            if not item_local.contentSerieName:
+                item_local.contentSerieName = "dummy"
+            item_local.contentSeason = scrapertools.find_single_match(scrapedurl, '.*?-(\d{1,2})-Temp.*?\.html')
+            if not item_local.contentSeason:
+                item_local.contentSeason = 1
+
+        if "(HDRip" in title or "(BR" in title or "(VHSRip" in title or "(DVDRip" in title or "(FullB" in title or "(fullb" in title or "(Blu" in title or "(4K" in title or "(4k" in title or "(HEVC" in title or "(IMAX" in title or "Extendida" in title or "[720p]" in title or "[1080p]" in title:
+            if not item_local.quality:
+                item_local.quality = scrapertools.find_single_match(title, r'\(.*?\)?\(.*?\)')
+            if not item_local.quality:
+                item_local.quality = scrapertools.find_single_match(title, r'[\[|\(](.*?)[\)|\]]')
+            title = re.sub(r'\(.*?\)?\(.*?\)', '', title)
+            title = re.sub(r'[\[|\(].*?[\)|\]]', '', title)
+        if not item_local.quality:
+            if "FullBluRay" in title or "fullbluray" in title:
+                item_local.quality = "FullBluRay"
+                title = title.replace("FullBluRay", "").replace("fullbluray", "")
+            if "4K" in title or "4k" in title or "HDR" in title or "hdr" in title:
+                item_local.quality = "4K"
+                title = title.replace("4k-hdr", "").replace("4K-HDR", "").replace("hdr", "").replace("HDR", "").replace("4k", "").replace("4K", "")
+        title = title.replace("(", "").replace(")", "").replace("[", "").replace("]", "")
+        if title.endswith(' '):
+            title = title[:-1]
+        item_local.title = title
+
+        if "/peli-" in scrapedurl:
+            item_local.action = "findvideos"
+            item_local.extra = "peliculas"
+            item_local.contentType = "movie"
+            item_local.contentTitle = title
+
+        if "Saga" in item_local.contentTitle or "Saga" in item_local.contentSerieName:
+            item_local.contentTitle = item_local.contentTitle.replace("Saga ", "").replace("Saga", "")
+            item_local.contentSerieName = item_local.contentSerieName.replace("Saga ", "").replace("Saga", "")
+            title_subs = "[Saga]"
+        if "Colecc" in item_local.contentTitle or "Colecc" in item_local.contentSerieName:
+            item_local.contentTitle = item_local.contentTitle.replace("Coleccion ", "").replace("Coleccion", "")
+            item_local.contentSerieName = item_local.contentSerieName.replace("Coleccion ", "").replace("Coleccion", "")
+            title_subs = "[Coleccion]"
+
+        # Temporarily stash subtitle/tag info, if there is any
+        item_local.extra = item_local.extra + title_subs
+
+        itemlist.append(item_local.clone())
+
+        #logger.debug(item_local)
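
Note: the loop above strips bracketed tags ([3D], [Audio ...], [Dual],
quality markers) out of the display title and parks them in title_subs, so
TMDB is queried with a clean name and the tags can be re-applied afterwards.
The same idea in isolation (an illustrative helper, not part of the channel):

    import re

    TAG_RE = re.compile(r'\[[^\]]*\]')

    def split_title(scraped):
        # Pull bracketed tags out of a scraped title and keep them for later.
        tags = TAG_RE.findall(scraped)             # e.g. ['[3D]', '[Dual]']
        clean = TAG_RE.sub('', scraped).strip()    # e.g. 'Coco'
        return clean, tags

    # split_title('Coco [3D] [Dual]') -> ('Coco', ['[3D]', '[Dual]'])
    # TMDB is searched with 'Coco'; the tags go back onto the final label.
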
+    # Call TMDB to complete the InfoLabels from itemlist.
+    # Doing it from itemlist is better because the queries are sent in parallel
+    tmdb.set_infoLabels(itemlist, seekTmdb=True)
+
+    # Second pass to polish the titles with the data returned by TMDB
+    for item_local in itemlist:
+        title = item_local.title
+        title_subs = ""
+        temporada = ""
+        title_subs = scrapertools.find_single_match(item_local.extra, r'(\[.*?\])')
+        if "[Temp.]" in item_local.extra:
+            temporada = "[Temp.]"
+            title_subs = ""
+        if "Audio" in item_local.extra or "audio" in item_local.extra:
+            title_subs = '[%s]' % scrapertools.find_single_match(item_local.extra, r'\[[a|A]udio (.*?)\]')
+        item_local.extra = re.sub(r'\[.*?\]', '', item_local.extra)
+
+        # If TMDB did not find the video, clear the year
+        if item_local.infoLabels['year'] == "-":
+            item_local.infoLabels['year'] = ''
+            item_local.infoLabels['aired'] = ''
+
+        # Now polish the titles a little, depending on whether smart titles are selected
+        if not config.get_setting("unify"):     # If smart titles are NOT selected:
+            if item_local.contentType == "season" or item_local.contentType == "tvshow":
+                if item_local.extra == "series" or temporada == "[Temp.]":
+                    title = '%s - Temporada %s [%s][%s][%s]' % (item_local.contentSerieName, str(item_local.contentSeason), scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), item_local.quality, item_local.language)
+                else:
+                    title = '%s [%s][%s][%s]' % (item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'), item_local.quality, item_local.language)
+
+            elif item_local.contentType == "movie":
+                title = '%s [%s][%s][%s]' % (title, str(item_local.infoLabels['year']), item_local.quality, item_local.language)
+
+        if config.get_setting("unify"):         # If smart titles ARE selected:
+            if item_local.contentType == "season" or item_local.contentType == "tvshow":
+                if item_local.extra == "series" or temporada == "[Temp.]":
+                    title = '%s - Temporada %s -%s-' % (item_local.contentSerieName, item_local.contentSeason, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'))
+                else:
+                    title = '%s -%s-' % (item_local.contentSerieName, scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})'))
+            title_subs = title_subs.replace("[", "-").replace("]", "-")
+
+        title = title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "")
+        item_local.title = title + title_subs
+        item_local.contentTitle += title_subs       # add extra info to the display title
+
+        #logger.debug("title=[" + item_local.title + "], url=[" + item_local.url + "], calidad=[" + item_local.quality + "]")
+        #logger.debug(item_local)
+
+    if url_next_page:
+        itemlist.append(
+            Item(channel=item.channel, action="listado_busqueda", title="[COLOR gold][B]Pagina siguiente >>[/B][/COLOR]", url=url_next_page, next_page=next_page, cnt_pag=cnt_pag, pag=pag, modo=modo, extra=item.extra, tipo=item.tipo))
+
+    logger.debug(url_next_page + " / " + next_page + " / " + str(matches_cnt) + " / " + str(cnt_pag) + " / " + str(pag) + " / " + modo + " / " + item.extra + " / " + str(item.tipo))
+
+    return itemlist
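
Note: tmdb.set_infoLabels accepts either a single Item or a whole itemlist;
the channel batches deliberately because, as the comment above says, the list
form sends the TMDB queries in parallel. Schematically (the pre-filled fields
are the ones the first pass sets; keyword names as used in this patch):

    from core import tmdb
    from core.item import Item

    items = [
        Item(contentTitle="Coco", contentType="movie",
             infoLabels={'year': '-'}),
        Item(contentSerieName="Breaking Bad", contentType="season",
             contentSeason=2, infoLabels={'year': '-'}),
    ]

    # One batched call: each item's infoLabels (year, plot, tmdb_id,
    # artwork...) are completed in place, with the lookups run in parallel.
    tmdb.set_infoLabels(items, seekTmdb=True)

    # The single-item form used in findvideos below also exists, but it is
    # slower when there are many items:
    # tmdb.set_infoLabels(item, True)
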
+
+
+def findvideos(item):
+    #import xbmc
+    logger.info()
+    itemlist = []
+
+    # Find out whether we are in a pop-up window launched from a main-menu tile
+    # via the "play_from_library" function
+    #unify_status = False
+    #if xbmc.getCondVisibility('Window.IsMedia') == 1:
+    #    unify_status = config.get_setting("unify")
+    unify_status = config.get_setting("unify")
+
+    # Fetch up-to-date episode information, if it is missing
+    if not item.infoLabels['tmdb_id']:
+        tmdb.set_infoLabels(item, True)
+
+    if item.post:       # It may carry data for a "post" call. For now used for documentaries, but it could be series too
+        data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url, post=item.post).data)
+        data = data.replace('"', "'")
+        patron = ">Pincha
+    else:
+        data = re.sub(r"\n|\r|\t|\s{2}|(.*?)", "", httptools.downloadpage(item.url).data)
+        patron = "
+    torrent_data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(url).data)
+    #logger.debug(torrent_data)
+    link = scrapertools.get_match(torrent_data, ">Pincha
+
+    return itemlist
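
Note: findvideos boils down to fetching the download page, pulling the
.torrent link out of the "Pincha..." anchor, and wrapping it in an Item that
the torrent server can play. Stripped of the site detail (a sketch; the href
pattern is illustrative because the real one was lost in this paste):

    import re
    import urlparse

    def torrent_item(item, page_html):
        # Assumed markup: <a href='...file.torrent'>Pincha aqui...</a>
        match = re.search(r"href='([^']+\.torrent)'", page_html)
        if not match:
            return []
        link = urlparse.urljoin(host, match.group(1))   # host: module global
        return [item.clone(action="play", server="torrent", url=link)]
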
+
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+
+    data = re.sub(r"\n|\r|\t|\s{2}|(.*?)", "", httptools.downloadpage(item.url).data)
+
+    # Data needed to build the Post. Used for documentaries
+    total_capis = scrapertools.find_single_match(data, "")
+    tabla = scrapertools.find_single_match(data, "")
+    titulo_post = scrapertools.find_single_match(data, "")
+
+    # Select the slice of the page we are interested in
+    data = scrapertools.find_single_match(data,
+                                          "(.*?)")
") + + # Prepara el patrón de búsqueda de: URL, título, fechas y dos valores mas sin uso + if item.extra == 'series': + patron = ".*?]+>
+    else:
+        patron = ".*?]+>(.*?)<\/td>.*?"
+    patron += "]+>Fecha: ([^<]+)<\/div><\/td>.*?"
+    patron += "
+            if len(epi) > 1:
+                #temporada = re.sub("\D", "", epi[0])
+                capitulo = re.search("\d+", epi[1])
+                if capitulo:
+                    item_local.contentEpisodeNumber = capitulo.group()
+                else:
+                    item_local.contentEpisodeNumber = 1
+
+        else:       # Prepare the Post for documentaries
+            item_local.contentEpisodeNumber = 1
+            item_local.url = host + "/secciones.php?sec=descargas&ap=contar_varios"
+            item_local.post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo_post})
+
+        item_local.title = scrapedtitle
+
+        itemlist.append(item_local.clone())
+
+    # Call TMDB to complete the episode InfoLabels
+    tmdb.set_infoLabels(itemlist, seekTmdb=True)
+
+    # Second pass to polish the titles with the data returned by TMDB
+    for item_local in itemlist:
+        title = item_local.title
+
+        # If there is no TMDB data, use the local data we do know
+        if item_local.infoLabels['aired']:
+            year = scrapertools.find_single_match(str(item_local.infoLabels['aired']), r'\/(\d{4})')
+        else:
+            year = scrapertools.find_single_match(fecha, r'(\d{4})')
+        if not item_local.infoLabels['year']:
+            item_local.infoLabels['year'] = year
+
+        # For multi-episode entries, take the episode title from the local data
+        if scrapertools.find_single_match(title, r'\d+x\d+.*?(\w+.*?\d+x\d+)'):
+            item_local.infoLabels['episodio_titulo'] = scrapertools.find_single_match(title, r'\d+x\d+.*?(\w+.*?\d+x\d+)')
+
+        # Prepare the title so it is compatible with "Add series to video library"
+        if item_local.infoLabels['episodio_titulo']:
+            item_local.title = '%sx%s %s, %s [%s]' % (str(item_local.contentSeason), item_local.contentEpisodeNumber, item_local.infoLabels['episodio_titulo'], item_local.contentSerieName, year)
+            item_local.infoLabels['episodio_titulo'] = '%s [%s]' % (item_local.infoLabels['episodio_titulo'], year)
+        else:
+            item_local.title = '%sx%s %s [%s]' % (str(item_local.contentSeason), item_local.contentEpisodeNumber, item_local.contentSerieName, year)
+
+        # Now polish the titles a little, depending on whether smart titles are selected
+        if not config.get_setting("unify"):     # If smart titles are NOT selected: add quality and language
+            item_local.title = '%s [%s][%s]' % (item_local.title, item_local.quality, item_local.language)
+
+        # Remove empty fields
+        if item_local.infoLabels['episodio_titulo']:
+            item_local.infoLabels['episodio_titulo'] = item_local.infoLabels['episodio_titulo'].replace("[]", "")
+        item_local.title = item_local.title.replace("[]", "")
+
+        #logger.debug("title=[" + item_local.title + "], url=[" + item_local.url + "], item=[" + str(item_local) + "]")
+
+    if config.get_videolibrary_support() and len(itemlist) > 0:
+        itemlist.append(item.clone(title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", action="add_serie_to_library", extra="episodios"))
+
+    return itemlist
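
Note: season and episode numbers come out of "3x05"-style markers in the
scraped titles; the loop above splits on "x" and falls back to 1 when a part
is missing. The same extraction, isolated (an illustrative helper):

    import re

    def season_episode(title):
        # Pull (season, episode) out of 'Serie - 3x05'-style titles,
        # defaulting to (1, 1) as the loop above does.
        match = re.search(r'(\d+)x(\d+)', title)
        if not match:
            return 1, 1
        return int(match.group(1)), int(match.group(2))

    # season_episode('Juego de Tronos - 7x03') -> (7, 3)
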
"/serie-descargar-torrents-\d+-\d+-(.*?)\.html" - - matches = scrapertools.find_multiple_matches(data, patron) - - for scrapedurl, scrapedtitle, scrapedinfo in matches: - title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode( - 'utf8') + ' ' + scrapedinfo.decode('iso-8859-1').encode('utf8') - url = urlparse.urljoin(item.url, scrapedurl) - logger.debug("title=[" + title + "], url=[" + url + "]") - - itemlist.append( - Item(channel=item.channel, action="episodios", title=title, url=url, folder=True, extra="series", - viewmode="movie_with_plot")) - - # busca pelis - patron = "]+>(.*?)" - patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html" - matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedtitle in matches: - title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8') - url = urlparse.urljoin(item.url, scrapedurl) - logger.debug("title=[" + title + "], url=[" + url + "]") - itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, folder=False, extra="pelicula")) - - # busca docu - patron = " -1: - patron = '[^<]+' - patron += '' - patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html" - patron_title = '([^<]+)(\s*([^>]+))?' - action = "show_movie_info" - folder = True - extra = "" - elif item.url.find("series-letra") > -1: - patron = "()" - patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html" - patron_title = '([^<]+)(\s*([^>]+))?' - action = "episodios" - folder = True - extra = "series" - elif item.url.find("series") > -1: - patron = '[^<]+' - patron += '' - patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html" - patron_title = '([^<]+)(\s*([^>]+))?' - action = "episodios" - folder = True - extra = "series" - else: - patron = '[^<]+' - patron += '' - patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html" - patron_title = '([^<]+)(\s*([^>]+))?' 
-        action = "episodios"
-        folder = True
-        extra = "docus"
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedthumbnail in matches:
-        title = scrapertools.get_match(scrapedurl, patron_enlace)
-        title = title.replace("-", " ")
-        url = urlparse.urljoin(item.url, scrapedurl)
-        thumbnail = host + urllib.quote(scrapedthumbnail)
-        plot = ""
-        itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
-                             folder=folder, extra=extra))
-
-    matches = re.compile(patron_title, re.DOTALL).findall(data)
-
-    cnt = 0
-    for scrapedtitle, notused, scrapedinfo in matches:
-        title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
-        if title.endswith('.'):
-            title = title[:-1]
-
-        info = scrapedinfo.decode('iso-8859-1').encode('utf8')
-        if info != "":
-            title = '{0} {1}'.format(title, info)
-
-        itemlist[cnt].title = title
-        cnt += 1
-        if cnt == len(itemlist) - 1:
-            break
-
-    if len(itemlist) == 0:
-        itemlist.append(Item(channel=item.channel, action="mainlist", title="No se ha podido cargar el listado"))
-    else:
-        # Extract the pager
-        patronvideos = " Siguiente >>"
-        matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
-        if len(matches) > 0:
-            scrapedurl = urlparse.urljoin(item.url, matches[0])
-            itemlist.append(
-                Item(channel=item.channel, action="getlist", title="Pagina siguiente >>", url=scrapedurl, folder=True))
-
-    return itemlist
-
-
-def episodios(item):
-    #import web_pdb; web_pdb.set_trace()
-    logger.info()
-    itemlist = []
-
-    # Download the page
-    data = httptools.downloadpage(item.url).data
-
-    total_capis = scrapertools.get_match(data, "")
-    tabla = scrapertools.get_match(data, "")
-    titulo = scrapertools.get_match(data, "")
-
-    item.thumbnail = scrapertools.find_single_match(data,
-                                                    "src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
-    item.thumbnail = host + urllib.quote(item.thumbnail)
-
-    #
-    data = scrapertools.get_match(data,
-                                  "(.*?)")
-    if item.extra == "series":
[^<]+" - else: - patron = "]+>([^>]+)[^<]+" - - patron += "]+>Fecha: ([^<]+)[^<]+" - patron += " 1: - temporada = re.sub("\D", "", epi[0]) - capitulo = re.search("\d+", epi[1]) - if capitulo: - capitulo = capitulo.group() - else: - capitulo = 1 - - epi_data = oTmdb.get_episodio(temporada, capitulo) - logger.debug("epi_data=" + str(epi_data)) - - if epi_data: - item.thumbnail = epi_data["temporada_poster"] - item.fanart = epi_data["episodio_imagen"] - item.plot = epi_data["episodio_sinopsis"] - epi_title = epi_data["episodio_titulo"] - if epi_title != "": - title = scrapedtitle + " " + epi_title + " (" + fecha + ")" - else: - try: - item.fanart = oTmdb.get_backdrop() - except: - pass - - item.plot = oTmdb.get_sinopsis() - - logger.debug("title=[" + title + "], url=[" + url + "], item=[" + str(item) + "]") - - itemlist.append( - Item(channel=item.channel, action="play", title=title, url=url, thumbnail=item.thumbnail, plot=item.plot, - fanart=item.fanart, extra=post, folder=False, id=value)) - - return itemlist - - -def show_movie_info(item): - logger.info() - - itemlist = [] - - tmdb_title = re.sub(r'\(.*\)|\[.*\]', '', item.title).strip() - logger.debug('tmdb_title=' + tmdb_title) - - try: - oTmdb = Tmdb(texto_buscado=tmdb_title, idioma_busqueda="es") - item.fanart = oTmdb.get_backdrop() - item.plot = oTmdb.get_sinopsis() - except: - pass - - data = httptools.downloadpage(item.url).data - - patron = ""): - link = scrapertools.get_match(torrent_data, "") - else: - link = scrapertools.get_match(torrent_data, "") - link = urlparse.urljoin(url, link) - logger.debug("link=" + link) - itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link, - thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False, extra="pelicula")) - - return itemlist - - -def play(item): - #import web_pdb; web_pdb.set_trace() - logger.info() - itemlist = [] - - if item.extra == "pelicula": - itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url, - thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False)) - #data = httptools.downloadpage(item.url).data - #logger.debug("data=" + data) - #url http://www.mejortorrent.com/peli-descargar-torrent-16443-Thor-Ragnarok.html - #patron = host + "/peli-descargar-torrent-((.*?))-" - #newid = scrapertools.find_single_match(item.url, patron) - - - - #params = dict(urlparse.parse_qsl(item.extra)) - #patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=" + newid[0] + "&link_bajar=1" - #http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=16443&link_bajar=1 - #link=scrapertools.find_single_match(data,patron) - #data = httptools.downloadpage(link).data - - - #data = httptools.downloadpage(patron).data - #patron = "Pincha " - #link = host + scrapertools.find_single_match(data, patron) - #logger.info("link=" + link) - #itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link, - # thumbnail=item.thumbnail, plot=item.plot, folder=False)) - - else: - #data = httptools.downloadpage(item.url, post=item.extra).data - data = httptools.downloadpage(item.url).data - #logger.debug("data=" + data) - - params = dict(urlparse.parse_qsl(item.extra)) - patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id - #link=scrapertools.find_single_match(data,patron) - #data = httptools.downloadpage(link).data - - - data = 
-        data = httptools.downloadpage(patron).data
-        patron = "Pincha "
-        link = scrapertools.find_single_match(data, patron)
-        if not host in link:
-            link = host + link
-        logger.info("link=" + link)
-        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
-                             thumbnail=item.thumbnail, plot=item.plot, folder=False))
-    return itemlist
-
 
 def newest(categoria):
     logger.info()
     itemlist = []
     item = Item()
     try:
         if categoria == 'torrent':
-            item.url = host + "/torrents-de-peliculas.html"
-
-            itemlist = getlist(item)
-            if itemlist[-1].title == "Pagina siguiente >>":
-                itemlist.pop()
-            item.url = host + "/torrents-de-series.html"
-            itemlist.extend(getlist(item))
-            if itemlist[-1].title == "Pagina siguiente >>":
+            item.url = host + "/secciones.php?sec=ultimos_torrents"
+            item.extra = "novedades"
+            item.channel = "mejortorrent"
+            item.tipo = False
+            itemlist = listado_busqueda(item)
+            if "Pagina siguiente >>" in itemlist[-1].title:
                 itemlist.pop()
+        if categoria == 'documentales':
+            item.url = host + "/torrents-de-documentales.html"
+            item.extra = "documentales"
+            item.channel = "mejortorrent"
+            item.tipo = False
+            itemlist = listado(item)
+            if "Pagina siguiente >>" in itemlist[-1].title:
+                itemlist.pop()
 
     # The exception is caught so the "newest" section is not interrupted if a channel fails
     except:
         import sys