From e391d84574741fe823895e4487e243d6638d75f7 Mon Sep 17 00:00:00 2001 From: prpeaprendiz <31428501+prpeaprendiz@users.noreply.github.com> Date: Wed, 27 Sep 2017 18:28:24 -0500 Subject: [PATCH 01/30] update cinecalidad.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cosmetic: if there is no tmdb fanart or thumbnail, show only the loaded thumbnail instead of none. --- plugin.video.alfa/channels/cinecalidad.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin.video.alfa/channels/cinecalidad.py b/plugin.video.alfa/channels/cinecalidad.py index f2b44121..e885cef0 100644 --- a/plugin.video.alfa/channels/cinecalidad.py +++ b/plugin.video.alfa/channels/cinecalidad.py @@ -298,7 +298,7 @@ def findvideos(item): if server_id in server_url: server = server_id.lower() - thumbnail = item.contentThumbnail + thumbnail = item.thumbnail if server_id == 'TVM': server = 'thevideo.me' url = server_url[server_id] + video_id + '.html' @@ -367,7 +367,7 @@ def play(item): for videoitem in itemlist: videoitem.title = item.fulltitle videoitem.fulltitle = item.fulltitle - videoitem.thumbnail = item.contentThumbnail + videoitem.thumbnail = item.thumbnail videoitem.channel = item.channel else: itemlist.append(item) From 5d90776c1e31b0ddedf2771de287df8ae7398999 Mon Sep 17 00:00:00 2001 From: prpeaprendiz <31428501+prpeaprendiz@users.noreply.github.com> Date: Wed, 27 Sep 2017 18:37:49 -0500 Subject: [PATCH 02/30] update pelisplus.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cosmetic: show the episode thumbnail for series instead of the series poster (from the video library). --- plugin.video.alfa/channels/pelisplus.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/channels/pelisplus.py b/plugin.video.alfa/channels/pelisplus.py index f3b424c2..e42aedc7 100644 --- a/plugin.video.alfa/channels/pelisplus.py +++ b/plugin.video.alfa/channels/pelisplus.py @@ -470,7 +470,7 @@ def findvideos(item): videoitem.quality = 'default' videoitem.language = 'Latino' if videoitem.server != '': - videoitem.thumbnail = item.contentThumbnail + videoitem.thumbnail = item.thumbnail else: videoitem.thumbnail = item.thumbnail videoitem.server = 'directo' From a9c9aff44f1637e0b63b5fd830efd3795a1e0357 Mon Sep 17 00:00:00 2001 From: prpeaprendiz <31428501+prpeaprendiz@users.noreply.github.com> Date: Wed, 27 Sep 2017 18:51:38 -0500 Subject: [PATCH 03/30] update ultrapeliculashd.py Show thumbnails --- plugin.video.alfa/channels/ultrapeliculashd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/channels/ultrapeliculashd.py b/plugin.video.alfa/channels/ultrapeliculashd.py index 6edd1ea3..e84b5dfe 100755 --- a/plugin.video.alfa/channels/ultrapeliculashd.py +++ b/plugin.video.alfa/channels/ultrapeliculashd.py @@ -217,7 +217,7 @@ def findvideos(item): for videoitem in itemlist: videoitem.channel = item.channel videoitem.action = 'play' - videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server) + videoitem.thumbnail = item.thumbnail videoitem.infoLabels = item.infoLabels videoitem.title = item.contentTitle + ' (' + videoitem.server + ')' if 'youtube' in videoitem.url: From 7b173c31049d592157d55131b433295a567cf78c Mon Sep 17 00:00:00 2001 From: Unknown Date: Thu, 28 Sep 2017 13:46:03 -0300 Subject: [PATCH 04/30] another small tweak MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/hdfull.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py index f09fa162..ab5317da 100644 --- a/plugin.video.alfa/channels/hdfull.py +++ b/plugin.video.alfa/channels/hdfull.py @@ -319,7 +319,7 @@ def fichas(item): contentTitle = scrapedtitle.strip() if scrapedlangs != ">": - textoidiomas = extrae_idiomas(scrapedlangs) + textoidiomas, language = extrae_idiomas(scrapedlangs) #Todo Quitar el idioma title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])") @@ -351,7 +351,7 @@ def fichas(item): itemlist.append( Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail, show=show, folder=True, contentType=contentType, contentTitle=contentTitle, - language =textoidiomas, infoLabels=infoLabels)) + language =language, infoLabels=infoLabels)) ## Paginación next_page_url = scrapertools.find_single_match(data, '.raquo;') @@ -797,16 +797,17 @@ def agrupa_datos(data): def extrae_idiomas(bloqueidiomas): logger.info("idiomas=" + bloqueidiomas) - # Todo cambiar por lista - #textoidiomas=[] + language=[] textoidiomas = '' patronidiomas = '([a-z0-9]+).png"' idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas) for idioma in idiomas: + # TODO quitar esto textoidiomas = textoidiomas + idioma +" " - #textoidiomas.append(idioma.upper()) + # TODO y dejar esto + language.append(idioma) - return textoidiomas + return textoidiomas, language def bbcode_kodi2html(text): From 739755d43e9167a9d774719730e425d6eb70f633 Mon Sep 17 00:00:00 2001 From: Unknown Date: Fri, 29 Sep 2017 15:51:32 -0300 Subject: [PATCH 05/30] ajustes a canales varios --- plugin.video.alfa/channels/allcalidad.py | 3 ++- plugin.video.alfa/channels/canalpelis.py | 10 +++++----- plugin.video.alfa/channels/cineasiaenlinea.py | 4 ++-- plugin.video.alfa/channels/cinefox.py | 4 ++-- plugin.video.alfa/channels/cinetux.py | 16 +++++++++------- plugin.video.alfa/channels/divxatope.py | 2 +- plugin.video.alfa/channels/maxipelis.py | 3 +-- plugin.video.alfa/channels/peliculasnu.py | 5 +++-- 8 files changed, 25 insertions(+), 22 deletions(-) diff --git a/plugin.video.alfa/channels/allcalidad.py b/plugin.video.alfa/channels/allcalidad.py index c4ca9513..0894a0a5 100755 --- a/plugin.video.alfa/channels/allcalidad.py +++ b/plugin.video.alfa/channels/allcalidad.py @@ -91,7 +91,8 @@ def peliculas(item): thumbnail = thumbnail, url = url, contentTitle = titulo, - contentType="movie" + contentType="movie", + language = idioma ) if year: new_item.infoLabels['year'] = int(year) diff --git a/plugin.video.alfa/channels/canalpelis.py b/plugin.video.alfa/channels/canalpelis.py index 19604c52..15fce9a1 100644 --- a/plugin.video.alfa/channels/canalpelis.py +++ b/plugin.video.alfa/channels/canalpelis.py @@ -137,18 +137,18 @@ def peliculas(item): matches = scrapertools.find_multiple_matches(data, patron) - for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches[item.page:item.page + 20]: - if 'Próximamente' not in calidad and '-XXX.jpg' not in scrapedthumbnail: + for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 20]: + if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail: scrapedtitle = scrapedtitle.replace('Ver ', '').strip() contentTitle = scrapedtitle.partition(':')[0].partition(',')[0] title = "%s [COLOR green][%s][/COLOR] [COLOR 
yellow][%s][/COLOR]" % ( - scrapedtitle, year, calidad) + scrapedtitle, year, quality) itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3, url=scrapedurl, infoLabels={'year': year, 'rating': rating}, contentTitle=contentTitle, thumbnail=scrapedthumbnail, - title=title, context="buscar_trailer")) + title=title, context="buscar_trailer", quality = quality)) tmdb.set_infoLabels(itemlist, __modo_grafico__) tmdb.set_infoLabels(itemlist, __modo_grafico__) @@ -367,7 +367,7 @@ def findvideos(item): server = servertools.get_server_from_url(url) title = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, server.title(), lang) itemlist.append(item.clone(action='play', url=url, title=title, extra1=title, - server=server, text_color=color3)) + server=server, language = lang, text_color=color3)) itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', diff --git a/plugin.video.alfa/channels/cineasiaenlinea.py b/plugin.video.alfa/channels/cineasiaenlinea.py index 5dab9193..17a25c83 100755 --- a/plugin.video.alfa/channels/cineasiaenlinea.py +++ b/plugin.video.alfa/channels/cineasiaenlinea.py @@ -108,9 +108,9 @@ def peliculas(item): infolab = {'year': year} itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels=infolab, - contentTitle=title, contentType="movie")) + contentTitle=title, contentType="movie", quality=calidad)) - next_page = scrapertools.find_single_match(data, '
  • <\/span>(.*?)<\/li>' + patron = '
  • <\/span>(.*?) (\d+)<\/li>' matches = re.compile(patron, re.DOTALL).findall(data) - for scrapedurl, scrapedlang, scrapedtitle in matches: + for scrapedurl, scrapedlang, scrapedtitle, episode in matches: language = scrapedlang - title = scrapedtitle + title = scrapedtitle + " " + "1x" + episode url = scrapedurl itemlist.append(item.clone(title=title, url=url, action='findvideos', language=language)) - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - - data = get_source(item.url) - itemlist.extend(servertools.find_video_items(data=data)) - - for videoitem in itemlist: - title = item.title - videoitem.channel = item.channel - videoitem.title = title - videoitem.action = 'play' - - + + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir serie a la biblioteca", url=item.url, action="add_serie_to_library", extra="episodios", fanart=item.thumbnail, thumbnail=item.thumbnail, contentTitle=item.show, show=item.show)) + return itemlist From ec7ba95ccdc3a8acee538be85f328721aedd662b Mon Sep 17 00:00:00 2001 From: Unknown Date: Fri, 29 Sep 2017 21:25:45 -0300 Subject: [PATCH 07/30] Ajustes en novedades para doomtv --- plugin.video.alfa/channels/doomtv.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugin.video.alfa/channels/doomtv.py b/plugin.video.alfa/channels/doomtv.py index 0ab1fe6e..170d3363 100644 --- a/plugin.video.alfa/channels/doomtv.py +++ b/plugin.video.alfa/channels/doomtv.py @@ -203,9 +203,10 @@ def newest(categoria): # categoria='peliculas' try: if categoria == 'peliculas': - item.url = host + item.url = host +'peliculas/page/1' elif categoria == 'infantiles': - item.url = host + 'category/animacion/' + item.url = host + 'categoria/animacion/' + itemlist = lista(item) if itemlist[-1].title == 'Siguiente >>>': itemlist.pop() From ce8c4580efb79761f9b6ecd889fb5957e9fbf4f7 Mon Sep 17 00:00:00 2001 From: Alfa-Addon Date: Fri, 29 Sep 2017 21:00:42 -0400 Subject: [PATCH 08/30] fix language labels --- plugin.video.alfa/channels/pedropolis.py | 214 +++++------------------ 1 file changed, 47 insertions(+), 167 deletions(-) diff --git a/plugin.video.alfa/channels/pedropolis.py b/plugin.video.alfa/channels/pedropolis.py index c5a01871..aa4df645 100644 --- a/plugin.video.alfa/channels/pedropolis.py +++ b/plugin.video.alfa/channels/pedropolis.py @@ -15,7 +15,6 @@ from core.item import Item from core import channeltools from core import tmdb from platformcode import config, logger -from channelselector import get_thumb __channel__ = "pedropolis" @@ -44,49 +43,41 @@ parameters = channeltools.get_channel_parameters(__channel__) fanart_host = parameters['fanart'] thumbnail_host = parameters['thumbnail'] +thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png" + def mainlist(item): logger.info() - itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True, - viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")), - - item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow", - viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot", - thumbnail=get_thumb("channels_tvshow.png")), - - item.clone(title="Buscar", action="search", text_blod=True, extra='buscar', - thumbnail=get_thumb('search.png'), url=host)] + viewcontent='movies', viewmode="movie_with_plot"), + item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype= "tvshow", + 
viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot")] return itemlist def menumovies(item): logger.info() - itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'movies/', - viewcontent='movies', viewmode="movie_with_plot"), - + itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, + viewcontent='movies', url=host + 'movies/', viewmode="movie_with_plot"), item.clone(title="Más Vistas", action="peliculas", text_blod=True, viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"), - - item.clone(title="Más Valoradas", action="peliculas", text_blod=True, viewcontent='movies', - url=host + 'calificaciones/?get=movies', viewmode="movie_with_plot"), - - item.clone(title="Géneros", action="generos", text_blod=True, viewmode="movie_with_plot", - viewcontent='movies', url=host)] + item.clone(title="Más Valoradas", action="peliculas", text_blod=True, + viewcontent='movies', url=host + 'calificaciones/?get=movies', + viewmode="movie_with_plot"), item.clone(title="Géneros", action="generos", text_blod=True, + viewcontent='movies', url=host, + viewmode="movie_with_plot")] return itemlist def menuseries(item): logger.info() - itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow", + itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype= "tvshow", viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot"), - - item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow", + item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype= "tvshow", viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"), - - item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype="tvshow", + item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype= "tvshow", viewcontent='tvshows', url=host + 'calificaciones/?get=tv', viewmode="movie_with_plot")] return itemlist @@ -97,14 +88,14 @@ def peliculas(item): itemlist = [] url_next_page = '' data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) + datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) patron = '
    ([^.*?' # img, title patron += '
    ([^<]+).*?' # rating patron += '([^<]+).*?' # calidad, url patron += '([^<]+)' # year - matches = scrapertools.find_multiple_matches(data, patron) + matches = scrapertools.find_multiple_matches(datas, patron) # Paginación if item.next_page != 'b': @@ -124,6 +115,8 @@ def peliculas(item): if 'Proximamente' not in calidad: scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace( 'Español Latino', '').strip() + item.infoLabels['year'] = year + item.infoLabels['rating'] = rating title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, calidad) new_item = Item(channel=__channel__, action="findvideos", contentTitle=scrapedtitle, @@ -134,9 +127,8 @@ def peliculas(item): itemlist.append(new_item) if url_next_page: - itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »", - url=url_next_page, next_page=next_page, folder=True, text_blod=True, - thumbnail=get_thumb("next.png"))) + itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente", + url=url_next_page, next_page=next_page, folder=True, text_blod=True)) for item in itemlist: if item.infoLabels['plot'] == '': @@ -158,92 +150,11 @@ def peliculas(item): return itemlist -def search(item, texto): - logger.info() - - texto = texto.replace(" ", "+") - item.url = urlparse.urljoin(item.url, "?s={0}".format(texto)) - - try: - return sub_search(item) - - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - -def sub_search(item): - logger.info() - - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |
    ", "", data) - - patron = '
    ([^' # url, img, title - patron += '([^<]+).*?' # tipo - patron += '([^"]+).*?

    ([^<]+)

    ' # year, plot - - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year, plot in matches: - title = scrapedtitle - if tipo == 'Serie': - contentType = 'tvshow' - action = 'temporadas' - title += ' [COLOR red](' + tipo + ')[/COLOR]' - else: - contentType = 'movie' - action = 'findvideos' - title += ' [COLOR green](' + tipo + ')[/COLOR]' - - itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar', - action=action, infoLabels={"year": year}, contentType=contentType, - thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle)) - - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - paginacion = scrapertools.find_single_match(data, '') - - if paginacion: - itemlist.append(Item(channel=item.channel, action="sub_search", - title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png"))) - - return itemlist - - -def newest(categoria): - logger.info() - itemlist = [] - item = Item() - try: - if categoria == 'peliculas': - item.url = host + 'movies/' - elif categoria == 'infantiles': - item.url = host + "genre/animacion/" - else: - return [] - - itemlist = peliculas(item) - if itemlist[-1].title == "» Siguiente »": - itemlist.pop() - - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - return itemlist - - def generos(item): logger.info() itemlist = [] - data = httptools.downloadpage(item.url).data + data = scrapertools.cache_page(item.url) data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) # logger.info(data) data = scrapertools.find_single_match(data, 'Genero
    (.*?)
  • ", "", data) - # logger.info(data) + # logger.info(datas) patron = '
    ([^.*?' @@ -289,29 +200,21 @@ def series(item): url_next_page = urlparse.urljoin(item.url, matches_next_page[0]) for scrapedthumbnail, scrapedtitle, scrapedurl in matches: - scrapedtitle = scrapedtitle.replace('’', "'") - itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie', - url=scrapedurl, thumbnail=scrapedthumbnail, - contentSerieName=scrapedtitle, show=scrapedtitle, - next_page=next_page, action="temporadas", contentType='tvshow')) + scrapedtitle = scrapedtitle.replace('Ver ', + '').replace(' Online HD', + '').replace('ver ', '').replace(' Online', + '').replace('’', "'") + itemlist.append(Item(channel=__channel__, title=scrapedtitle, + url=scrapedurl, thumbnail=scrapedthumbnail, + contentSerieName=scrapedtitle, show=scrapedtitle, + next_page=next_page, action="temporadas", contentType='tvshow')) tmdb.set_infoLabels(itemlist, __modo_grafico__) tmdb.set_infoLabels(itemlist, __modo_grafico__) if url_next_page: - itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page, - next_page=next_page, thumbnail=get_thumb("next.png"))) - - for item in itemlist: - if item.infoLabels['plot'] == '': - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - # logger.info(data) - item.fanart = scrapertools.find_single_match(data, - "").replace( - 'w780', 'original') - item.plot = scrapertools.find_single_match(data, '

    Sinopsis

    ' # capítulos - matches = scrapertools.find_multiple_matches(data, patron) + matches = scrapertools.find_multiple_matches(datas, patron) if len(matches) > 1: for scrapedseason, scrapedthumbnail in matches: scrapedseason = " ".join(scrapedseason.split()) temporada = scrapertools.find_single_match(scrapedseason, '(\d+)') - new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='serie') + new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail) new_item.infoLabels['season'] = temporada new_item.extra = "" itemlist.append(new_item) @@ -348,11 +251,6 @@ def temporadas(item): itemlist.sort(key=lambda it: it.title) - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, - action="add_serie_to_library", extra="episodios", show=item.show, category="Series", - text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host)) - return itemlist else: return episodios(item) @@ -363,13 +261,13 @@ def episodios(item): itemlist = [] data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |
    ", "", data) - # logger.info(data) + datas = re.sub(r"\n|\r|\t| |
    ", "", data) + # logger.info(datas) patron = '
    .*?' # url cap, img patron += '
    (.*?)
    .*?' # numerando cap patron += '
    ([^<]+)' # title de episodios - matches = scrapertools.find_multiple_matches(data, patron) + matches = scrapertools.find_multiple_matches(datas, patron) for scrapedurl, scrapedtitle, scrapedname in matches: scrapedtitle = scrapedtitle.replace('--', '0') @@ -382,7 +280,7 @@ def episodios(item): title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname)) new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, - contentType="episode", extra='serie') + contentType="episode") if 'infoLabels' not in new_item: new_item.infoLabels = {} @@ -390,7 +288,6 @@ def episodios(item): new_item.infoLabels['episode'] = episode.zfill(2) itemlist.append(new_item) - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) # TODO no hacer esto si estamos añadiendo a la videoteca if not item.extra: @@ -399,7 +296,7 @@ def episodios(item): for i in itemlist: if i.infoLabels['title']: # Si el capitulo tiene nombre propio añadírselo al titulo del item - i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title']) + i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title']) if i.infoLabels.has_key('poster_path'): # Si el capitulo tiene imagen propia remplazar al poster i.thumbnail = i.infoLabels['poster_path'] @@ -411,7 +308,7 @@ def episodios(item): if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, category="Series", - text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host)) + text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) return itemlist @@ -423,42 +320,25 @@ def findvideos(item): data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) # logger.info(data) - patron = '
    ' # lang, url + patron = '
    ' # matches = re.compile(patron, re.DOTALL).findall(data) for option, url in matches: lang = scrapertools.find_single_match(data, '
  • .*? 0 and item.extra != 'serie': - itemlist.append(Item(channel=__channel__, - title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url=item.url, action="add_pelicula_to_library", - thumbnail=get_thumb("videolibrary_movie.png"), - extra="findvideos", contentTitle=item.contentTitle)) + x.title = "%s %s [COLOR yellow](%s)[/COLOR] [COLOR yellow](%s)[/COLOR]" % ( + x.language, x.title, x.server.title(), x.quality) return itemlist From 52009dbe0e8fb8f1507965e8f0cd6a477fcdabca Mon Sep 17 00:00:00 2001 From: Alfa-Addon Date: Fri, 29 Sep 2017 21:12:20 -0400 Subject: [PATCH 09/30] v2.2.1 --- plugin.video.alfa/addon.xml | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 3fc4a4f1..c290d918 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@  - + @@ -19,11 +19,16 @@ [B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR] - » canalpelis » hdfull - » xdvideos » playmax - » cinetux » gnula - » flashx » rapidvideo + » canalpelis » allcalidad + » cinefox » cineasiaenlinea + » cinetux » divxatope + » maxipelis » pedropolis + » doomtv » animeshd + » hdfull » ultrapelishd + » pelisplus » cinecalidad + » peliculasnu » allpeliculas ¤ arreglos internos + [COLOR green]Gracias a [COLOR yellow]prpeaprendiz[/COLOR] por su colaboración en esta versión[/COLOR] Navega con Kodi por páginas web para ver sus videos de manera fácil. Browse web pages using Kodi From bbcd9a56a93789255b01aee78e2b051ff8346260 Mon Sep 17 00:00:00 2001 From: Alfa-Addon Date: Fri, 29 Sep 2017 22:31:49 -0400 Subject: [PATCH 10/30] revision fixed --- plugin.video.alfa/channels/pedropolis.py | 236 +++++++++++++++++------ 1 file changed, 178 insertions(+), 58 deletions(-) diff --git a/plugin.video.alfa/channels/pedropolis.py b/plugin.video.alfa/channels/pedropolis.py index aa4df645..3bfec694 100644 --- a/plugin.video.alfa/channels/pedropolis.py +++ b/plugin.video.alfa/channels/pedropolis.py @@ -15,6 +15,7 @@ from core.item import Item from core import channeltools from core import tmdb from platformcode import config, logger +from channelselector import get_thumb __channel__ = "pedropolis" @@ -43,41 +44,49 @@ parameters = channeltools.get_channel_parameters(__channel__) fanart_host = parameters['fanart'] thumbnail_host = parameters['thumbnail'] -thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png" - def mainlist(item): logger.info() + itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True, - viewcontent='movies', viewmode="movie_with_plot"), - item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype= "tvshow", - viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot")] + viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")), + + item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow", + viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot", + thumbnail=get_thumb("channels_tvshow.png")), + + item.clone(title="Buscar", action="search", text_blod=True, extra='buscar', + thumbnail=get_thumb('search.png'), url=host)] return itemlist def menumovies(item): logger.info() - itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, - viewcontent='movies', url=host + 'movies/', viewmode="movie_with_plot"), + itemlist = 
[item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'movies/', + viewcontent='movies', viewmode="movie_with_plot"), + item.clone(title="Más Vistas", action="peliculas", text_blod=True, viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"), - item.clone(title="Más Valoradas", action="peliculas", text_blod=True, - viewcontent='movies', url=host + 'calificaciones/?get=movies', - viewmode="movie_with_plot"), item.clone(title="Géneros", action="generos", text_blod=True, - viewcontent='movies', url=host, - viewmode="movie_with_plot")] + + item.clone(title="Más Valoradas", action="peliculas", text_blod=True, viewcontent='movies', + url=host + 'calificaciones/?get=movies', viewmode="movie_with_plot"), + + item.clone(title="Géneros", action="generos", text_blod=True, viewmode="movie_with_plot", + viewcontent='movies', url=host)] return itemlist def menuseries(item): logger.info() - itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype= "tvshow", + itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow", viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot"), - item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype= "tvshow", + + item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow", viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"), - item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype= "tvshow", + + item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype="tvshow", viewcontent='tvshows', url=host + 'calificaciones/?get=tv', viewmode="movie_with_plot")] return itemlist @@ -88,14 +97,14 @@ def peliculas(item): itemlist = [] url_next_page = '' data = httptools.downloadpage(item.url).data - datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) + data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) - patron = '
    ([^.*?' # img, title + patron = '
    ([^.*?' # img, title patron += '
    ([^<]+).*?' # rating patron += '([^<]+).*?' # calidad, url - patron += '([^<]+)' # year + patron += '([^<]+)' # year - matches = scrapertools.find_multiple_matches(datas, patron) + matches = scrapertools.find_multiple_matches(data, patron) # Paginación if item.next_page != 'b': @@ -115,8 +124,6 @@ def peliculas(item): if 'Proximamente' not in calidad: scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace( 'Español Latino', '').strip() - item.infoLabels['year'] = year - item.infoLabels['rating'] = rating title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, calidad) new_item = Item(channel=__channel__, action="findvideos", contentTitle=scrapedtitle, @@ -127,8 +134,9 @@ def peliculas(item): itemlist.append(new_item) if url_next_page: - itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente", - url=url_next_page, next_page=next_page, folder=True, text_blod=True)) + itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »", + url=url_next_page, next_page=next_page, folder=True, text_blod=True, + thumbnail=get_thumb("next.png"))) for item in itemlist: if item.infoLabels['plot'] == '': @@ -150,11 +158,92 @@ def peliculas(item): return itemlist +def search(item, texto): + logger.info() + + texto = texto.replace(" ", "+") + item.url = urlparse.urljoin(item.url, "?s={0}".format(texto)) + + try: + return sub_search(item) + + # Se captura la excepción, para no interrumpir al buscador global si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + +def sub_search(item): + logger.info() + + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |
    ", "", data) + + patron = '
    ([^' # url, img, title + patron += '([^<]+).*?' # tipo + patron += '([^"]+).*?

    ([^<]+)

    ' # year, plot + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year, plot in matches: + title = scrapedtitle + if tipo == 'Serie': + contentType = 'tvshow' + action = 'temporadas' + title += ' [COLOR red](' + tipo + ')[/COLOR]' + else: + contentType = 'movie' + action = 'findvideos' + title += ' [COLOR green](' + tipo + ')[/COLOR]' + + itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar', + action=action, infoLabels={"year": year}, contentType=contentType, + thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + paginacion = scrapertools.find_single_match(data, '') + + if paginacion: + itemlist.append(Item(channel=item.channel, action="sub_search", + title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png"))) + + return itemlist + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria == 'peliculas': + item.url = host + 'movies/' + elif categoria == 'infantiles': + item.url = host + "genre/animacion/" + else: + return [] + + itemlist = peliculas(item) + if itemlist[-1].title == "» Siguiente »": + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + + def generos(item): logger.info() itemlist = [] - data = scrapertools.cache_page(item.url) + data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) # logger.info(data) data = scrapertools.find_single_match(data, 'Genero
    (.*?)
  • ", "", data) - # logger.info(datas) + # logger.info(data) - patron = '
    ([^.*?' + patron = '
    ([^.*?' # img, title, url matches = scrapertools.find_multiple_matches(data, patron) @@ -200,21 +289,29 @@ def series(item): url_next_page = urlparse.urljoin(item.url, matches_next_page[0]) for scrapedthumbnail, scrapedtitle, scrapedurl in matches: - scrapedtitle = scrapedtitle.replace('Ver ', - '').replace(' Online HD', - '').replace('ver ', '').replace(' Online', - '').replace('’', "'") - itemlist.append(Item(channel=__channel__, title=scrapedtitle, - url=scrapedurl, thumbnail=scrapedthumbnail, - contentSerieName=scrapedtitle, show=scrapedtitle, - next_page=next_page, action="temporadas", contentType='tvshow')) + scrapedtitle = scrapedtitle.replace('’', "'") + itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie', + url=scrapedurl, thumbnail=scrapedthumbnail, + contentSerieName=scrapedtitle, show=scrapedtitle, + next_page=next_page, action="temporadas", contentType='tvshow')) tmdb.set_infoLabels(itemlist, __modo_grafico__) tmdb.set_infoLabels(itemlist, __modo_grafico__) if url_next_page: - itemlist.append(Item(channel=__channel__, action="series", title=">> Página Siguiente", url=url_next_page, - next_page=next_page)) + itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page, + next_page=next_page, thumbnail=get_thumb("next.png"))) + + for item in itemlist: + if item.infoLabels['plot'] == '': + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + # logger.info(data) + item.fanart = scrapertools.find_single_match(data, + "").replace( + 'w780', 'original') + item.plot = scrapertools.find_single_match(data, '

    Sinopsis

    ' # capítulos + data = re.sub(r"\n|\r|\t| |
    ", "", data) + # logger.info(data) + patron = '([^<]+).*?' # season + patron += '
    ' # img - matches = scrapertools.find_multiple_matches(datas, patron) + matches = scrapertools.find_multiple_matches(data, patron) if len(matches) > 1: for scrapedseason, scrapedthumbnail in matches: scrapedseason = " ".join(scrapedseason.split()) temporada = scrapertools.find_single_match(scrapedseason, '(\d+)') - new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail) + new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='serie') new_item.infoLabels['season'] = temporada new_item.extra = "" itemlist.append(new_item) @@ -251,6 +348,11 @@ def temporadas(item): itemlist.sort(key=lambda it: it.title) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host)) + return itemlist else: return episodios(item) @@ -261,13 +363,13 @@ def episodios(item): itemlist = [] data = httptools.downloadpage(item.url).data - datas = re.sub(r"\n|\r|\t| |
    ", "", data) - # logger.info(datas) - patron = '
    .*?' # url cap, img - patron += '
    (.*?)
    .*?' # numerando cap - patron += '
    ([^<]+)' # title de episodios + data = re.sub(r"\n|\r|\t| |
    ", "", data) + # logger.info(data) + patron = '
    .*?' # url + patron += '
    (.*?)
    .*?' # numerando cap + patron += '
    ([^<]+)' # title de episodios - matches = scrapertools.find_multiple_matches(datas, patron) + matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedtitle, scrapedname in matches: scrapedtitle = scrapedtitle.replace('--', '0') @@ -280,7 +382,7 @@ def episodios(item): title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname)) new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, - contentType="episode") + contentType="episode", extra='serie') if 'infoLabels' not in new_item: new_item.infoLabels = {} @@ -296,7 +398,7 @@ def episodios(item): for i in itemlist: if i.infoLabels['title']: # Si el capitulo tiene nombre propio añadírselo al titulo del item - i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title']) + i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title']) if i.infoLabels.has_key('poster_path'): # Si el capitulo tiene imagen propia remplazar al poster i.thumbnail = i.infoLabels['poster_path'] @@ -304,11 +406,13 @@ def episodios(item): itemlist.sort(key=lambda it: int(it.infoLabels['episode']), reverse=config.get_setting('orden_episodios', __channel__)) + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + # Opción "Añadir esta serie a la videoteca" if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, category="Series", - text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) + text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host)) return itemlist @@ -320,25 +424,41 @@ def findvideos(item): data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) # logger.info(data) - patron = '
    ' # + patron = '
    ' # lang, url matches = re.compile(patron, re.DOTALL).findall(data) for option, url in matches: - lang = scrapertools.find_single_match(data, '
  • .*?.*?-->(\w+)' % option) + lang = lang.lower() + idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]', + 'drive': '[COLOR cornflowerblue](LAT)[/COLOR]', + 'castellano': '[COLOR green](CAST)[/COLOR]', + 'subtitulado': '[COLOR red](VOS)[/COLOR]', + 'ingles': '[COLOR red](VOS)[/COLOR]'} + if lang in idioma: + lang = idioma[lang] + # obtenemos los redirecionamiento de shorturl en caso de coincidencia if "bit.ly" in url: url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "") - itemlist.append(item.clone(channel=__channel__, url=url, title=item.contentTitle, action='play', language=lang)) + itemlist.append(item.clone(channel=__channel__, url=url, title=item.contentTitle, + action='play', language=lang)) itemlist = servertools.get_servers_itemlist(itemlist) + itemlist.sort(key=lambda it: it.language, reverse=False) for x in itemlist: if x.extra != 'directo': x.thumbnail = item.thumbnail - x.title = "%s %s [COLOR yellow](%s)[/COLOR] [COLOR yellow](%s)[/COLOR]" % ( - x.language, x.title, x.server.title(), x.quality) + x.title = "Ver en: [COLOR yellow](%s)[/COLOR] %s" % (x.server.title(), x.language) + if item.extra != 'serie' and item.extra != 'buscar': + x.title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % ( + x.server.title(), x.quality, x.language) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie': + itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos", + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle)) return itemlist From c44c53bec06739748fb2418896d0a97a8f008d33 Mon Sep 17 00:00:00 2001 From: alfa_addon_10 Date: Sat, 30 Sep 2017 17:24:45 +0200 Subject: [PATCH 11/30] fix pagination, revamp channel --- plugin.video.alfa/channels/datoporn.py | 2 +- plugin.video.alfa/channels/seriesyonkis.py | 478 +++++---------------- 2 files changed, 107 insertions(+), 373 deletions(-) diff --git a/plugin.video.alfa/channels/datoporn.py b/plugin.video.alfa/channels/datoporn.py index 1afb3615..a63ebc25 100755 --- a/plugin.video.alfa/channels/datoporn.py +++ b/plugin.video.alfa/channels/datoporn.py @@ -40,7 +40,7 @@ def lista(item): server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg"))) # Extrae la marca de siguiente página - next_page = scrapertools.find_single_match(data, 'Next') + next_page = scrapertools.find_single_match(data, 'Next') if next_page and itemlist: itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) diff --git a/plugin.video.alfa/channels/seriesyonkis.py b/plugin.video.alfa/channels/seriesyonkis.py index 530d1aa2..20c61a64 100755 --- a/plugin.video.alfa/channels/seriesyonkis.py +++ b/plugin.video.alfa/channels/seriesyonkis.py @@ -1,47 +1,82 @@ # -*- coding: utf-8 -*- import re -import urllib -import urllib2 import urlparse +from core import httptools from core import scrapertools from core import servertools from core.item import Item from platformcode import config, logger +host = 'https://yonkis.to' + def mainlist(item): logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, action="listalfabetico", title="Listado alfabetico", - url="http://www.seriesyonkis.sx", - fanart=item.fanart)) - itemlist.append(Item(channel=item.channel, action="mostviewed", title="Series más vistas", - url="http://www.seriesyonkis.sx/series-mas-vistas", - 
fanart=item.fanart)) - itemlist.append( - Item(channel=item.channel, action="search", title="Buscar", url="http://www.seriesyonkis.sx/buscar/serie", - fanart=item.fanart)) + itemlist = list() + itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabetico", url=host)) + itemlist.append(Item(channel=item.channel, action="mas_vistas", title="Series más vistas", + url=host + "/series-mas-vistas")) + itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos episodios añadidos", + url=host)) + itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url=host + "/buscar/serie")) return itemlist -def search(item, texto, categoria="*"): +def alfabetico(item): logger.info() + + itemlist = list() + + itemlist.append(Item(channel=item.channel, action="series", title="0-9", url=host + "/lista-de-series/0-9")) + for letra in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': + itemlist.append(Item(channel=item.channel, action="series", title=letra, url=host+"/lista-de-series/"+letra)) + + return itemlist + + +def mas_vistas(item): + logger.info() + + data = httptools.downloadpage(item.url).data + matches = re.compile('', re.S).findall(data) + itemlist = [] + for scrapedtitle, scrapedurl, scrapedthumbnail in matches: + scrapedurl = urlparse.urljoin(item.url, scrapedurl) + scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail.replace("/90/", "/150/")) - if categoria not in ("*", "S"): return itemlist ## <-- + itemlist.append( + Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, show=scrapedtitle, fanart=item.fanart)) - if item.url == "": - item.url = "http://www.seriesyonkis.sx/buscar/serie" - url = "http://www.seriesyonkis.sx/buscar/serie" # write ur URL here - post = 'keyword=' + texto[0:18] + '&search_type=serie' + return itemlist + + +def search(item, texto): + logger.info() + + itemlist = [] + post = "keyword=%s&search_type=serie" % texto + data = httptools.downloadpage(item.url, post=post).data - data = scrapertools.cache_page(url, post=post) try: - return getsearchresults(item, data, "episodios") + patron = '([^<]+)
  • ' + matches = re.compile(patron, re.DOTALL).findall(data) + for scrapedurl, scrapedtitle, scrapedthumb, scrapedplot in matches: + title = scrapedtitle.strip() + url = host + scrapedurl + thumb = host + scrapedthumb.replace("/90/", "/150/") + plot = re.sub(r"\n|\r|\t|\s{2,}", "", scrapedplot.strip()) + logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumb + "]") + + itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url, + thumbnail=thumb, plot=plot, show=title)) + + return itemlist # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: import sys @@ -50,90 +85,21 @@ def search(item, texto, categoria="*"): return [] -def getsearchresults(item, data, action): - itemlist = [] - - patron = '_results_wrapper">(.*?)