diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 0ca720af..ae32cdbb 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@  - + @@ -19,11 +19,19 @@ [B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR] - » playmax » allcalidad - » cinetux » allpeliculas - » pedropolis » pelisplanet - » flashx » gvideo - ¤ selector de temas ¤ arreglos internos + » maxipelis » peliculasaudiolatino + » peliculasmx » peliscity + » repelis » seriesmeme + » seriesyonkis » verpeliculasnuevas + » zonatorrent » kabagi/diskokosmico + » tiotorrent » allcalidad + » areadocumental » cinetux + » hdfull » newpct1 + » ohpelis » animeyt + » flashx » kbagi + » gamovideo » vidup + ¤ arreglos internos + [COLOR green]Gracias a [COLOR yellow]RIgodonius[/COLOR] por su colaboración en esta versión[/COLOR] Navega con Kodi por páginas web para ver sus videos de manera fácil. Browse web pages using Kodi diff --git a/plugin.video.alfa/channels/allcalidad.py b/plugin.video.alfa/channels/allcalidad.py index 9d5f6a7e..8a64360a 100755 --- a/plugin.video.alfa/channels/allcalidad.py +++ b/plugin.video.alfa/channels/allcalidad.py @@ -92,9 +92,14 @@ def peliculas(item): matches = scrapertools.find_multiple_matches(data, patron) for url, thumbnail, titulo, varios in matches: idioma = scrapertools.find_single_match(varios, '(?s)Idioma.*?kinopoisk">([^<]+)') + number_idioma = scrapertools.find_single_match(idioma, '[0-9]') + mtitulo = titulo + if number_idioma != "": + idioma = "" + else: + mtitulo += " (" + idioma + ")" year = scrapertools.find_single_match(varios, 'Año.*?kinopoisk">([^<]+)') year = scrapertools.find_single_match(year, '[0-9]{4}') - mtitulo = titulo + " (" + idioma + ")" if year: mtitulo += " (" + year + ")" item.infoLabels['year'] = int(year) diff --git a/plugin.video.alfa/channels/animeflv_me.py b/plugin.video.alfa/channels/animeflv_me.py index 71b18b3b..b73b174f 100755 --- a/plugin.video.alfa/channels/animeflv_me.py +++ b/plugin.video.alfa/channels/animeflv_me.py @@ -21,12 +21,12 @@ CHANNEL_DEFAULT_HEADERS = [ REGEX_NEXT_PAGE = r"class='current'>\d+?
)(.+?)(?:)' -REGEX_THUMB = r'src="(http://media.animeflv\.me/uploads/thumbs/[^"]+?)"' +REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"' REGEX_PLOT = r'Línea de historia:

    (.*?)' -REGEX_URL = r'href="(http://animeflv\.me/Anime/[^"]+)">' +REGEX_URL = r'href="(http://animeflv\.co/Anime/[^"]+)">' REGEX_SERIE = r'%s.+?%s([^<]+?)

    (.+?)

    ' % (REGEX_THUMB, REGEX_URL) -REGEX_EPISODE = r'href="(http://animeflv\.me/Ver/[^"]+?)">(?:)?(.+?)(\d+/\d+/\d+)' -REGEX_GENERO = r'([^<]+)' +REGEX_EPISODE = r'href="(http://animeflv\.co/Ver/[^"]+?)">(?:)?(.+?)(\d+/\d+/\d+)' +REGEX_GENERO = r'([^<]+)' def get_url_contents(url): @@ -309,7 +309,7 @@ def findvideos(item): itemlist = [] page_html = get_url_contents(item.url) - regex_api = r'http://player\.animeflv\.me/[^\"]+' + regex_api = r'http://player\.animeflv\.co/[^\"]+' iframe_url = scrapertools.find_single_match(page_html, regex_api) iframe_html = get_url_contents(iframe_url) diff --git a/plugin.video.alfa/channels/animeyt.json b/plugin.video.alfa/channels/animeyt.json new file mode 100644 index 00000000..d5672f1a --- /dev/null +++ b/plugin.video.alfa/channels/animeyt.json @@ -0,0 +1,36 @@ +{ + "id": "animeyt", + "name": "AnimeYT", + "active": true, + "adult": false, + "language": "es", + "thumbnail": "http://i.imgur.com/dHpupFk.png", + "version": 1, + "changes": [ + { + "date": "17/05/2017", + "description": "Fix novedades y replace en findvideos" + } + ], + "categories": [ + "anime" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "modo_grafico", + "type": "bool", + "label": "información extra", + "default": true, + "enabled": true, + "visible": true + } + ] +} diff --git a/plugin.video.alfa/channels/animeyt.py b/plugin.video.alfa/channels/animeyt.py new file mode 100644 index 00000000..bae04053 --- /dev/null +++ b/plugin.video.alfa/channels/animeyt.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- + +import re +import urlparse + + +from core import httptools +from core import scrapertools +from core import servertools +from core.item import Item +from core import tmdb +from platformcode import config,logger + +__modo_grafico__ = config.get_setting('modo_grafico', 'animeyt') + +HOST = "http://animeyt.tv/" + +def mainlist(item): + logger.info() + + itemlist = list() + + itemlist.append(Item(channel=item.channel, title="Novedades", action="novedades", url=HOST)) + + itemlist.append(Item(channel=item.channel, title="Recientes", action="recientes", url=HOST)) + + itemlist.append(Item(channel=item.channel, title="Alfabético", action="alfabetico", url=HOST)) + + itemlist.append(Item(channel=item.channel, title="Búsqueda", action="search", url=urlparse.urljoin(HOST, "busqueda?terminos="))) + + + return itemlist + + +def novedades(item): + logger.info() + itemlist = list() + if not item.pagina: + item.pagina = 0 + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |
    ", "", data) + + patron_novedades = '
    [\s\S]+?

    Comentarios

    ' + + data_novedades = scrapertools.find_single_match(data, patron_novedades) + + patron = 'href="([^"]+)"[\s\S]+?src="([^"]+)"[^<]+alt="([^"]+) (\d+)([^"]+)' + + matches = scrapertools.find_multiple_matches(data_novedades, patron) + + for url, img, scrapedtitle, eps, info in matches[item.pagina:item.pagina + 20]: + title = scrapedtitle + " " + "1x" + eps + info + title = title.replace("Sub Español", "").replace("sub español", "") + infoLabels = {'filtro': {"original_language": "ja"}.items()} + itemlist.append(Item(channel=item.channel, title=title, url=url, thumb=img, action="findvideos", contentTitle=scrapedtitle, contentSerieName=scrapedtitle, infoLabels=infoLabels, contentType="tvshow")) + try: + from core import tmdb + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) + for it in itemlist: + it.thumbnail = it.thumb + except: + pass + + if len(matches) > item.pagina + 20: + pagina = item.pagina + 20 + itemlist.append(item.clone(channel=item.channel, action="novedades", url=item.url, title=">> Página Siguiente", pagina=pagina)) + + return itemlist + + +def alfabetico(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |
    ", "", data) + + + for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ': + titulo = letra + if letra == "0": + letra = "num" + itemlist.append(Item(channel=item.channel, action="recientes", title=titulo, + url=urlparse.urljoin(HOST, "animes?tipo=0&genero=0&anio=0&letra={letra}".format(letra=letra)))) + + + return itemlist + + +def search(item, texto): + logger.info() + + texto = texto.replace(" ","+") + item.url = item.url+texto + if texto!='': + return recientes(item) + + +def recientes(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |
    ", "", data) + + patron_recientes = '
    [\s\S]+?' + + data_recientes = scrapertools.find_single_match(data, patron_recientes) + + patron = '(.*?)<.*?

    (.*?)(.*?)' + + matches = scrapertools.find_multiple_matches(data_recientes, patron) + + for url, thumbnail, plot, title, cat in matches: + itemlist.append(item.clone(title=title, url=url, action="episodios", show=title, thumbnail=thumbnail, plot=plot, cat=cat)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) + + paginacion = scrapertools.find_single_match(data, '", "", data) + + patron = 'Player\("(.*?)"' + + matches = scrapertools.find_multiple_matches(data, patron) + + for url in matches: + if "cldup" in url: + title = "Opcion Cldup" + if "chumi" in url: + title = "Opcion Chumi" + itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=url)) + + if item.extra != "library": + if config.get_videolibrary_support() and item.extra: + itemlist.append(item.clone(channel=item.channel, title="[COLOR yellow]Añadir pelicula a la videoteca[/COLOR]", url=item.url, action="add_pelicula_to_library", extra="library", contentTitle=item.show, contentType="movie")) + + return itemlist + + +def player(item): + logger.info() + itemlist = [] + + data = httptools.downloadpage(item.url, add_referer=True).data + data = re.sub(r"\n|\r|\t| |
    ", "", data) + + url = scrapertools.find_single_match(data, 'sources: \[{file:\'(.*?)\'') + + itemlist = servertools.find_video_items(data=data) + + return itemlist + diff --git a/plugin.video.alfa/channels/areadocumental.py b/plugin.video.alfa/channels/areadocumental.py index 6a04ef75..b360e078 100644 --- a/plugin.video.alfa/channels/areadocumental.py +++ b/plugin.video.alfa/channels/areadocumental.py @@ -23,12 +23,12 @@ def mainlist(item): itemlist = [] item.text_color = color1 itemlist.append(item.clone(title="Novedades", action="entradas", - url="http://www.area-documental.com/resultados-reciente.php?buscar=&genero=", + url= host + "/resultados-reciente.php?buscar=&genero=", fanart="http://i.imgur.com/Q7fsFI6.png")) itemlist.append(item.clone(title="Destacados", action="entradas", - url="http://www.area-documental.com/resultados-destacados.php?buscar=&genero=", + url= host + "/resultados-destacados.php?buscar=&genero=", fanart="http://i.imgur.com/Q7fsFI6.png")) - itemlist.append(item.clone(title="Categorías", action="cat", url="http://www.area-documental.com/index.php", + itemlist.append(item.clone(title="Categorías", action="cat", url= host + "/index.php", fanart="http://i.imgur.com/Q7fsFI6.png")) itemlist.append(item.clone(title="Ordenados por...", action="indice", fanart="http://i.imgur.com/Q7fsFI6.png")) @@ -47,7 +47,7 @@ def configuracion(item): def search(item, texto): logger.info() - item.url = "http://www.area-documental.com/resultados.php?buscar=%s&genero=&x=0&y=0" % texto + item.url = host + "/resultados.php?buscar=%s&genero=&x=0&y=0" % texto item.action = "entradas" try: itemlist = entradas(item) @@ -65,7 +65,7 @@ def newest(categoria): item = Item() try: if categoria == "documentales": - item.url = "http://www.area-documental.com/resultados-reciente.php?buscar=&genero=" + item.url = host + "/resultados-reciente.php?buscar=&genero=" item.action = "entradas" itemlist = entradas(item) @@ -86,9 +86,9 @@ def indice(item): logger.info() itemlist = [] itemlist.append(item.clone(title="Título", action="entradas", - url="http://www.area-documental.com/resultados-titulo.php?buscar=&genero=")) + url= host + "/resultados-titulo.php?buscar=&genero=")) itemlist.append(item.clone(title="Año", action="entradas", - url="http://www.area-documental.com/resultados-anio.php?buscar=&genero=")) + url= host + "/resultados-anio.php?buscar=&genero=")) return itemlist @@ -125,9 +125,13 @@ def entradas(item): data2 = "" data = data.replace("\n", "").replace("\t", "") - patron = '
    .*?(.*?)(.*?)

    (.*?)

    ' \ - '.*?: (.*?).*?(.*?)
    ' + patron = '(?s)
    .*?a href="([^"]+)".*?' + patron += ' 0 and item.extra != 'findvideos': itemlist.append( diff --git a/plugin.video.alfa/channels/cinetux.py b/plugin.video.alfa/channels/cinetux.py index fcf41397..39a48373 100644 --- a/plugin.video.alfa/channels/cinetux.py +++ b/plugin.video.alfa/channels/cinetux.py @@ -298,6 +298,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item): url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "") if "player" in url: scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)') + if "ok" in scrapedserver: scrapedserver = "okru" matches.append([url, scrapedserver, "", language.strip(), t_tipo]) bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single') bloque2 = bloque2.replace("\t", "").replace("\r", "") @@ -347,10 +348,12 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item): def play(item): logger.info() itemlist = [] - if "api.cinetux" in item.url: + if "api.cinetux" in item.url or item.server == "okru": data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "") id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"') item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id + if item.server == "okru": + item.url = "https://ok.ru/videoembed/" + id elif "links" in item.url or "www.cinetux.me" in item.url: data = httptools.downloadpage(item.url).data scrapedurl = scrapertools.find_single_match(data, '') if paginacion: - itemlist.append(channel=item.channel, action="sub_search", title="Next page >>" , url=paginacion) + itemlist.append(Item(channel=item.channel, action="sub_search", title="Next page >>", url=paginacion)) return itemlist + def peliculas(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |
    ", "", data) - patron = '
    .*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"' - matches = re.compile(patron,re.DOTALL).findall(data) - + patron = '
    .*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"' + matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedyear, scrapedtitle, scrapedthumbnail in matches: - - url = scrapedurl - title = scrapedtitle year = scrapertools.find_single_match(scrapedyear, r'.*?\((\d{4})\)') - thumbnail = scrapedthumbnail - new_item =Item (channel = item.channel, action="findvideos", title=title, contentTitle=title, url=url, - thumbnail=thumbnail, infoLabels = {'year':year}) - if year: - tmdb.set_infoLabels_item(new_item) + itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle = scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, infoLabels={'year': year})) - itemlist.append(new_item) - - next_page_url = scrapertools.find_single_match(data,'') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) + tmdb.set_infoLabels(itemlist, True) + next_page_url = scrapertools.find_single_match(data, '' in s_p[0]: - return [Item(channel=item.channel, title=bbcode_kodi2html( - "[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20', - ' ') + "[/COLOR] sin resultados"))] + return [Item(channel=item.channel, title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" + texto.replace('%20', + ' ') + "[/COLOR] sin resultados")] else: data = s_p[0] + s_p[1] else: @@ -321,12 +319,12 @@ def fichas(item): if scrapedlangs != ">": textoidiomas, language = extrae_idiomas(scrapedlangs) #Todo Quitar el idioma - title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])") + title += " ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])" if scrapedrating != ">": valoracion = re.sub(r'><[^>]+>(\d+)(\d+)', r'\1,\2', scrapedrating) infoLabels['rating']=valoracion - title += bbcode_kodi2html(" ([COLOR orange]" + valoracion + "[/COLOR])") + title += " ([COLOR orange]" + valoracion + "[/COLOR])" url = urlparse.urljoin(item.url, scrapedurl) @@ -346,7 +344,7 @@ def fichas(item): if item.title == "Buscar...": tag_type = scrapertools.get_match(url, 'l.tv/([^/]+)/') - title += bbcode_kodi2html(" - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]") + title += " - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]" itemlist.append( Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail, @@ -388,7 +386,7 @@ def episodios(item): str = get_status(status, "shows", id) if str != "" and account and item.category != "Series" and "XBMC" not in item.title: if config.get_videolibrary_support(): - title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )") + title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )" itemlist.append( Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False)) @@ -397,11 +395,11 @@ def episodios(item): thumbnail=item.thumbnail, show=item.show, folder=True)) elif account and item.category != "Series" and "XBMC" not in item.title: if config.get_videolibrary_support(): - title = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )") + title = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )" itemlist.append( Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False)) - title = bbcode_kodi2html(" ( [COLOR orange][B]Seguir[/B][/COLOR] )") + title = " ( [COLOR orange][B]Seguir[/B][/COLOR] )" 
itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=True)) @@ -436,7 +434,7 @@ def episodios(item): idiomas = "( [COLOR teal][B]" for idioma in episode['languages']: idiomas += idioma + " " idiomas += "[/B][/COLOR])" - idiomas = bbcode_kodi2html(idiomas) + idiomas = idiomas else: idiomas = "" @@ -513,7 +511,7 @@ def novedades_episodios(item): idiomas = "( [COLOR teal][B]" for idioma in episode['languages']: idiomas += idioma + " " idiomas += "[/B][/COLOR])" - idiomas = bbcode_kodi2html(idiomas) + idiomas = idiomas else: idiomas = "" @@ -522,7 +520,7 @@ def novedades_episodios(item): except: show = episode['show']['title']['en'].strip() - show = bbcode_kodi2html("[COLOR whitesmoke][B]" + show + "[/B][/COLOR]") + show = "[COLOR whitesmoke][B]" + show + "[/B][/COLOR]" if episode['title']: try: @@ -610,8 +608,9 @@ def generos_series(item): def findvideos(item): logger.info() - itemlist = [] + it1 = [] + it2 = [] ## Carga estados status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data) url_targets = item.url @@ -623,21 +622,21 @@ def findvideos(item): item.url = item.url.split("###")[0] if type == "2" and account and item.category != "Cine": - title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )") + title = " ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )" if "Favorito" in item.title: - title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )") + title = " ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )" if config.get_videolibrary_support(): - title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )") - itemlist.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label, + title_label = " ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )" + it1.append(Item(channel=item.channel, action="findvideos", title=title_label, fulltitle=title_label, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False)) - title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )") + title_label = " ( [COLOR green][B]Tráiler[/B][/COLOR] )" - itemlist.append( - Item(channel=item.channel, action="trailer", title=title_label, fulltitle=title_label, url=url_targets, + it1.append( + item.clone(channel="trailertools", action="buscartrailer", title=title_label, contentTitle=item.show, url=item.url, thumbnail=item.thumbnail, show=item.show)) - itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, + it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=True)) data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data @@ -663,7 +662,6 @@ def findvideos(item): infolabels = {} year = scrapertools.find_single_match(data, 'Año:\s*.*?(\d{4})') infolabels["year"] = year - matches = [] for match in data_decrypt: prov = eval(scrapertools.find_single_match(data_js, 'p\[%s\]\s*=\s*(\{.*?\}[\'"]\})' % match["provider"])) @@ -676,93 +674,43 @@ def findvideos(item): matches.append([match["lang"], match["quality"], url, embed]) - enlaces = [] for idioma, calidad, url, embed in matches: - servername = scrapertools.find_single_match(url, "(?:http:|https:)//(?:www.|)([^.]+).") - if servername == "streamin": servername = "streaminto" - if servername == "waaw": servername = 
"netutv" - if servername == "uploaded" or servername == "ul": servername = "uploadedto" mostrar_server = True - if config.get_setting("hidepremium") == True: - mostrar_server = servertools.is_server_enabled(servername) - if mostrar_server: - option = "Ver" - if re.search(r'return ([\'"]{2,}|\})', embed): - option = "Descargar" - calidad = unicode(calidad, "utf8").upper().encode("utf8") - servername_c = unicode(servername, "utf8").capitalize().encode("utf8") - title = option + ": " + servername_c + " (" + calidad + ")" + " (" + idioma + ")" - thumbnail = item.thumbnail - plot = item.title + "\n\n" + scrapertools.find_single_match(data, - '> Añadir a la videoteca..." - try: - itemlist.extend(file_cine_library(item, url_targets)) - except: - pass + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green", + action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail, + fulltitle = item.contentTitle + )) return itemlist -def trailer(item): - import youtube - itemlist = [] - item.url = "https://www.googleapis.com/youtube/v3/search" + \ - "?q=" + item.show.replace(" ", "+") + "+trailer+HD+Español" \ - "®ionCode=ES" + \ - "&part=snippet" + \ - "&hl=es_ES" + \ - "&key=AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA" + \ - "&type=video" + \ - "&maxResults=50" + \ - "&pageToken=" - itemlist.extend(youtube.fichas(item)) - # itemlist.pop(-1) - return itemlist - - -def file_cine_library(item, url_targets): - import os - from core import filetools - videolibrarypath = os.path.join(config.get_videolibrary_path(), "CINE") - archivo = item.show.strip() - strmfile = archivo + ".strm" - strmfilepath = filetools.join(videolibrarypath, strmfile) - - if not os.path.exists(strmfilepath): - itemlist = [] - itemlist.append(Item(channel=item.channel, title=">> Añadir a la videoteca...", url=url_targets, - action="add_file_cine_library", extra="episodios", show=archivo)) - - return itemlist - - -def add_file_cine_library(item): - from core import videolibrarytools - new_item = item.clone(title=item.show, action="play_from_library") - videolibrarytools.save_movie(new_item) - itemlist = [] - itemlist.append(Item(title='El vídeo ' + item.show + ' se ha añadido a la videoteca')) - # xbmctools.renderItems(itemlist, "", "", "") - platformtools.render_items(itemlist, "") - - return - def play(item): if "###" in item.url: @@ -780,13 +728,11 @@ def play(item): if devuelve: item.url = devuelve[0][1] item.server = devuelve[0][2] - + item.thumbnail = item.contentThumbnail + item.fulltitle = item.contentTitle return [item] -## -------------------------------------------------------------------------------- -## -------------------------------------------------------------------------------- - def agrupa_datos(data): ## Agrupa los datos data = re.sub(r'\n|\r|\t| |
    |', '', data) @@ -810,22 +756,6 @@ def extrae_idiomas(bloqueidiomas): return textoidiomas, language -def bbcode_kodi2html(text): - if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"): - import re - text = re.sub(r'\[COLOR\s([^\]]+)\]', - r'', - text) - text = text.replace('[/COLOR]', '') - text = text.replace('[CR]', '
    ') - text = re.sub(r'\[([^\]]+)\]', - r'<\1>', - text) - text = text.replace('"color: white"', '"color: auto"') - - return text - - ## -------------------------------------------------------------------------------- def set_status(item): @@ -853,7 +783,7 @@ def set_status(item): data = httptools.downloadpage(host + path, post=post).data - title = bbcode_kodi2html("[COLOR green][B]OK[/B][/COLOR]") + title = "[COLOR green][B]OK[/B][/COLOR]" return [Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=item.url, thumbnail=item.thumbnail, show=item.show, folder=False)] @@ -871,15 +801,14 @@ def get_status(status, type, id): try: if id in status['favorites'][type]: - str1 = bbcode_kodi2html(" [COLOR orange][B]Favorito[/B][/COLOR]") + str1 = " [COLOR orange][B]Favorito[/B][/COLOR]" except: str1 = "" try: if id in status['status'][type]: str2 = state[status['status'][type][id]] - if str2 != "": str2 = bbcode_kodi2html( - " [COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]") + if str2 != "": str2 = "[COLOR green][B]" + state[status['status'][type][id]] + "[/B][/COLOR]" except: str2 = "" diff --git a/plugin.video.alfa/channels/copiapop.json b/plugin.video.alfa/channels/kbagi.json similarity index 75% rename from plugin.video.alfa/channels/copiapop.json rename to plugin.video.alfa/channels/kbagi.json index acdb4daf..2f292d54 100644 --- a/plugin.video.alfa/channels/copiapop.json +++ b/plugin.video.alfa/channels/kbagi.json @@ -1,22 +1,10 @@ { - "id": "copiapop", - "name": "Copiapop/Diskokosmiko", + "id": "kbagi", + "name": "Kbagi/Diskokosmiko", "language": ["cast", "lat"], "active": true, "adult": false, "version": 1, - "changes": [ - { - "date": "15/03/2017", - "autor": "SeiTaN", - "description": "limpieza código" - }, - { - "date": "16/02/2017", - "autor": "Cmos", - "description": "Primera versión" - } - ], "thumbnail": "http://i.imgur.com/EjbfM7p.png?1", "banner": "copiapop.png", "categories": [ @@ -33,19 +21,19 @@ "visible": true }, { - "id": "copiapopuser", + "id": "kbagiuser", "type": "text", "color": "0xFF25AA48", - "label": "Usuario Copiapop", + "label": "Usuario Kbagi", "enabled": true, "visible": true }, { - "id": "copiapoppassword", + "id": "kbagipassword", "type": "text", "color": "0xFF25AA48", "hidden": true, - "label": "Password Copiapop", + "label": "Password Kbagi", "enabled": "!eq(-1,'')", "visible": true }, diff --git a/plugin.video.alfa/channels/copiapop.py b/plugin.video.alfa/channels/kbagi.py old mode 100755 new mode 100644 similarity index 90% rename from plugin.video.alfa/channels/copiapop.py rename to plugin.video.alfa/channels/kbagi.py index cf1661d8..325c9547 --- a/plugin.video.alfa/channels/copiapop.py +++ b/plugin.video.alfa/channels/kbagi.py @@ -9,7 +9,7 @@ from core import scrapertools from core.item import Item from platformcode import config, logger -__perfil__ = config.get_setting('perfil', "copiapop") +__perfil__ = config.get_setting('perfil', "kbagi") # Fijar perfil de color perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'], @@ -21,20 +21,20 @@ if __perfil__ - 1 >= 0: else: color1 = color2 = color3 = color4 = color5 = "" -adult_content = config.get_setting("adult_content", "copiapop") +adult_content = config.get_setting("adult_content", "kbagi") def login(pagina): logger.info() try: - user = config.get_setting("%suser" % pagina.split(".")[0], "copiapop") - password = config.get_setting("%spassword" % pagina.split(".")[0], "copiapop") - if pagina == "copiapop.com": + user = 
config.get_setting("%suser" % pagina.split(".")[0], "kbagi") + password = config.get_setting("%spassword" % pagina.split(".")[0], "kbagi") + if pagina == "kbagi.com": if user == "" and password == "": - return False, "Para ver los enlaces de copiapop es necesario registrarse en copiapop.com" + return False, "Para ver los enlaces de kbagi es necesario registrarse en kbagi.com" elif user == "" or password == "": - return False, "Copiapop: Usuario o contraseña en blanco. Revisa tus credenciales" + return False, "kbagi: Usuario o contraseña en blanco. Revisa tus credenciales" else: if user == "" or password == "": return False, "DiskoKosmiko: Usuario o contraseña en blanco. Revisa tus credenciales" @@ -65,19 +65,19 @@ def mainlist(item): itemlist = [] item.text_color = color1 - logueado, error_message = login("copiapop.com") + logueado, error_message = login("kbagi.com") if not logueado: itemlist.append(item.clone(title=error_message, action="configuracion", folder=False)) else: - item.extra = "http://copiapop.com" - itemlist.append(item.clone(title="Copiapop", action="", text_color=color2)) + item.extra = "http://kbagi.com" + itemlist.append(item.clone(title="kbagi", action="", text_color=color2)) itemlist.append( - item.clone(title=" Búsqueda", action="search", url="http://copiapop.com/action/SearchFiles")) + item.clone(title=" Búsqueda", action="search", url="http://kbagi.com/action/SearchFiles")) itemlist.append(item.clone(title=" Colecciones", action="colecciones", - url="http://copiapop.com/action/home/MoreNewestCollections?pageNumber=1")) + url="http://kbagi.com/action/home/MoreNewestCollections?pageNumber=1")) itemlist.append(item.clone(title=" Búsqueda personalizada", action="filtro", - url="http://copiapop.com/action/SearchFiles")) + url="http://kbagi.com/action/SearchFiles")) itemlist.append(item.clone(title=" Mi cuenta", action="cuenta")) item.extra = "http://diskokosmiko.mx/" @@ -90,7 +90,7 @@ def mainlist(item): itemlist.append(item.clone(title=" Mi cuenta", action="cuenta")) itemlist.append(item.clone(action="", title="")) - folder_thumb = filetools.join(config.get_data_path(), 'thumbs_copiapop') + folder_thumb = filetools.join(config.get_data_path(), 'thumbs_kbagi') files = filetools.listdir(folder_thumb) if files: itemlist.append( @@ -133,7 +133,7 @@ def listado(item): data = httptools.downloadpage(item.url, item.post).data data = re.sub(r"\n|\r|\t|\s{2}| |
    ", "", data) - folder = filetools.join(config.get_data_path(), 'thumbs_copiapop') + folder = filetools.join(config.get_data_path(), 'thumbs_kbagi') patron = '
    (.*?)
    ' bloques = scrapertools.find_multiple_matches(data, patron) for block in bloques: @@ -204,7 +204,7 @@ def findvideos(item): logger.info() itemlist = [] - itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="copiapop")) + itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="kbagi")) usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra) url_usuario = item.extra + "/" + usuario @@ -265,7 +265,7 @@ def colecciones(item): matches = matches[:20] index = 20 - folder = filetools.join(config.get_data_path(), 'thumbs_copiapop') + folder = filetools.join(config.get_data_path(), 'thumbs_kbagi') for url, scrapedtitle, thumb, info in matches: url = item.extra + url + "/gallery,1,1?ref=pager" title = "%s (%s)" % (scrapedtitle, scrapertools.htmlclean(info)) @@ -313,7 +313,7 @@ def cuenta(item): import urllib itemlist = [] - web = "copiapop" + web = "kbagi" if "diskokosmiko" in item.extra: web = "diskokosmiko" logueado, error_message = login("diskokosmiko.mx") @@ -321,7 +321,7 @@ def cuenta(item): itemlist.append(item.clone(title=error_message, action="configuracion", folder=False)) return itemlist - user = config.get_setting("%suser" % web, "copiapop") + user = config.get_setting("%suser" % web, "kbagi") user = unicode(user, "utf8").lower().encode("utf8") url = item.extra + "/" + urllib.quote(user) data = httptools.downloadpage(url).data @@ -364,7 +364,7 @@ def filtro(item): 'type': 'text', 'default': '0', 'visible': True}) # Se utilizan los valores por defecto/guardados - web = "copiapop" + web = "kbagi" if "diskokosmiko" in item.extra: web = "diskokosmiko" valores_guardados = config.get_setting("filtro_defecto_" + web, item.channel) @@ -378,7 +378,7 @@ def filtro(item): def filtrado(item, values): values_copy = values.copy() - web = "copiapop" + web = "kbagi" if "diskokosmiko" in item.extra: web = "diskokosmiko" # Guarda el filtro para que sea el que se cargue por defecto @@ -407,7 +407,7 @@ def download_thumb(filename, url): lock = threading.Lock() lock.acquire() - folder = filetools.join(config.get_data_path(), 'thumbs_copiapop') + folder = filetools.join(config.get_data_path(), 'thumbs_kbagi') if not filetools.exists(folder): filetools.mkdir(folder) lock.release() @@ -419,7 +419,7 @@ def download_thumb(filename, url): def delete_cache(url): - folder = filetools.join(config.get_data_path(), 'thumbs_copiapop') + folder = filetools.join(config.get_data_path(), 'thumbs_kbagi') filetools.rmdirtree(folder) if config.is_xbmc(): import xbmc diff --git a/plugin.video.alfa/channels/maxipelis.py b/plugin.video.alfa/channels/maxipelis.py index ce8c7f8d..8f6b07de 100644 --- a/plugin.video.alfa/channels/maxipelis.py +++ b/plugin.video.alfa/channels/maxipelis.py @@ -104,9 +104,10 @@ def peliculas(item): new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle , infoLabels={'year':year} ) - if year: - tmdb.set_infoLabels_item(new_item) + #if year: + # tmdb.set_infoLabels_item(new_item) itemlist.append(new_item) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) try: patron = '
    ' next_page = re.compile(patron,re.DOTALL).findall(data) diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py index 009ea8c6..d64b241b 100644 --- a/plugin.video.alfa/channels/newpct1.py +++ b/plugin.video.alfa/channels/newpct1.py @@ -339,20 +339,20 @@ def episodios(item): infoLabels = item.infoLabels data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data) data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8") - logger.debug('data: %s'%data) pattern = '
      (.*?)
    ' % "pagination" # item.pattern pagination = scrapertools.find_single_match(data, pattern) if pagination: pattern = '
  • Last<\/a>' full_url = scrapertools.find_single_match(pagination, pattern) url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)') - list_pages = [] - for x in range(1, int(last_page) + 1): - list_pages.append("%s%s" % (url, x)) + list_pages = [item.url] + for x in range(2, int(last_page) + 1): + response = httptools.downloadpage('%s%s'% (url,x)) + if response.sucess: + list_pages.append("%s%s" % (url, x)) else: list_pages = [item.url] - logger.debug ('pattern: %s'%pattern) for index, page in enumerate(list_pages): logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page)) data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data) @@ -424,7 +424,7 @@ def episodios(item): if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios")) - + return itemlist def search(item, texto): diff --git a/plugin.video.alfa/channels/ohpelis.py b/plugin.video.alfa/channels/ohpelis.py index 910ab14c..24c3a9dc 100644 --- a/plugin.video.alfa/channels/ohpelis.py +++ b/plugin.video.alfa/channels/ohpelis.py @@ -14,18 +14,19 @@ from core.item import Item from platformcode import config, logger host = 'http://www.ohpelis.com' -headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0 Chrome/58.0.3029.110', - 'Referer': host} - def mainlist(item): logger.info() - itemlist = [] + data = httptools.downloadpage(host).data + patron = '
  • (.*?)<\/a> (\d+)<\/i>' + matches = scrapertools.find_multiple_matches(data, patron) + mcantidad = 0 + for scrapedurl, scrapedtitle, cantidad in matches: + mcantidad += int(cantidad) itemlist.append( - item.clone(title="Peliculas", + item.clone(title="Peliculas (%s)" %mcantidad, action='movies_menu' )) @@ -95,14 +96,14 @@ def list_all(item): for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches: title = scrapedtitle - plot = scrapedplot thumbnail = scrapedthumbnail url = scrapedurl year = scrapedyear new_item = (item.clone(title=title, url=url, thumbnail=thumbnail, - plot=plot, + fulltitle=title, + contentTitle=title, infoLabels={'year': year} )) if item.extra == 'serie': @@ -114,7 +115,7 @@ def list_all(item): itemlist.append(new_item) - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + tmdb.set_infoLabels(itemlist, True) # Paginacion next_page = scrapertools.find_single_match(data, ' 0 and item.extra != 'findvideos': itemlist.append( @@ -288,9 +290,9 @@ def findvideos(item): url=item.url, action="add_pelicula_to_library", extra="findvideos", - contentTitle=item.contentTitle, )) - + tmdb.set_infoLabels(itemlist, True) + itemlist = servertools.get_servers_itemlist(itemlist) return itemlist @@ -314,3 +316,8 @@ def newest(categoria): return [] return itemlist + +def play(item): + logger.info() + item.thumbnail = item.contentThumbnail + return [item] diff --git a/plugin.video.alfa/channels/pedropolis.py b/plugin.video.alfa/channels/pedropolis.py index 45cc0b15..845cba80 100644 --- a/plugin.video.alfa/channels/pedropolis.py +++ b/plugin.video.alfa/channels/pedropolis.py @@ -120,40 +120,51 @@ def peliculas(item): if len(matches_next_page) > 0: url_next_page = urlparse.urljoin(item.url, matches_next_page[0]) - for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches: - if 'Proximamente' not in calidad: + for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches: + if 'Proximamente' not in quality: scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace( 'Español Latino', '').strip() - title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, calidad) + title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality) - new_item = Item(channel=__channel__, action="findvideos", contentTitle=scrapedtitle, - infoLabels={'year': year, 'rating': rating}, thumbnail=scrapedthumbnail, - url=scrapedurl, next_page=next_page, quality=calidad, title=title) - if year: - tmdb.set_infoLabels_item(new_item, __modo_grafico__) - itemlist.append(new_item) + + + itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle, + infoLabels={"year":year, "rating":rating}, thumbnail=scrapedthumbnail, + url=scrapedurl, next_page=next_page, quality=quality, title=title)) + + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) if url_next_page: itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »", url=url_next_page, next_page=next_page, folder=True, text_blod=True, thumbnail=get_thumb("next.png"))) - for item in itemlist: - if item.infoLabels['plot'] == '': - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - # logger.info(data) - item.fanart = scrapertools.find_single_match(data, - "").replace( - 'w780', 'original') - item.plot = scrapertools.find_single_match(data, '
    .*?

    ([' - '^<]+)

    ') - item.plot = scrapertools.htmlclean(item.plot) - item.infoLabels['director'] = scrapertools.find_single_match(data, - '
    ([^<]+)') - item.infoLabels['rating'] = scrapertools.find_single_match(data, '([^<]+)') - item.infoLabels['votes'] = scrapertools.find_single_match(data, '[' - '^<]+\s(.*?) votos') + for no_plot in itemlist: + if no_plot.infoLabels['plot'] == '': + thumb_id = scrapertools.find_single_match(no_plot.thumbnail, '.*?\/\d{2}\/(.*?)-') + thumbnail = "/%s.jpg" % thumb_id + filtro_list = {"poster_path": thumbnail} + filtro_list = filtro_list.items() + no_plot.infoLabels={'filtro':filtro_list} + tmdb.set_infoLabels_item(no_plot, __modo_grafico__) + + if no_plot.infoLabels['plot'] == '': + data = httptools.downloadpage(no_plot.url).data + data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + # logger.info(data) + no_plot.fanart = scrapertools.find_single_match(data, + "").replace( + 'w780', 'original') + no_plot.plot = scrapertools.find_single_match(data, '
    .*?

    ([' + '^<]+)

    ') + no_plot.plot = scrapertools.htmlclean(no_plot.plot) + no_plot.infoLabels['director'] = scrapertools.find_single_match(data, + '
    ([^<]+)') + no_plot.infoLabels['rating'] = scrapertools.find_single_match(data, '([' + '^<]+)') + no_plot.infoLabels['votes'] = scrapertools.find_single_match(data, '[' + '^<]+\s(.*?) votos') return itemlist diff --git a/plugin.video.alfa/channels/peliculasaudiolatino.py b/plugin.video.alfa/channels/peliculasaudiolatino.py index d5a021ac..197cc18c 100644 --- a/plugin.video.alfa/channels/peliculasaudiolatino.py +++ b/plugin.video.alfa/channels/peliculasaudiolatino.py @@ -155,6 +155,8 @@ def findvideos(item): url = scrapedurl server = servertools.get_server_name(servidor) title = "Enlace encontrado en %s" % (server) + if idioma == 'Ingles Subtitulado': + idioma = 'vose' itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=item.fulltitle, url=url, thumbnail=scrapedthumbnail, language=idioma, quality=calidad, server=server)) if itemlist: diff --git a/plugin.video.alfa/channels/peliculasmx.py b/plugin.video.alfa/channels/peliculasmx.py index 4055fd26..81ccd672 100644 --- a/plugin.video.alfa/channels/peliculasmx.py +++ b/plugin.video.alfa/channels/peliculasmx.py @@ -76,14 +76,11 @@ def peliculas(item): tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) # Extrae la marca de siguiente página - paginador = scrapertools.find_single_match(data, "
    .*?lateral") + next_page = scrapertools.find_single_match(data, 'class="nextpostslink" rel="next" href="(.*?)">') - patron = ".*?href='([^']+)" - scrapedurl = scrapertools.find_single_match(paginador, patron) - - if scrapedurl: + if next_page: scrapedtitle = "!Pagina Siguiente ->" - itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, folder=True)) + itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=next_page, folder=True)) return itemlist diff --git a/plugin.video.alfa/channels/peliculasrey.py b/plugin.video.alfa/channels/peliculasrey.py index bcaa5abd..ebd7c362 100755 --- a/plugin.video.alfa/channels/peliculasrey.py +++ b/plugin.video.alfa/channels/peliculasrey.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- import re -import urlparse from core import httptools from core import scrapertools from core import servertools +from core import tmdb from core.item import Item from platformcode import logger, config @@ -16,117 +16,54 @@ def mainlist(item): itemlist = [] itemlist.append(Item(channel=item.channel, action="peliculas", title="Recientes", url=host)) - itemlist.append(Item(channel=item.channel, action="PorFecha", title="Año de Lanzamiento", url=host)) - itemlist.append(Item(channel=item.channel, action="Idiomas", title="Idiomas", url=host)) - itemlist.append(Item(channel=item.channel, action="calidades", title="Por calidad", url=host)) - itemlist.append(Item(channel=item.channel, action="generos", title="Por género", url=host)) + itemlist.append(Item(channel = item.channel, + action = "filtro", + title = "Año de Lanzamiento", + category = "lanzamiento" + )) + itemlist.append(Item(channel = item.channel, + action = "filtro", + title = "Idiomas", + category = "idioma" + )) + itemlist.append(Item(channel = item.channel, + action = "filtro", + title = "Por calidad", + category = "calidades" + )) + itemlist.append(Item(channel = item.channel, + action = "filtro", + title = "Por género", + category = "generos" + )) itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url=host)) return itemlist -def PorFecha(item): - logger.info() - - # Descarga la pagina - data = httptools.downloadpage(item.url).data - data = scrapertools.find_single_match(data, '
    (.*?)
    ') - - # Extrae las entradas (carpetas) - patron = '(.*?)') - - # Extrae las entradas (carpetas) - patron = '(.*?)') - - # Extrae las entradas (carpetas) - patron = '(.*?)') - patron = '(.*?)
    ') patron = '([^.*?Duración') - for element in patron: info = scrapertools.find_single_match(element, "calidad>(.*?)<.*?ahref=(.*?)>.*?'reflectMe' src=(.*?)\/>.*?

    (.*?)" @@ -103,28 +96,22 @@ def agregadas(item): title = info[3] plot = info[4] year = info[5].strip() - itemlist.append(Item(channel=item.channel, action='findvideos', contentType = "movie", + contentTitle = title, fulltitle = title, infoLabels={'year':year}, - plot=plot, quality=quality, thumbnail=thumbnail, title=title, - contentTitle = title, url=url )) - # Paginación - try: - next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) >.*?href=(.*?) > (.*?)

    ' - matches = re.compile(patron, re.DOTALL).findall(data) + matches = scrapertools.find_multiple_matches(data, patron) for url, thumbnail, title, sinopsis in matches: itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url, @@ -150,17 +135,13 @@ def listaBuscar(item): def findvideos(item): logger.info() - itemlist = [] - plot = item.plot - # Descarga la pagina data = httptools.downloadpage(item.url).data patron = 'cursor: hand" rel="(.*?)".*?class="optxt">(.*?)<.*?width.*?class="q">(.*?)Titulo original: ([^<]+)

    ') - contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip()) - rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" />
    ([^<]+)') - director = scrapertools.find_single_match( - datas, '') - title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper()) + title = scrapedtitle + contentTitle = title + url = scrapedurl + quality = quality + thumbnail = scrapedthumbnail - new_item = Item(channel=item.channel, action="findvideos", title=title, plot='', contentType='movie', - url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail, - contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director}, - text_color=color3) + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, url=url, + quality=quality, + thumbnail=thumbnail, + contentTitle=contentTitle, + infoLabels={"year": year}, + text_color=color3 + )) - if year: - tmdb.set_infoLabels_item(new_item, __modo_grafico__) - itemlist.append(new_item) + # for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches: + # datas = httptools.downloadpage(scrapedurl).data + # datas = re.sub(r"\n|\r|\t|\s{2}| ", "", datas) + # # logger.info(datas) + # if '/ ' in scrapedtitle: + # scrapedtitle = scrapedtitle.partition('/ ')[2] + # contentTitle = scrapertools.find_single_match(datas, 'Titulo original: ([^<]+)

    ') + # contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip()) + # rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" />([^<]+)') + # director = scrapertools.find_single_match( + # datas, '') + # title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper()) + # + # logger.debug('thumbnail: %s' % scrapedthumbnail) + # new_item = Item(channel=item.channel, action="findvideos", title=title, plot='', contentType='movie', + # url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail, + # contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director}, + # text_color=color3) + # itemlist.append(new_item) + tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) paginacion = scrapertools.find_single_match(data, '

    ([^<]+)<\/h2>next &') - while item.url[-1] != '=': - item.url = item.url[:-1] - next_page_url = item.url + next_page - if next_page != '': - itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page_url, - thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png', extra=item.extra)) - return itemlist - - -def temporadas(item): - logger.info() - itemlist = [] - templist = [] - data = httptools.downloadpage(item.url).data - - patron = 'class="listatemporadas" >([^<]+)<' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - url = host + scrapedurl - title = scrapedtitle - thumbnail = scrapedthumbnail - plot = '' - fanart = '' - contentSeasonNumber = scrapedtitle.replace('Temporada ', '') - - itemlist.append(Item(channel=item.channel, action="episodiosxtemp", title=title, fulltitle=item.title, url=url, - thumbnail=thumbnail, plot=plot, fanart=fanart, contentSerieName=item.contentSerieName, - contentSeasonNumber=contentSeasonNumber)) - - if item.extra == 'temporadas': - for tempitem in itemlist: - templist += episodiosxtemp(tempitem) - - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append( - Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, - action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) - - return itemlist - - -def episodios(item): - logger.info() - itemlist = [] - templist = temporadas(item) - for tempitem in templist: - itemlist += episodiosxtemp(tempitem) - - return itemlist - - -def episodiosxtemp(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '' - matches = re.compile(patron, re.DOTALL).findall(data) - ep = 1 - for scrapedtitle in matches: - scrapedtitle = scrapedtitle.replace(item.contentSeasonNumber + 'x' + '0' + str(ep), '') - url = host + '/VerCapitulo/' + scrapedtitle.replace(' ', '-') - title = item.contentSeasonNumber + 'x' + str(ep) + ' ' + scrapedtitle.strip('/') - - thumbnail = item.thumbnail - plot = '' - fanart = '' - plot = '' - contentEpisodeNumber = ep - - itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=item.title, url=url, - thumbnail=thumbnail, plot=plot, fanart=fanart, extra='series', - contentSerieName=item.contentSerieName, contentSeasonNumber=item.contentSeasonNumber, - contentEpisodeNumber=contentEpisodeNumber)) - ep = ep + 1 - - return itemlist - - -def seccion(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '
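
A closing note on the episodios() change in newpct1.py above: instead of blindly generating every /pg/N URL up to last_page, the patch downloads each candidate page once and keeps it only when the request succeeds (the sucess spelling follows the attribute name the addon's own httptools response object exposes, so it is left as-is). A standalone sketch of that validate-before-use pagination pattern, written against plain urllib2 so it runs outside the addon; the function names are ours for illustration:

# -*- coding: utf-8 -*-
# Sketch: keep only the pagination URLs that actually respond.
import urllib2

def reachable(url, timeout=10):
    # Any HTTP error or network failure marks the candidate page as invalid.
    try:
        urllib2.urlopen(url, timeout=timeout)
        return True
    except Exception:
        return False

def build_page_list(first_url, base_url, last_page):
    # first_url is the page already parsed; probe pages 2..last_page.
    pages = [first_url]
    for n in range(2, int(last_page) + 1):
        candidate = "%s%s" % (base_url, n)
        if reachable(candidate):
            pages.append(candidate)
    return pages

The trade-off is one extra request per candidate page up front, in exchange for never iterating over dead pagination entries later.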