diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml
index 12ee44df..ab5bc485 100755
--- a/plugin.video.alfa/addon.xml
+++ b/plugin.video.alfa/addon.xml
@@ -1,5 +1,5 @@
-
+
@@ -19,11 +19,16 @@
 [B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» torrentrapid » torrentlocura
-» mispelisyseries » descargas2020
+» vidlox » downace
+» tvvip » clipwatching
+» hdfull » peliculasaudiolatino
+» descargas2020 » mispelisyseries
+» torrentlocura » torrentrapid
+» tumejortorrent » tvsinpagar
 ¤ arreglos internos
-¤ Gracias a la colaboración de @pipcat y @lopezvg en ésta versión
+¤ Gracias al equipo SOD, @lopezvg, @f_y_m por colaborar en esta versión
 Navega con Kodi por páginas web para ver sus videos de manera fácil. Browse web pages using Kodi

diff --git a/plugin.video.alfa/channels/animeflv_me.py b/plugin.video.alfa/channels/animeflv_me.py
old mode 100755
new mode 100644
index 1fc97cbf..1644edd6
--- a/plugin.video.alfa/channels/animeflv_me.py
+++ b/plugin.video.alfa/channels/animeflv_me.py
@@ -213,7 +213,7 @@ def series(item):
         context.extend(context2)
     for show in show_list:
         title, url, thumbnail, plot = show
-        items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
+        items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, contentSerieName=title,
                           plot=plot, show=title, viewmode="movies_with_plot", context=context))

     url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
@@ -237,21 +237,26 @@ def episodios(item):
     es_pelicula = False
     for url, title, date in episodes:
         episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
+        new_item = Item(channel=item.channel, action="findvideos",
+                        url=url, thumbnail=item.thumbnail, plot=plot, show=item.show)
         # El enlace pertenece a un episodio
         if episode:
             season = 1
             episode = int(episode)
             season, episode = renumbertools.numbered_for_tratk(
-                item.channel, item.show, season, episode)
-
+                item.channel, item.contentSerieName, season, episode)
+            new_item.infoLabels["episode"] = episode
+            new_item.infoLabels["season"] = season
+            new_item.contentSerieName = item.contentSerieName
             title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
         # El enlace pertenece a una pelicula
         else:
             title = "%s (%s)" % (title, date)
             item.url = url
             es_pelicula = True
-        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
-                             plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title)))
+        new_item.title = title
+        new_item.fulltitle = "%s %s" % (item.show, title)
+        itemlist.append(new_item)

     # El sistema soporta la videoteca y se encontro por lo menos un episodio
     # o pelicula
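In the episodios() hunk above, the Item is now built once per link, enriched with season/episode infoLabels (remapped through renumbertools) when the link is an episode, and appended a single time at the end. Note that list.append() returns None, so writing new_item = itemlist.append(Item(...)) would leave new_item unusable; the Item has to be created first and appended last, as shown. A standalone sketch of the resulting flow, with stand-in names (FakeItem, renumber) for core.item.Item and renumbertools.numbered_for_tratk, over purely hypothetical data:

    import re

    class FakeItem(object):
        """Stand-in for core.item.Item: an attribute bag with an infoLabels dict."""
        def __init__(self, **kwargs):
            self.infoLabels = {}
            self.__dict__.update(kwargs)

    def renumber(season, episode):
        # Stand-in for renumbertools.numbered_for_tratk: map an absolute episode
        # number onto (season, episode); assume season 1 holds 12 episodes.
        return (2, episode - 12) if episode > 12 else (season, episode)

    itemlist = []
    for title, url in [("Episodio 13", "http://example.com/ep13")]:
        match = re.search(r'Episodio (\d+)', title)
        new_item = FakeItem(action="findvideos", url=url)   # created once...
        if match:
            season, episode = renumber(1, int(match.group(1)))
            new_item.infoLabels["season"] = season
            new_item.infoLabels["episode"] = episode
            new_item.title = "%sx%s %s" % (season, str(episode).zfill(2), title)
        itemlist.append(new_item)                           # ...appended once

    print(itemlist[0].title)  # -> 2x01 Episodio 13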
diff --git a/plugin.video.alfa/channels/autoplay.py b/plugin.video.alfa/channels/autoplay.py
index a6352fc6..76e638ae 100644
--- a/plugin.video.alfa/channels/autoplay.py
+++ b/plugin.video.alfa/channels/autoplay.py
@@ -136,6 +136,8 @@ def start(itemlist, item):
     # Obtiene las listas servidores, calidades disponibles desde el nodo del json de AutoPlay
     server_list = channel_node.get('servers', [])
+    # se normalizan a minusculas; reasignar la variable del bucle no modifica la lista
+    server_list = [server.lower() for server in server_list]
     quality_list = channel_node.get('quality', [])

     # Si no se definen calidades la se asigna default como calidad unica
@@ -145,7 +147,7 @@ def start(itemlist, item):
     # Se guardan los textos de cada servidor y calidad en listas p.e. favorite_servers = ['openload',
     # 'streamcloud']
     for num in range(1, 4):
-        favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]])
+        favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]].lower())
         favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]])

     # Se filtran los enlaces de itemlist y que se correspondan con los valores de autoplay
@@ -175,25 +177,25 @@ def start(itemlist, item):
                 # si el servidor y la calidad no se encuentran en las listas de favoritos o la url esta repetida,
                 # descartamos el item
-                if item.server not in favorite_servers or item.quality not in favorite_quality \
+                if item.server.lower() not in favorite_servers or item.quality not in favorite_quality \
                         or item.url in url_list_valid:
                     item.type_b = True
                     b_dict['videoitem'] = item
                     autoplay_b.append(b_dict)
                     continue
-                autoplay_elem["indice_server"] = favorite_servers.index(item.server)
+                autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
                 autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)

             elif priority == 2:  # Solo servidores
                 # si el servidor no se encuentra en la lista de favoritos o la url esta repetida,
                 # descartamos el item
-                if item.server not in favorite_servers or item.url in url_list_valid:
+                if item.server.lower() not in favorite_servers or item.url in url_list_valid:
                     item.type_b = True
                     b_dict['videoitem'] = item
                     autoplay_b.append(b_dict)
                     continue
-                autoplay_elem["indice_server"] = favorite_servers.index(item.server)
+                autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())

             elif priority == 3:  # Solo calidades
@@ -261,11 +263,11 @@ def start(itemlist, item):
         if not platformtools.is_playing() and not played:
             videoitem = autoplay_elem['videoitem']
             logger.debug('videoitem %s' % videoitem)
-            if videoitem.server not in max_intentos_servers:
-                max_intentos_servers[videoitem.server] = max_intentos
+            if videoitem.server.lower() not in max_intentos_servers:
+                max_intentos_servers[videoitem.server.lower()] = max_intentos

             # Si se han alcanzado el numero maximo de intentos de este servidor saltamos al siguiente
-            if max_intentos_servers[videoitem.server] == 0:
+            if max_intentos_servers[videoitem.server.lower()] == 0:
                 continue

             lang = " "
@@ -312,15 +314,15 @@ def start(itemlist, item):
                 logger.debug(str(len(autoplay_list)))

                 # Si hemos llegado hasta aqui es por q no se ha podido reproducir
-                max_intentos_servers[videoitem.server] -= 1
+                max_intentos_servers[videoitem.server.lower()] -= 1

                 # Si se han alcanzado el numero maximo de intentos de este servidor
                 # preguntar si queremos seguir probando o lo ignoramos
-                if max_intentos_servers[videoitem.server] == 0:
+                if max_intentos_servers[videoitem.server.lower()] == 0:
                     text = "Parece que los enlaces de %s no estan funcionando." % videoitem.server.upper()
                     if not platformtools.dialog_yesno("AutoPlay", text,
                                                       "¿Desea ignorar todos los enlaces de este servidor?"):
-                        max_intentos_servers[videoitem.server] = max_intentos
+                        max_intentos_servers[videoitem.server.lower()] = max_intentos

                     # Si no quedan elementos en la lista se informa
                     if autoplay_elem == autoplay_list[-1]:
@@ -439,7 +441,7 @@ def check_value(channel, itemlist):
         quality_list = channel_node['quality'] = list()

     for item in itemlist:
-        if item.server not in server_list and item.server != '':
+        if item.server.lower() not in server_list and item.server != '':
             server_list.append(item.server)
             change = True
         if item.quality not in quality_list and item.quality != '':
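Every autoplay.py hunk above applies one idea: channels may declare server names in any case ('Openload', 'OPENLOAD'), so each membership test and dictionary key is normalized with .lower(). One subtlety motivates the list comprehension at the top of the first hunk: rebinding the loop variable never changes the list itself. A minimal sketch with hypothetical server names:

    # Hypothetical channel data: mixed-case server names.
    server_list = ['Openload', 'Streamcloud', 'PowVideo']

    # Rebinding the loop variable leaves the list untouched...
    for server in server_list:
        server = server.lower()
    print(server_list)   # ['Openload', 'Streamcloud', 'PowVideo']

    # ...while a comprehension rebuilds it normalized.
    server_list = [server.lower() for server in server_list]
    print(server_list)   # ['openload', 'streamcloud', 'powvideo']

    # Later lookups then normalize their key the same way.
    max_intentos = 3
    max_intentos_servers = {}
    item_server = 'OpenLoad'
    if item_server.lower() not in max_intentos_servers:
        max_intentos_servers[item_server.lower()] = max_intentos
    print(max_intentos_servers)  # {'openload': 3}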
diff --git a/plugin.video.alfa/channels/danimados.py b/plugin.video.alfa/channels/danimados.py
index 9f2f7ad0..a3df1ecb 100644
--- a/plugin.video.alfa/channels/danimados.py
+++ b/plugin.video.alfa/channels/danimados.py
@@ -121,8 +121,8 @@ def episodios(item):
     data_lista = scrapertools.find_single_match(data, '(.+?)<\/ul><\/div><\/div><\/div>')
     show = item.title
-    patron_caps = '.+?'
-    patron_caps += '<\/a><\/div>([^"]+)<\/div>.+?([^"]+)<\/a>'
+    patron_caps = '.+?<\/a><\/div>([^"]+)<\/div>.+?'
+    patron_caps += '([^"]+)<\/a>'
     # scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle
     matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
     for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
@@ -148,11 +148,11 @@ def findvideos(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
-    data1 = scrapertools.find_single_match(data,
+    data1 = scrapertools.find_single_match(data, '(.+?)<\/nav><\/div><\/div>')
     patron = 'src="(.+?)"'
     itemla = scrapertools.find_multiple_matches(data1, patron)
-    if "favicons?domain" in itemla[1]:
+    if "favicons?domain" in itemla[0]:
         method = 1
         data2 = scrapertools.find_single_match(data, "var \$user_hashs = {(.+?)}")
         patron = '".+?":"(.+?)"'
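The itemla[1] to itemla[0] change in findvideos() matters because find_multiple_matches returns matches in document order: the first src="..." inside the player navigation block belongs to the first server tab, and a Google favicon-proxy URL there is what signals the hashed-links layout (method 1). Indexing [1] would also raise an IndexError on pages with a single server tab. A self-contained sketch over hypothetical HTML:

    import re

    def find_multiple_matches(text, pattern):
        # same contract as scrapertools.find_multiple_matches
        return re.findall(pattern, text)

    # Hypothetical player block: one favicon per server tab.
    data1 = ('<img src="https://www.google.com/s2/favicons?domain=openload.co">'
             '<img src="https://www.google.com/s2/favicons?domain=rapidvideo.com">')

    itemla = find_multiple_matches(data1, 'src="(.+?)"')
    method = 1 if "favicons?domain" in itemla[0] else 2
    print(method)  # -> 1, detected even when only the first tab exists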
diff --git a/plugin.video.alfa/channels/descargas2020.py b/plugin.video.alfa/channels/descargas2020.py
index 4008919a..32d8e713 100644
--- a/plugin.video.alfa/channels/descargas2020.py
+++ b/plugin.video.alfa/channels/descargas2020.py
@@ -140,6 +140,8 @@ def listado(item):
         extra = ""
         context = "movie"
         year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
+        if not year or year <= "1900":
+            year = '-'

         if ".com/serie" in url and "/miniseries" not in url:
             action = "episodios"
@@ -219,6 +221,8 @@ def listado_busqueda(item):
         if calidad == "":
             calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])')  #movies
         year = scrapertools.find_single_match(thumb, r'-(\d{4})')
+        if not year or year <= "1900":
+            year = '-'

         # fix encoding for title
         title = scrapertools.htmlclean(title)
@@ -319,7 +323,14 @@ def findvideos(item):
     # item.url = item.url.replace(".com/",".com/ver-online/")
     # item.url = item.url.replace(".com/",".com/descarga-directa/")
     item.url = item.url.replace(".com/", ".com/descarga-torrent/")
-
+
+    # Obtener la información actualizada del Episodio
+    if item.contentType == "episode":
+        if not item.contentTitle and (not item.infoLabels['title'] or item.infoLabels['title'] == 'null' or item.infoLabels['title'] == "None"):
+            tmdb.set_infoLabels_item(item, seekTmdb = True)
+        if not item.contentTitle:
+            item.contentTitle = item.infoLabels['title']
+
     # Descarga la página
     data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data)
     data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
@@ -367,7 +378,7 @@ def findvideos(item):
     itemlist.append(item.clone(title=title_gen, action="", folder=False))

     #Título con todos los datos del vídeo
     title = title_torrent
-    title_torrent = '[COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)
+    title_torrent = '[COLOR salmon]??[/COLOR], [COLOR yellow][Torrent]- [/COLOR]%s [online]' % (title_torrent)

     if url != "":  #Torrent
         itemlist.append(
             Item(channel=item.channel, action="play", server="torrent", title=title_torrent, fulltitle=title,
@@ -410,6 +421,14 @@ def findvideos(item):
             devuelve = servertools.findvideosbyserver(enlace, servidor)
             if devuelve:
                 enlace = devuelve[0][1]
+                item.alive = servertools.check_video_link(enlace, servidor)
+                if item.alive.lower() == "ok":
+                    titulo = '%s, %s' % (item.alive, titulo)
+                elif item.alive == "??":
+                    titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, titulo)
+                else:
+                    logger.debug(item.alive + ": / " + titulo + " / " + enlace)
+                    continue
             itemlist.append(
                 Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                      fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot, infoLabels=item.infoLabels,
                      folder=False))
@@ -446,6 +465,15 @@ def findvideos(item):
             devuelve = servertools.findvideosbyserver(enlace, servidor)
             if devuelve:
                 enlace = devuelve[0][1]
+                if p <= 2:
+                    item.alive = servertools.check_video_link(enlace, servidor)
+                    if item.alive.lower() == "ok":
+                        parte_titulo = '%s, %s' % (item.alive, parte_titulo)
+                    elif item.alive == "??":
+                        parte_titulo = '[COLOR salmon]%s[/COLOR], %s' % (item.alive, parte_titulo)
+                    else:
+                        logger.debug(item.alive + ": / " + parte_titulo + " / " + enlace)
+                        break
             itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                  title=parte_titulo, fulltitle=title, url=enlace, thumbnail=logo, plot=item.plot,
                                  infoLabels=item.infoLabels, folder=False))
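Both findvideos() hunks above add the same pattern: once findvideosbyserver resolves a link, servertools.check_video_link probes whether the hosted video is still available, and the verdict ('Ok', '??' when the server cannot be checked, or an error state) is prepended to the title. Dead links are skipped (continue/break) instead of listed, and plain torrents are tagged '??' because they cannot be probed this way. A sketch of the tagging logic with a stand-in checker (the real one lives in servertools):

    def check_video_link(url, server):
        # Stand-in for servertools.check_video_link; pretend only one URL verifies.
        if "good" in url:
            return "Ok"
        if "maybe" in url:
            return "??"
        return "Error"

    def tag_title(titulo, enlace, servidor):
        alive = check_video_link(enlace, servidor)
        if alive.lower() == "ok":
            return '%s, %s' % (alive, titulo)                        # verified
        elif alive == "??":
            return '[COLOR salmon]%s[/COLOR], %s' % (alive, titulo)  # unverifiable
        return None                                                  # dead: caller skips it

    for url in ("http://good.example/v", "http://maybe.example/v", "http://dead.example/v"):
        print(tag_title("Ver en openload", url, "openload"))
    # Ok, Ver en openload
    # [COLOR salmon]??[/COLOR], Ver en openload
    # None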
diff --git a/plugin.video.alfa/channels/documentalesonline.py b/plugin.video.alfa/channels/documentalesonline.py
index ee8ce0f5..6b4818d5 100755
--- a/plugin.video.alfa/channels/documentalesonline.py
+++ b/plugin.video.alfa/channels/documentalesonline.py
@@ -4,55 +4,79 @@ import re
 from core import httptools
 from core import scrapertools
+from core import servertools
 from core.item import Item
-from platformcode import logger
 from channelselector import get_thumb
+from platformcode import logger

 HOST = "http://documentales-online.com/"

 def mainlist(item):
     logger.info()
     itemlist = list()
-
-    itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=HOST,
+    itemlist.append(Item(channel=item.channel, title="Novedades", action="videos", url=HOST,
                          thumbnail=get_thumb('newest', auto=True)))
     itemlist.append(Item(channel=item.channel, title="Destacados", action="seccion", url=HOST, extra="destacados",
                          thumbnail=get_thumb('hot', auto=True)))
-    itemlist.append(Item(channel=item.channel, title="Series", action="seccion", url=HOST, extra="series",
+    itemlist.append(Item(channel=item.channel, title="Series destacadas", action="seccion", url=HOST, extra="series",
                          thumbnail=get_thumb('tvshows', auto=True)))
     itemlist.append(Item(channel=item.channel, title="Categorías", action="categorias", url=HOST,
                          thumbnail=get_thumb('categories', auto=True)))
-    # itemlist.append(Item(channel=item.channel, title="Top 100", action="categorias", url=HOST))
-    # itemlist.append(Item(channel=item.channel, title="Populares", action="categorias", url=HOST))
-
+    itemlist.append(Item(channel=item.channel, title="Top 100", action="listado", url=HOST + "top/",
+                         thumbnail=get_thumb('more voted', auto=True)))
+    itemlist.append(Item(channel=item.channel, title="Populares", action="listado", url=HOST + "populares/",
+                         thumbnail=get_thumb('more watched', auto=True)))
+    itemlist.append(Item(channel=item.channel, title="Series y Temas", action="listado", url=HOST + "series-temas/",
+                         thumbnail=get_thumb('tvshows', auto=True)))
     itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
                          thumbnail=get_thumb('search', auto=True)))
+    return itemlist
-    # itemlist.append(Item(channel=item.channel, title="  Series y Temas", action="categorias", url=HOST))

+def listado(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = data.replace('', '')
+    bloque = scrapertools.find_single_match(data, 'class="post-entry(.*?)class="post-share')
+    if "series-temas" not in item.url:
+        patron = '([^<]+)<.*?'
+    matches = scrapertools.find_multiple_matches(bloque, patron)
+    for scrapedurl, scrapedtitle in matches:
+        itemlist.append(Item(action="videos",
+                             channel=item.channel,
+                             title=scrapedtitle,
+                             url=HOST + scrapedurl
+                             ))
     return itemlist

 def seccion(item):
     logger.info()
     itemlist = []
-
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
-
     if item.extra == "destacados":
         patron_seccion = 'Destacados(.*?)'
         action = "findvideos"
     else:
         patron_seccion = 'Series destacadas(.*?)'
-        action = "listado"
-
+        action = "videos"
     data = scrapertools.find_single_match(data, patron_seccion)
-
-    matches = re.compile('(.*?)', re.DOTALL).findall(data)
-
+    matches = scrapertools.find_multiple_matches(data, '(.*?)')
     aux_action = action
     for url, title in matches:
         if item.extra != "destacados" and "Cosmos (Carl Sagan)" in title:
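Several hunks in this file swap re.compile(patron, re.DOTALL).findall(data) for scrapertools.find_multiple_matches(data, patron), the helper used everywhere else in the addon. For these patterns the two behave the same; a simplified re-implementation of the helpers' contract (a sketch for reference, not alfa's actual code) makes the substitution easy to verify:

    import re

    def find_single_match(data, patron):
        # first capture group of the first match, or '' when nothing matches
        match = re.search(patron, data, re.DOTALL)
        return match.group(1) if match else ''

    def find_multiple_matches(data, patron):
        # every match; tuples when the pattern holds several capture groups
        return re.findall(patron, data, re.DOTALL)

    data = '<a href="/doc-1">Uno</a><a href="/doc-2">Dos</a>'
    print(find_single_match(data, 'href="([^"]+)"'))               # /doc-1
    print(find_multiple_matches(data, 'href="([^"]+)">([^<]+)<'))  # [('/doc-1', 'Uno'), ('/doc-2', 'Dos')]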
@@ -60,61 +84,46 @@
         else:
             action = aux_action
         itemlist.append(item.clone(title=title, url=url, action=action, fulltitle=title))
-
     return itemlist

-def listado(item):
+def videos(item):
     logger.info()
-
     itemlist = []
-
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
-
-    pagination = scrapertools.find_single_match(data, '\d' '')
     patron = '(.*?)'
     data = scrapertools.find_single_match(data, patron)
-    matches = re.compile('(.*?).*?Categorías')
-    matches = re.compile('(.*?)', re.DOTALL).findall(data)
-
+    matches = scrapertools.find_multiple_matches(data, '(.*?)')
     for url, title in matches:
-        itemlist.append(item.clone(title=title, url=url, action="listado", fulltitle=title))
-
+        itemlist.append(item.clone(title=title, url=url, action="videos", fulltitle=title))
     return itemlist

 def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
-
     try:
         item.url = HOST + "?s=%s" % texto
-        return listado(item)
+        return videos(item)
     # Se captura la excepción, para no interrumpir al buscador global si un canal falla
     except:
         import sys
@@ -125,37 +134,21 @@ def search(item, texto):

 def findvideos(item):
     logger.info()
-
     itemlist = []
-
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
-
-    if item.fulltitle == "Cosmos (Carl Sagan)":
-
-        matches = scrapertools.find_multiple_matches(data, '(.*?)')
-        data = re.sub(r'"|\n|\r|\t| |\s{2,}', "", data)
-        video_id = scrapertools.find_single_match(data, 'id=videoInfo >(.*?)')
-        new_url = 'https://v.d0stream.com/api/videoinfo/%s?src-url=https://Fv.d0stream.com' % video_id
-        json_data = httptools.downloadpage(new_url).data
-        dict_data = jsontools.load(json_data)
-        sources = dict_data['sources']
-
-        for vip_item in sources['mp4_cdn']:
-            vip_url = vip_item['url']
-            vip_quality = vip_item['label']
-            title = '%s [%s]' % (item.title, vip_quality)
-            itemlist.append(item.clone(title=title, url=vip_url, action='play', quality=vip_quality, server='directo'))
-
-        return itemlist
-
 def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r'"|\n|\r|\t| |\s{2,}', "", data)
-    player_vip = scrapertools.find_single_match(data, 'class=movieplay>
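After this rewrite, every listing in the channel funnels through videos(): search() only builds the WordPress query URL and delegates to it. A sketch of that flow, with the URL shape taken from the diff and the download/scrape step stubbed out:

    HOST = "http://documentales-online.com/"

    def build_search_url(texto):
        # same transformation as search() above: spaces become '+' in the ?s= query
        return HOST + "?s=%s" % texto.replace(" ", "+")

    def videos(url):
        # stub for the real videos(item): download the page, scrape the entries
        return ["<Item %s>" % url]

    print(videos(build_search_url("carl sagan cosmos")))
    # ['<Item http://documentales-online.com/?s=carl+sagan+cosmos>']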