diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml
index d39f9de3..0ae3730c 100755
--- a/plugin.video.alfa/addon.xml
+++ b/plugin.video.alfa/addon.xml
@@ -1,5 +1,5 @@
 
-
+
@@ -19,8 +19,11 @@
 [B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-    » pelisplus        » pelisplusco
-    » gvideo           ¤ arreglos internos
+    » cinetux          » peliculasgratis
+    » gamovideo        » peliculasaudiolatino
+    » streamixcloud    » uptobox
+    » canalpelis       » verpelis
+    ¤ arreglos internos
 Navega con Kodi por páginas web para ver sus videos de manera fácil. Browse web pages using Kodi
diff --git a/plugin.video.alfa/channels/canalpelis.py b/plugin.video.alfa/channels/canalpelis.py
index cb719be4..79e1dc61 100644
--- a/plugin.video.alfa/channels/canalpelis.py
+++ b/plugin.video.alfa/channels/canalpelis.py
@@ -357,19 +357,24 @@ def findvideos(item):
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
-    patron = ''
-
+    patron = ''
     matches = re.compile(patron, re.DOTALL).findall(data)
     for option, url in matches:
-        lang = scrapertools.find_single_match(
-            data, '  • <\/b> (.*?)' % option)
+        lang = scrapertools.find_single_match(data,
+                                              '  • <\/b> (.*?)' % option)
         lang = lang.replace('Español ', '').replace('B.S.O. ', '')
+        data_b = httptools.downloadpage(urlparse.urljoin(host, url), headers={'Referer': item.url}).data
+        patron = ']+src="([^"]+)"'
+        matches = re.compile(patron, re.DOTALL).findall(data_b)
+        url = matches[0]
         server = servertools.get_server_from_url(url)
         title = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, server.title(), lang)
-        itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
-                                   server=server, language = lang, text_color=color3))
+        itemlist.append(item.clone(action='play', url=url, title=title, extra1=title, server=server, language=lang,
+                                   text_color=color3))

     itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
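Note on the canalpelis change above: findvideos no longer hands the option URL straight to servertools; it first downloads the intermediate player page (sending the film page as Referer) and extracts the real video URL from its iframe. The iframe pattern is partially lost in this diff; a minimal sketch of the resolution step, assuming alfa's core helpers and a hypothetical option URL, would be:

```python
# Sketch of the two-step link resolution (assumes alfa's httptools and
# servertools; host and option_url below are hypothetical placeholders).
import re
import urlparse

from core import httptools
from core import servertools

host = 'http://example-canalpelis.com'   # hypothetical channel host
option_url = '/?trembed=1&trid=1234'     # hypothetical player-option URL

# Fetch the intermediate page, passing the film page as Referer.
data_b = httptools.downloadpage(urlparse.urljoin(host, option_url),
                                headers={'Referer': host}).data

# Generic form of the (partially lost) iframe pattern.
matches = re.findall(r'<iframe[^>]+src="([^"]+)"', data_b, re.DOTALL)
if matches:
    server = servertools.get_server_from_url(matches[0])
```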

diff --git a/plugin.video.alfa/channels/cinetux.py b/plugin.video.alfa/channels/cinetux.py
index 96c848e4..59805451 100644
--- a/plugin.video.alfa/channels/cinetux.py
+++ b/plugin.video.alfa/channels/cinetux.py
@@ -182,18 +182,15 @@ def destacadas(item):
     matches = scrapertools.find_multiple_matches(bloque, patron)
     for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
         scrapedurl = CHANNEL_HOST + scrapedurl
-        scrapedtitle = scrapedtitle.replace("Ver ", "")
-        new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
+        itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
                                    url=scrapedurl, thumbnail=scrapedthumbnail,
-                                   contentType="movie")
-        itemlist.append(new_item)
-
+                                   contentType="movie"
+                                   ))
     # Extrae el paginador
     next_page_link = scrapertools.find_single_match(data, ']+>»')
     if next_page_link:
         itemlist.append(
             item.clone(action="destacadas", title=">> Página siguiente", url=next_page_link, text_color=color3))
-
     return itemlist
@@ -243,13 +240,9 @@ def findvideos(item):
     # Busca el argumento
     data = httptools.downloadpage(item.url).data
-    year = scrapertools.find_single_match(item.title, "\(([0-9]+)")
-    tmdb.set_infoLabels(item, __modo_grafico__)
-
-    if not item.infoLabels.get('plot'):
-        plot = scrapertools.find_single_match(data, '(.*?)')
-        item.infoLabels['plot'] = plot
+    if item.infoLabels["year"]:
+        tmdb.set_infoLabels(item, __modo_grafico__)

     if filtro_enlaces != 0:
         list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
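Note on the cinetux change above: findvideos now queries TMDb only when the item already carries a year, instead of scraping the year out of the title and the plot out of the page. A minimal sketch of that guard, assuming alfa's core.tmdb module and an Item with populated infoLabels:

```python
# Year-guarded metadata lookup (assumes alfa's core.tmdb; modo_grafico
# mirrors the channel's existing __modo_grafico__ setting).
from core import tmdb

def enrich(item, modo_grafico=True):
    # A title-only TMDb search is unreliable, so skip it without a year.
    if item.infoLabels["year"]:
        tmdb.set_infoLabels(item, modo_grafico)
    return item
```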
diff --git a/plugin.video.alfa/channels/news.py b/plugin.video.alfa/channels/news.py
index b2d48c92..cb0a4fb0 100644
--- a/plugin.video.alfa/channels/news.py
+++ b/plugin.video.alfa/channels/news.py
@@ -15,6 +15,8 @@
 from core import scrapertools
 from core.item import Item
 from platformcode import config, logger
 from platformcode import platformtools
+from core import jsontools
+from channels import side_menu

 THUMBNAILS = {'0': 'posters', '1': 'banners', '2': 'squares'}
@@ -27,11 +29,16 @@
 perfil = [['0xFF0B7B92', '0xFF89FDFB', '0xFFACD5D4'],
           ['0xFFA5DEE5', '0xFFE0F9B5', '0xFFFEFDCA'],
           ['0xFFF23557', '0xFF22B2DA', '0xFFF0D43A']]

+#color1, color2, color3 = ["white", "white", "white"]
 color1, color2, color3 = perfil[__perfil__]

 list_newest = []
+list_newest_tourl = []
 channels_id_name = {}

+menu_cache_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_cache_data.json')
+menu_settings_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_settings_data.json')
+

 def mainlist(item):
     logger.info()
@@ -150,6 +157,54 @@ def get_channels_list():

     return list_canales, any_active

+def set_cache(item):
+    logger.info()
+    item.mode = 'set_cache'
+    t = Thread(target=novedades, args=[item])
+    t.start()
+    #t.join()
+
+def get_from_cache(item):
+    logger.info()
+    itemlist = []
+    cache_node = jsontools.get_node_from_file('menu_cache_data.json', 'cached')
+    first = item.last
+    last = first + 40
+    #if last >= len(cache_node[item.extra]):
+    #    last = len(cache_node[item.extra])
+
+    for cached_item in cache_node[item.extra][first:last]:
+        new_item = Item()
+        new_item = new_item.fromurl(cached_item)
+        itemlist.append(new_item)
+    if item.mode == 'silent':
+        set_cache(item)
+    if last >= len(cache_node[item.extra]):
+        item.mode = 'finish'
+        itemlist = add_menu_items(item, itemlist)
+    else:
+        item.mode = 'get_cached'
+        item.last = last
+        itemlist = add_menu_items(item, itemlist)
+
+    return itemlist
+
+def add_menu_items(item, itemlist):
+    logger.info()
+
+    menu_icon = get_thumb('menu.png')
+    menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon, title='Menu')
+    itemlist.insert(0, menu)
+    if item.mode != 'finish':
+        if item.mode == 'get_cached':
+            last = item.last
+        else:
+            last = len(itemlist)
+        refresh_icon = get_thumb('more.png')
+        refresh = item.clone(thumbnail=refresh_icon, mode='get_cached', title='Más', last=last)
+        itemlist.insert(len(itemlist), refresh)
+
+    return itemlist

 def novedades(item):
     logger.info()
@@ -159,6 +214,14 @@ def novedades(item):
     list_newest = []
     start_time = time.time()

+    mode = item.mode
+    if mode == '':
+        mode = 'normal'
+
+    if mode == 'get_cached':
+        if os.path.exists(menu_cache_path):
+            return get_from_cache(item)
+
     multithread = config.get_setting("multithread", "news")
     logger.info("multithread= " + str(multithread))
@@ -170,8 +233,22 @@ def novedades(item):
         if config.set_setting("multithread", True, "news"):
             multithread = True

-    progreso = platformtools.dialog_progress(item.category, "Buscando canales...")
+    if mode == 'normal':
+        progreso = platformtools.dialog_progress(item.category, "Buscando canales...")
+    list_canales, any_active = get_channels_list()
+
+    if mode == 'silent' and any_active and len(list_canales[item.extra]) > 0:
+        side_menu.set_menu_settings(item)
+        aux_list = []
+        for canal in list_canales[item.extra]:
+            if len(aux_list) < 2:
+                aux_list.append(canal)
+        list_canales[item.extra] = aux_list
+
+    if mode == 'set_cache':
+        list_canales[item.extra] = list_canales[item.extra][2:]
+
     if any_active and len(list_canales[item.extra]) > 0:
         import math
         # fix float porque la division se hace mal en python 2.x
@@ -191,12 +268,14 @@ def novedades(item):
                 t = Thread(target=get_newest, args=[channel_id, item.extra], name=channel_title)
                 t.start()
                 threads.append(t)
-                progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
+                if mode == 'normal':
+                    progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)

             # Modo single Thread
             else:
-                logger.info("Obteniendo novedades de channel_id=" + channel_id)
-                progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
+                if mode == 'normal':
+                    logger.info("Obteniendo novedades de channel_id=" + channel_id)
+                    progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
                 get_newest(channel_id, item.extra)

         # Modo Multi Thread: esperar q todos los hilos terminen
@@ -208,25 +287,29 @@ def novedades(item):
                 percentage = int(math.ceil(index * t))

                 list_pendent_names = [a.getName() for a in pendent]
-                mensaje = "Buscando en %s" % (", ".join(list_pendent_names))
-                progreso.update(percentage, "Finalizado en %d/%d canales..." % (len(threads) - len(pendent), len(threads)),
+                if mode == 'normal':
+                    mensaje = "Buscando en %s" % (", ".join(list_pendent_names))
+                    progreso.update(percentage, "Finalizado en %d/%d canales..." % (len(threads) - len(pendent), len(threads)),
                                 mensaje)
-                logger.debug(mensaje)
+                    logger.debug(mensaje)

-                if progreso.iscanceled():
-                    logger.info("Busqueda de novedades cancelada")
-                    break
+                    if progreso.iscanceled():
+                        logger.info("Busqueda de novedades cancelada")
+                        break

                 time.sleep(0.5)
                 pendent = [a for a in threads if a.isAlive()]

-            mensaje = "Resultados obtenidos: %s | Tiempo: %2.f segundos" % (len(list_newest), time.time() - start_time)
-            progreso.update(100, mensaje, " ", " ")
-            logger.info(mensaje)
-            start_time = time.time()
-            # logger.debug(start_time)
+            if mode == 'normal':
+                mensaje = "Resultados obtenidos: %s | Tiempo: %2.f segundos" % (len(list_newest), time.time() - start_time)
+                progreso.update(100, mensaje, " ", " ")
+                logger.info(mensaje)
+                start_time = time.time()
+                # logger.debug(start_time)

         result_mode = config.get_setting("result_mode", "news")
+        if mode != 'normal':
+            result_mode = 0
+
         if result_mode == 0:  # Agrupados por contenido
             ret = group_by_content(list_newest)
         elif result_mode == 1:  # Agrupados por canales
@@ -237,13 +320,19 @@ def novedades(item):
         while time.time() - start_time < 2:
             # mostrar cuadro de progreso con el tiempo empleado durante almenos 2 segundos
             time.sleep(0.5)
-
-        progreso.close()
-        return ret
+        if mode == 'normal':
+            progreso.close()
+        if mode == 'silent':
+            set_cache(item)
+            item.mode = 'set_cache'
+        ret = add_menu_items(item, ret)
+        if mode != 'set_cache':
+            return ret
     else:
-        no_channels = platformtools.dialog_ok('Novedades - %s'%item.extra, 'No se ha definido ningun canal para la '
-                                              'busqueda.','Utilice el menu contextual '
-                                              'para agregar al menos uno')
+        if mode != 'set_cache':
+            no_channels = platformtools.dialog_ok('Novedades - %s'%item.extra, 'No se ha definido ningun canal para la '
+                                                  'busqueda.','Utilice el menu contextual '
+                                                  'para agregar al menos uno')

     return
@@ -251,6 +340,7 @@
 def get_newest(channel_id, categoria):
     logger.info("channel_id=" + channel_id + ", categoria=" + categoria)

     global list_newest
+    global list_newest_tourl

     # Solicitamos las novedades de la categoria (item.extra) buscada en el canal channel
     # Si no existen novedades para esa categoria en el canal devuelve una lista vacia
@@ -271,11 +361,22 @@ def get_newest(channel_id, categoria):
         logger.info("running channel " + modulo.__name__ + " " + modulo.__file__)
         list_result = modulo.newest(categoria)
         logger.info("canal= %s %d resultados" % (channel_id, len(list_result)))
-
+        exist = False
+        if os.path.exists(menu_cache_path):
+            cache_node = jsontools.get_node_from_file('menu_cache_data.json', 'cached')
+            exist = True
+        else:
+            cache_node = {}
+        #logger.debug('cache node: %s' % cache_node)
         for item in list_result:
             # logger.info("item="+item.tostring())
             item.channel = channel_id
             list_newest.append(item)
+            list_newest_tourl.append(item.tourl())
+
+        cache_node[categoria] = list_newest_tourl
+
+        jsontools.update_node(cache_node, 'menu_cache_data.json', "cached")

     except:
         logger.error("No se pueden recuperar novedades de: " + channel_id)
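Note on the news.py changes above: novedades gains 'silent'/'set_cache' modes that persist each channel's results to menu_cache_data.json, and a 'get_cached' mode that pages them back 40 at a time; Items are serialized with tourl() and rebuilt with fromurl(). A minimal sketch of that round trip, assuming alfa's core.item.Item and core.jsontools APIs as imported above ('peliculas' is a hypothetical category key):

```python
# Cache round trip used by the new news.py modes (assumes core.jsontools'
# get_node_from_file/update_node and core.item.Item as used in the diff).
from core import jsontools
from core.item import Item

def write_cache(items, categoria='peliculas'):
    # Serialize every Item to a URL-safe string before persisting it.
    node = {categoria: [it.tourl() for it in items]}
    jsontools.update_node(node, 'menu_cache_data.json', 'cached')

def read_cache(first=0, categoria='peliculas'):
    # Rebuild Items from the cached strings, one page of 40 at a time.
    node = jsontools.get_node_from_file('menu_cache_data.json', 'cached')
    return [Item().fromurl(c) for c in node[categoria][first:first + 40]]
```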
diff --git a/plugin.video.alfa/channels/peliculasaudiolatino.py b/plugin.video.alfa/channels/peliculasaudiolatino.py
index 8ab94e70..437a3dd2 100644
--- a/plugin.video.alfa/channels/peliculasaudiolatino.py
+++ b/plugin.video.alfa/channels/peliculasaudiolatino.py
@@ -43,7 +43,7 @@ def newest(categoria):
         elif categoria == 'terror':
             item.url = HOST + '/genero/terror.html'
         itemlist = peliculas(item)
-        if "Pagina" in itemlist[-1].title:
+        if ">> Página siguiente" in itemlist[-1].title:
            itemlist.pop()
     except:
         import sys
diff --git a/plugin.video.alfa/channels/peliculasgratis.py b/plugin.video.alfa/channels/peliculasgratis.py
index 35e07ba5..9105048d 100644
--- a/plugin.video.alfa/channels/peliculasgratis.py
+++ b/plugin.video.alfa/channels/peliculasgratis.py
@@ -3,28 +3,14 @@
 import os
 import re
 import urllib
-import urlparse
-import xbmc
-import xbmcgui
 from core import httptools
 from core import scrapertools
 from core import servertools
 from core import tmdb
 from core.item import Item
-from core.scrapertools import decodeHtmlentities as dhe
 from platformcode import config, logger

-ACTION_SHOW_FULLSCREEN = 36
-ACTION_GESTURE_SWIPE_LEFT = 511
-ACTION_SELECT_ITEM = 7
-ACTION_PREVIOUS_MENU = 10
-ACTION_MOVE_LEFT = 1
-ACTION_MOVE_RIGHT = 2
-ACTION_MOVE_DOWN = 4
-ACTION_MOVE_UP = 3
-OPTION_PANEL = 6
-OPTIONS_OK = 5

 host = "http://peliculasgratis.biz"
@@ -37,49 +23,6 @@ CALIDADES = {"micro1080p": "[COLOR plum]Micro1080p[/COLOR]", "dvds": "[COLOR lim
 IDIOMAS = {"castellano": "[COLOR yellow]Castellano[/COLOR]", "latino": "[COLOR orange]Latino[/COLOR]",
            "vose": "[COLOR lightsalmon]Subtitulada[/COLOR]", "vo": "[COLOR crimson]Ingles[/COLOR]",
            "en": "[COLOR crimson]Ingles[/COLOR]"}
-IDIOMASP = {"es": "[COLOR yellow]CAST[/COLOR]", "la": "[COLOR orange]LAT[/COLOR]",
-            "vs": "[COLOR lightsalmon]SUB[/COLOR]", "vo": "[COLOR crimson]Ingles[/COLOR]",
-            "en": "[COLOR crimson]INGL[/COLOR]"}
-
-
-# Para la busqueda en bing evitando baneos
-
-def browser(url):
-    import mechanize
-
-    # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing
-    br = mechanize.Browser()
-    # Browser options
-    br.set_handle_equiv(False)
-    br.set_handle_gzip(True)
-    br.set_handle_redirect(True)
-    br.set_handle_referer(False)
-    br.set_handle_robots(False)
-    # Follows refresh 0 but not hangs on refresh > 0
-    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
-    # Want debugging messages?
-    # br.set_debug_http(True)
-    # br.set_debug_redirects(True)
-    # br.set_debug_responses(True)
-
-    # User-Agent (this is cheating, ok?)
-    # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
-    # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
-    # Open some site, let's pick a random one, the first that pops in mind
-    r = br.open(url)
-    response = r.read()
-    print response
-    if "img,divreturn" in response:
-        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
-        print "prooooxy"
-        response = r.read()
-
-    return response
-
-
-api_key = "2e2160006592024ba87ccdf78c28f49f"
-api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
-

 def mainlist(item):
     logger.info()
@@ -88,22 +31,26 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel,
                          thumbnail="http://imgur.com/fN2p6qH.png", fanart="http://imgur.com/b8OuBR2.jpg",
                          contentType="movie"))
     itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Más vistas[/B][/COLOR]", action="scraper",
-                                       url="http://peliculasgratis.biz/catalogue?order=most_viewed",
+                                       url=host + "/catalogue?order=most_viewed",
                                        thumbnail="http://imgur.com/fN2p6qH.png", fanart="http://imgur.com/b8OuBR2.jpg",
                                        contentType="movie"))
     itemlist.append(itemlist[-1].clone(title=" [COLOR lightskyblue][B]Recomendadas[/B][/COLOR]", action="scraper",
-                                       url="http://peliculasgratis.biz/catalogue?order=most_rated",
-                                       thumbnail="http://imgur.com/fN2p6qH.png.png",
+                                       url=host + "/catalogue?order=most_rated",
+                                       thumbnail="http://imgur.com/fN2p6qH.png",
                                        fanart="http://imgur.com/b8OuBR2.jpg", contentType="movie"))
     itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Actualizadas[/B][/COLOR]", action="scraper",
-                                       url="http://peliculasgratis.biz/catalogue?",
+                                       url=host + "/catalogue?",
+                                       thumbnail="http://imgur.com/fN2p6qH.png", fanart="http://imgur.com/b8OuBR2.jpg",
+                                       contentType="movie"))
+    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B] Género[/B][/COLOR]", action="genero",
+                                       url=host,
                                        thumbnail="http://imgur.com/fN2p6qH.png", fanart="http://imgur.com/b8OuBR2.jpg",
                                        contentType="movie"))
     itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Series[/B][/COLOR]", action="scraper",
-                                       url="http://peliculasgratis.biz/lista-de-series",
+                                       url=host + "/lista-de-series",
                                        thumbnail="http://imgur.com/Jia27Uc.png", fanart="http://imgur.com/b8OuBR2.jpg",
                                        contentType="tvshow"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Buscar[/B][/COLOR]", action="", url="",
+    itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Buscar[/B][/COLOR]",
                                        thumbnail="http://imgur.com/mwTwfN7.png", fanart="http://imgur.com/b8OuBR2.jpg"))
     itemlist.append(
         itemlist[-1].clone(title="[COLOR lightskyblue][B] Buscar Película[/B][/COLOR]", action="search", url="",
@@ -117,11 +64,31 @@ def mainlist(item):
     return itemlist

+def genero(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'Películas por géneros(.*?)<\/i>Anuncios')
     if item.contentType != "movie":
-        matches = scrapertools.find_multiple_matches(bloque_enlaces,
-                                                     '(.*?)<\/a>.*?(completa)">([^"]+)<\/a><\/h3> (.*?)<\/span>')
+        patron = '([^<]+)<.*?(.*?)<\/a>.*?title[^<]+>([^<]+)<\/a><\/h3> (.*?)<')
-        for url, thumb, quality, check_idioma, title, check_year in matches:
-
-            logger.debug('check_idioma: %s' % check_idioma)
+        patron = '')
             title = title
             itemlist.append(
-                Item(channel=item.channel, title=title, url=urlparse.urljoin(host, url), action="fanart", thumbnail=thumb,
-                     fanart="http://imgur.com/nqmJozd.jpg", extra=title_fan + "|" + title_item + "|" + check_year.strip(),
-                     contentType=item.contentType, folder=True, language = idiomas))
+                Item(channel=item.channel, title=title, fulltitle=title, url=host + url, action=action, thumbnail=thumb,
+                     fanart="http://imgur.com/nqmJozd.jpg", extra=title_fan + "|" + title_item + "|" + year, show=title,
+                     contentType=item.contentType, folder=True, language = idiomas, infoLabels={"year":year}))
     ## Paginación
-    if check_year:
+    tmdb.set_infoLabels(itemlist)
+    if year:
         next = scrapertools.find_single_match(data, 'href="([^"]+)" title="Siguiente página">')
         if len(next) > 0:
             url = next
             if not "http" in url:
-                url = urlparse.urljoin(host, url)
+                url = host + url
             itemlist.append(
                 Item(channel=item.channel, action="scraper", title="[COLOR floralwhite][B]Siguiente[/B][/COLOR]",
                      url=url, thumbnail="http://imgur.com/jhRFAmk.png", fanart="http://imgur.com/nqmJozd.jpg",
                      extra=item.extra, contentType=item.contentType, folder=True))
-
     return itemlist
-
-
-def fanart(item):
-    logger.info()
-    itemlist = []
-    url = item.url
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
-    year = item.extra.split("|")[2]
-    if not year.isdigit():
-        try:
-            year = scrapertools.find_single_match(data, '[^<]+<\/span>(\d+)<')
-        except:
-            year = ""
-    if item.contentType != "movie":
-        tipo_ps = "tv"
-    else:
-        tipo_ps = "movie"
-    title = item.extra.split("|")[0]
-    fulltitle = title
-    if "El infiltrado" in title:
-        title = "The Night Manager"
-    title_o = scrapertools.find_single_match(data, '(.*?)<\/div>')
-    if sinopsis == "":
-        try:
-            sinopsis = scrapertools.find_single_match(data, 'sinopsis\'>(.*?)<\/div>')
-        except:
-            sinopsis = ""
-    if "Miniserie" in sinopsis:
-        tipo_ps = "tv"
-        year = scrapertools.find_single_match(sinopsis, 'de TV \((\d+)\)')
-    if year == "":
-        if item.contentType != "movie":
-            try:
-                year = scrapertools.find_single_match(data, 'Estreno:<\/strong>(\d+)<\/span>')
-            except:
-                year = ""
-        else:
-            year = scrapertools.find_single_match(data, 'A.*?(\d+)')
-    if year == "":
-        try:
-            year = scrapertools.find_single_match(data, 'Estreno.*?\d+/\d+/(\d+)')
-        except:
-            try:
-                year = scrapertools.find_single_match(data,
-                                                      '.*?Año.*?(\d\d\d\d)')
-            except:
-                try:
-                    year = scrapertools.find_single_match(data,
-                                                          '(.*?)h="ID.*?.*?TV Series')
-        else:
-            urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (title_imdb.replace(' ', '+'), year)
-            data = browser(urlbing_imdb)
-            data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
-            subdata_imdb = scrapertools.find_single_match(data, '  • (.*?)h="ID.*?')
-            try:
-                imdb_id = scrapertools.get_match(subdata_imdb, '(.*?)h="ID.*?.*?TV Series')
-        else:
-            urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (
-                title_imdb.replace(' ', '+'), year)
-            data = browser(urlbing_imdb)
-            data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
-            subdata_imdb = scrapertools.find_single_match(data, '  • (.*?)h="ID.*?')
-            try:
-                imdb_id = scrapertools.get_match(subdata_imdb, '
-    if len(imagenes) >= 5:
-        fanart_info = imagenes[1]
-        fanart_2 = imagenes[2]
-        fanart_3 = imagenes[3]
-        fanart_4 = imagenes[4]
-        if fanart == item.fanart:
-            fanart = fanart_info
-    elif len(imagenes) == 4:
-        fanart_info = imagenes[1]
-        fanart_2 = imagenes[2]
-        fanart_3 = imagenes[3]
-        fanart_4 = imagenes[1]
-        if fanart == item.fanart:
-            fanart = fanart_info
-    elif len(imagenes) == 3:
-        fanart_info = imagenes[1]
-        fanart_2 = imagenes[2]
-        fanart_3 = imagenes[1]
-        fanart_4 = imagenes[0]
-        if fanart == item.fanart:
-            fanart = fanart_info
-    elif len(imagenes) == 2:
-        fanart_info = imagenes[1]
-        fanart_2 = imagenes[0]
-        fanart_3 = imagenes[1]
-        fanart_4 = imagenes[1]
-        if fanart == item.fanart:
-            fanart = fanart_info
-    else:
-        fanart_info = fanart
-        fanart_2 = fanart
-        fanart_3 = fanart
-        fanart_4 = fanart
-    images_fanarttv = fanartv(item, id_tvdb, id)
-    if item.contentType != "movie":
-        url = item.url + "/episodios"
-        action = "findvideos_series"
-        if images_fanarttv:
-            try:
-                thumbnail_art = images_fanarttv.get("hdtvlogo")[0].get("url")
-            except:
-                try:
-                    thumbnail_art = images_fanarttv.get("clearlogo")[0].get("url")
-                except:
-                    thumbnail_art = posterdb
-            if images_fanarttv.get("tvbanner"):
-                tvf = images_fanarttv.get("tvbanner")[0].get("url")
-            elif images_fanarttv.get("tvthumb"):
-                tvf = images_fanarttv.get("tvthumb")[0].get("url")
-            elif images_fanarttv.get("tvposter"):
-                tvf = images_fanarttv.get("tvposter")[0].get("url")
-            else:
-                tvf = posterdb
-            if images_fanarttv.get("tvthumb"):
-                thumb_info = images_fanarttv.get("tvthumb")[0].get("url")
-            else:
-                thumb_info = thumbnail_art
-
-            if images_fanarttv.get("hdclearart"):
-                tiw = images_fanarttv.get("hdclearart")[0].get("url")
-            elif images_fanarttv.get("characterart"):
-                tiw = images_fanarttv.get("characterart")[0].get("url")
-            elif images_fanarttv.get("hdtvlogo"):
-                tiw = images_fanarttv.get("hdtvlogo")[0].get("url")
-            else:
-                tiw = ""
-        else:
-            tiw = ""
-            tvf = thumbnail_info = thumbnail_art = posterdb
-    else:
-        url = item.url
-        action = "findvideos"
-        if images_fanarttv:
-            if images_fanarttv.get("hdmovielogo"):
-                thumbnail_art = images_fanarttv.get("hdmovielogo")[0].get("url")
-            elif images_fanarttv.get("moviethumb"):
-                thumbnail_art = images_fanarttv.get("moviethumb")[0].get("url")
-            elif images_fanarttv.get("moviebanner"):
-                thumbnail_art = images_fanarttv.get("moviebanner")[0].get("url")
-            else:
-                thumbnail_art = posterdb
-            if images_fanarttv.get("moviedisc"):
-                tvf = images_fanarttv.get("moviedisc")[0].get("url")
-            elif images_fanarttv.get("hdmovielogo"):
-                tvf = images_fanarttv.get("hdmovielogo")[0].get("url")
-            else:
-                tvf = posterdb
-            if images_fanarttv.get("hdmovieclearart"):
-                tiw = images_fanarttv.get("hdmovieclearart")[0].get("url")
-            elif images_fanarttv.get("hdmovielogo"):
-                tiw = images_fanarttv.get("hdmovielogo")[0].get("url")
-            else:
-                tiw = ""
-        else:
-            tiw = ""
-            tvf = thumbnail_art = posterdb
-    extra = str(fanart_2) + "|" + str(fanart_3) + "|" + str(fanart_4) + "|" + str(id) + "|" + str(tvf) + "|" + str(
-        id_tvdb) + "|" + str(tiw) + "|" + str(rating) + "|" + tipo_ps
-    itemlist.append(
-        Item(channel=item.channel, title=item.title, url=url, action=action, thumbnail=thumbnail_art, fanart=fanart,
-             extra=extra, contentType=item.contentType, fulltitle=fulltitle, folder=True))
-    title_info = "[COLOR powderblue][B]Info[/B][/COLOR]"
-    extra = str(rating) + "|" + str(rating_filma) + "|" + str(id) + "|" + str(item.title) + "|" + str(
-        id_tvdb) + "|" + str(tagline) + "|" + str(sinopsis) + "|" + str(critica) + "|" + str(thumbnail_art) + "|" + str(
-        fanart_4)
-    itemlist.append(Item(channel=item.channel, action="info", title=title_info, url=item.url, thumbnail=posterdb,
-                         fanart=fanart_info, extra=extra, contentType=item.contentType, folder=False))

     return itemlist


 def findvideos_series(item):
     logger.info()
     itemlist = []
-    fanart = ""
     check_temp = []
-    data = httptools.downloadpage(item.url).data
-    if item.contentType != "movie":
-        itmdb = tmdb.Tmdb(id_Tmdb=item.extra.split("|")[3], tipo=item.extra.split("|")[8])
-        season = itmdb.result.get("seasons")
-        check = "no"
-    try:
-        temp, bloque_enlaces = scrapertools.find_single_match(data, 'Temporada (\d+)(.*?)Temporada')
-    except:
-        if "no se agregaron" in data:
-            temp = bloque_enlaces = ""
-        else:
-            temp, bloque_enlaces = scrapertools.find_single_match(data, 'Temporada (\d+)(.*?)')
-    if temp != "":
-        thumbnail = ""
-        if season:
-            for detail in season:
-                if str(detail["season_number"]) == temp:
-                    if detail["poster_path"]:
-                        thumbnail = "https://image.tmdb.org/t/p/original" + detail["poster_path"]
-        images_fanarttv = fanartv(item, item.extra.split("|")[5], item.extra.split("|")[3])
-        if images_fanarttv:
-            season_f = images_fanarttv.get("showbackground")
-            if season_f:
-                for detail in season_f:
-                    if str(detail["season"]) == temp:
-                        if detail["url"]:
-                            fanart = detail["url"]
-        if fanart == "":
-            fanart = item.extra.split("|")[0]
-        if thumbnail == "":
-            thumbnail = item.thumbnail
-        itemlist.append(Item(channel=item.channel,
-                             title="[COLOR darkturquoise]Temporada[/COLOR] " + "[COLOR beige]" + temp + "[/COLOR]",
-                             url="", action="", thumbnail=thumbnail, fanart=fanart, extra="",
-                             contentType=item.contentType, folder=False))
-    capitulos = scrapertools.find_multiple_matches(bloque_enlaces, 'href="([^"]+)".*?Episodio (\d+) - ([^<]+)')
-    for url, epi, title in capitulos:
-        if epi == "1":
-            if epi in str(check_temp):
-                temp = int(temp) + 1
-                thumbnail = ""
-                if season:
-                    for detail in season:
-                        if detail["season_number"] == temp:
-                            if detail["poster_path"]:
-                                thumbnail = "https://image.tmdb.org/t/p/original" + detail["poster_path"]
-                images_fanarttv = fanartv(item, item.extra.split("|")[5], item.extra.split("|")[3])
-                if images_fanarttv:
-                    season_f = images_fanarttv.get("showbackground")
-                    if season_f:
-                        for detail in season_f:
-                            if detail["season"] == temp:
-                                if detail["url"]:
-                                    fanart = detail["url"]
-                if fanart == "":
-                    fanart = item.extra.split("|")[0]
-                if thumbnail == "":
-                    thumbnail = item.thumbnail
-                itemlist.append(Item(channel=item.channel,
-                                     title="[COLOR darkturquoise]Temporada[/COLOR] " + "[COLOR beige]" + str(
-                                         temp) + "[/COLOR]", url="", action="", thumbnail=thumbnail, fanart=fanart,
-                                     extra="", contentType=item.contentType, folder=False))
-            check_temp.append([epi])
-        itemlist.append(Item(channel=item.channel,
-                             title="    [COLOR cyan]Episodio[/COLOR] " + "[COLOR darkcyan]" + epi + "[/COLOR]" + " - " + "[COLOR cadetblue]" + title + "[/COLOR]",
-                             url=url, action="findvideos", thumbnail=item.extra.split("|")[4],
-                             fanart=item.extra.split("|")[0], extra="", contentType=item.contentType, folder=True))
-        title_info = "  Info"
-        title_info = "[COLOR steelblue]" + title_info + "[/COLOR]"
-        itemlist.append(Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url,
-                             thumbnail=item.extra.split("|")[6], fanart=item.extra.split("|")[1],
-                             extra=item.extra + "|" + str(temp) + "|" + epi, folder=False))
-
+    data = httptools.downloadpage(item.url + "/episodios").data
+    try:
+        temp, bloque_enlaces = scrapertools.find_single_match(data, 'Temporada (\d+)(.*?)Temporada')
+    except:
+        if "no se agregaron" in data:
+            temp = bloque_enlaces = ""
+        else:
+            temp, bloque_enlaces = scrapertools.find_single_match(data, 'Temporada (\d+)(.*?)')
+    if temp != "":
+        item.infoLabels["season"] = temp
+        itemlist.append(item.clone(title="[COLOR darkturquoise]Temporada[/COLOR] " + "[COLOR beige]" + temp + "[/COLOR]",
+                                   folder=False))
+    capitulos = scrapertools.find_multiple_matches(bloque_enlaces, 'href="([^"]+)".*?Episodio (\d+) - ([^<]+)')
+    for url, epi, title in capitulos:
+        if epi == "1":
+            if epi in str(check_temp):
+                temp = int(temp) + 1
+                item.infoLabels["season"] = temp
+                item.infoLabels["episode"] = 0
+                itemlist.append(item.clone(title="[COLOR darkturquoise]Temporada[/COLOR] " + "[COLOR beige]" + str(
+                    temp) + "[/COLOR]", folder=False
+                                           ))
+            check_temp.append([epi])
+        item.infoLabels["season"] = temp
+        item.infoLabels["episode"] = epi
+        itemlist.append(item.clone(title="    [COLOR cyan]Episodio[/COLOR] " + "[COLOR darkcyan]" + epi + "[/COLOR]" + " - " + "[COLOR cadetblue]" + title + "[/COLOR]",
+                                   url=url, action="findvideos", thumbnail="",
+                                   extra="", contentType=item.contentType, folder=True))
+    tmdb.set_infoLabels(itemlist)
     return itemlist
@@ -584,9 +197,7 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    if item.extra != "dd" and item.extra != "descarga":
-        if item.contentType != "movie":
     bloque_links = scrapertools.find_single_match(data, '