diff --git a/plugin.video.alfa/channels/abtoon.json b/plugin.video.alfa/channels/abtoon.json deleted file mode 100644 index 7b4e314b..00000000 --- a/plugin.video.alfa/channels/abtoon.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "abtoon", - "name": "abtoon", - "active": true, - "adult": false, - "language": ["esp", "lat"], - "thumbnail": "http://i.imgur.com/EpNUqsD.png", - "banner": "http://i.imgur.com/c1YTgNT.png", - "categories": [ - "tvshow" - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/abtoon.py b/plugin.video.alfa/channels/abtoon.py deleted file mode 100644 index ef006d94..00000000 --- a/plugin.video.alfa/channels/abtoon.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from channels import renumbertools -from channelselector import get_thumb -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from platformcode import config, logger -from channels import filtertools -from channels import autoplay -from lib import gktools - -IDIOMAS = {'latino': 'Latino'} -list_language = IDIOMAS.values() -list_servers = ['openload' - ] -list_quality = ['default'] - - -host = "https://abtoon.net" - - -def mainlist(item): - logger.info() - thumb_series = get_thumb("channels_tvshow.png") - autoplay.init(item.channel, list_servers, list_quality) - - itemlist = list() - - itemlist.append( - Item(channel=item.channel, action="lista", title="Series Actuales", url=host+'/p/actuales', - thumbnail=thumb_series)) - - itemlist.append( - Item(channel=item.channel, action="lista", title="Series Clasicas", url=host+'/p/clasicas', - thumbnail=thumb_series)) - - itemlist.append( - Item(channel=item.channel, action="lista", title="Series Anime", url=host + '/p/anime', - thumbnail=thumb_series)) - - itemlist.append( - Item(channel=item.channel, action="lista", title="Series Live Action", url=host + '/p/live-action', - thumbnail=thumb_series)) - itemlist.append( - Item(channel=item.channel, action="search", title="Buscar", thumbnail='')) - - itemlist = renumbertools.show_option(item.channel, itemlist) - autoplay.show_option(item.channel, itemlist) - return itemlist - - -def lista(item): - logger.info() - - itemlist = [] - - full_data = httptools.downloadpage(item.url).data - full_data = re.sub(r"\n|\r|\t|\s{2}| ", "", full_data) - data = scrapertools.find_single_match(full_data, 'class="sl">(.*?)
') - patron = '' - - matches = scrapertools.find_multiple_matches(data, patron) - - - for link, img, name in matches: - if " y " in name: - title=name.replace(" y "," & ") - else: - title = name - url = host + link - scrapedthumbnail = host + img - context = renumbertools.context(item) - context2 = autoplay.context - context.extend(context2) - - itemlist.append(Item(channel=item.channel, title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, - contentSerieName=title, context=context)) - - # Paginacion - - next_page = scrapertools.find_single_match(full_data, '\d+\d+') - if next_page != '': - itemlist.append(Item(channel=item.channel, contentSerieName=item.contentSerieName, - title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=host+next_page, action="lista")) - - tmdb.set_infoLabels(itemlist) - return itemlist - -def peliculas(item): - logger.info() - - itemlist = [] - - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '
(.+?)<\/span>' - matches = scrapertools.find_multiple_matches(data, patron) - # Paginacion - num_items_x_pagina = 30 - min = item.page * num_items_x_pagina - min=min-item.page - max = min + num_items_x_pagina - 1 - b=0 - for scrapedplot,scrapedthumbnail, scrapedtitle, scrapedyear, scrapedurl in matches[min:max]: - b=b+1 - url = host + scrapedurl - thumbnail = host +scrapedthumbnail - context = renumbertools.context(item) - context2 = autoplay.context - context.extend(context2) - itemlist.append(item.clone(title=scrapedtitle+"-"+scrapedyear, url=url, action="findvideos", thumbnail=thumbnail, plot=scrapedplot, - show=scrapedtitle,contentSerieName=scrapedtitle,context=context)) - if b<29: - pass - else: - itemlist.append( - Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="peliculas", page=item.page + 1)) - - tmdb.set_infoLabels(itemlist) - return itemlist - -def episodios(item): - logger.info() - - itemlist = [] - data = httptools.downloadpage(item.url).data - # obtener el numero total de episodios - total_episode = 0 - - patron_caps = '
  • (.*?) - (.*?)<\/a><\/li>' - matches = scrapertools.find_multiple_matches(data, patron_caps) - patron_info = '.+?
    ([^"]+)<\/h1>
    (.+?)<\/p>' - scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info) - scrapedthumbnail = host + scrapedthumbnail - - for link, cap, name in matches: - - title = "" - pat = "$%&" - # varios episodios en un enlace - if len(name.split(pat)) > 1: - i = 0 - for pos in name.split(pat): - i = i + 1 - total_episode += 1 - season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, total_episode) - if len(name.split(pat)) == i: - title += "%sx%s " % (season, str(episode).zfill(2)) - else: - title += "%sx%s_" % (season, str(episode).zfill(2)) - else: - total_episode += 1 - season, episode = renumbertools.numbered_for_tratk(item.channel,item.contentSerieName, 1, total_episode) - - title += "%sx%s " % (season, str(episode).zfill(2)) - - url = host + "/" + link - if "DISPONIBLE" in name: - title += "No Disponible aún" - else: - title += name - itemlist.append( - Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, plot=scrapedplot, - thumbnail=scrapedthumbnail)) - - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url, - action="add_serie_to_library", extra="episodios", show=show)) - - return itemlist - -def findvideos(item): - import base64 - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - _sl = scrapertools.find_single_match(data, 'var abi = ([^;]+);') - sl = eval(_sl) - buttons = scrapertools.find_multiple_matches(data,'class="bsel" sl="(.+?)"')#[0,1,2,3,4] - for ids in buttons: - id = int(ids) - url_end = golink(id,sl) - new_url = "https://abtoon.net/" + "embed/" + sl[0] + "/" + sl[1] + "/" + str(id) + "/" + sl[2] + url_end - data_new = httptools.downloadpage(new_url).data - data_new = re.sub(r"\n|\r|\t|\s{2}| ", "", data_new) - logger.info("asdasdasdcc"+data_new) - valor1, valor2 = scrapertools.find_single_match(data_new, 'var x0x = \["[^"]*", "([^"]+)", "[^"]*", "[^"]*", "([^"]+)"\];') - try: - url = base64.b64decode(gktools.transforma_gsv(valor2, base64.b64decode(valor1))) - if 'download' in url: - url = url.replace('download', 'preview') - title = '%s' - itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino', - infoLabels=item.infoLabels)) - except Exception as e: - logger.info(e) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) - # Requerido para AutoPlay - autoplay.start(itemlist, item) - - return itemlist - -def search_results(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url, post=item.post).data - if len(data) > 0: - results = eval(data) - else: - return itemlist - - for result in results: - try: - thumbnail = host + "/tb/%s.jpg" % result[0] - title = u'%s' % result[1] - logger.debug(title) - url = host + "/s/%s" % result[2] - itemlist.append(Item(channel=item.channel, thumbnail=thumbnail, title=title, url=url, contentSerieName=title, - action='episodios')) - except: - pass - - tmdb.set_infoLabels(itemlist, seekTmdb=True) - return itemlist - -def search(item, texto): - logger.info() - import urllib - - if texto != "": - texto = texto.replace(" ", "+") - item.url = host+"/b.php" - post = {'k':texto, "pe":"", "te":""} - item.post = urllib.urlencode(post) - - try: - return search_results(item) - except: - import sys - for 
line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def golink(ida,sl): - a=ida - b=[3,10,5,22,31] - c=1 - d="" - e=sl[2] - for i in range(len(b)): - d=d+substr(e,b[i]+a,c) - return d - -def substr(st,a,b): - return st[a:a+b] \ No newline at end of file diff --git a/plugin.video.alfa/channels/allcalidad.json b/plugin.video.alfa/channels/allcalidad.json deleted file mode 100755 index d699a8a5..00000000 --- a/plugin.video.alfa/channels/allcalidad.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "id": "allcalidad", - "name": "Allcalidad", - "active": true, - "adult": false, - "language": ["esp", "lat"], - "thumbnail": "https://s22.postimg.cc/irnlwuizh/allcalidad1.png", - "banner": "https://s22.postimg.cc/9y1athlep/allcalidad2.png", - "categories": [ - "movie", - "direct" - ], - "settings": [ - { - "id": "filter_languages", - "type": "list", - "label": "Mostrar enlaces en idioma...", - "default": 0, - "enabled": true, - "visible": true, - "lvalues": [ - "No filtrar", - "Latino" - ] - }, - { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_latino", - "type": "bool", - "label": "Incluir en Novedades - Latino", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_peliculas", - "type": "bool", - "label": "Incluir en Novedades - Peliculas", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_infantiles", - "type": "bool", - "label": "Incluir en Novedades - Infantiles", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_terror", - "type": "bool", - "label": "Incluir en Novedades - terror", - "default": true, - "enabled": true, - "visible": true - } - ] -} diff --git a/plugin.video.alfa/channels/allcalidad.py b/plugin.video.alfa/channels/allcalidad.py deleted file mode 100755 index f9469bdc..00000000 --- a/plugin.video.alfa/channels/allcalidad.py +++ /dev/null @@ -1,222 +0,0 @@ -# -*- coding: utf-8 -*- - -from channelselector import get_thumb -from channels import autoplay -from channels import filtertools -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from platformcode import config, logger - - -IDIOMAS = {'Latino': 'Latino'} -list_language = IDIOMAS.values() -list_quality = [] -list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'vimeo', 'netutv'] - - -__channel__='allcalidad' - -host = "https://allcalidad.io/" - -try: - __modo_grafico__ = config.get_setting('modo_grafico', __channel__) -except: - __modo_grafico__ = True - - -def mainlist(item): - logger.info() - import ast - from core import jsontools - data = '{"country_code":"PE","country_name":"Peru","city":null,"postal":null,"latitude":-12.0433,"longitude":-77.0283,"IPv4":"190.41.210.15","state":null}' - data = data.replace("null",'"null"') - logger.info("Intel22 %s" %data) - user_loc = ast.literal_eval(data) - autoplay.init(item.channel, list_servers, list_quality) - itemlist = [] - itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host, thumbnail = get_thumb("newest", auto = True))) - itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", 
url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) )) - itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<", thumbnail = get_thumb("year", auto = True))) - itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "favorites", url = host + "/favorites", thumbnail = get_thumb("favorites", auto = True) )) - itemlist.append(Item(channel = item.channel, title = "")) - itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True))) - autoplay.show_option(item.channel, itemlist) - return itemlist - -def favorites(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '(?s)short_overlay.*?([^<]+)').strip() - datapostid = scrapertools.find_single_match(datos, 'data-postid="([^"]+)') - thumbnail = scrapertools.find_single_match(datos, 'img w.*?src="([^"]+)') - post = 'action=get_movie_details&postID=%s' %datapostid - data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", post=post).data - idioma = "Latino" - mtitulo = titulo + " (" + idioma + ")" - year = scrapertools.find_single_match(data1, "Año:.*?(\d{4})") - if year: - mtitulo += " (" + year + ")" - item.infoLabels['year'] = int(year) - itemlist.append(item.clone(channel = item.channel, - action = "findvideos", - title = mtitulo, - fulltitle = titulo, - thumbnail = thumbnail, - url = url, - contentType="movie", - language = idioma - )) - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)') - if url_pagina != "": - pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)") - itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina)) - return itemlist - - -def findvideos(item): - itemlist = [] - data = httptools.downloadpage(item.url).data - if not item.infoLabels["year"]: - item.infoLabels["year"] = scrapertools.find_single_match(data, 'dateCreated.*?(\d{4})') - if "orig_title" in data: - contentTitle = scrapertools.find_single_match(data, 'orig_title.*?>([^<]+)<').strip() - if contentTitle != "": - item.contentTitle = contentTitle - bloque = scrapertools.find_single_match(data, '(?s)
    (.*?)") - unpack = jsunpack.unpack(packed) - urls = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+).*?label":"([^"]+)') - for url2, quality in urls: - if url2: - itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2)) - # Segundo grupo de enlaces - matches = scrapertools.find_multiple_matches(data, '') - for ser in matches1: - ser = ser.replace("×","x") - aud = scrapertools.find_single_match(ser, 'aud">.*?x([^<]+)') - language = "Versión RAW" - if aud == "jp" and sub == "si": - language = "Sub. Español" - matches2 = scrapertools.find_multiple_matches(ser, 'href="([^"]+)') - for url2 in matches2: - if url2: - itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2)) - itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) - # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) - - # Requerido para AutoPlay - - autoplay.start(itemlist, item) - return itemlist diff --git a/plugin.video.alfa/channels/canalpelis.json b/plugin.video.alfa/channels/canalpelis.json deleted file mode 100644 index 9be2a946..00000000 --- a/plugin.video.alfa/channels/canalpelis.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "id": "canalpelis", - "name": "CanalPelis", - "active": true, - "adult": false, - "language": ["esp", "lat", "cast", "vose"], - "fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/canalpelisbg.jpg", - "thumbnail": "http://www.canalpelis.com/wp-content/uploads/2016/11/logo_web.gif", - "banner": "", - "categories": [ - "movie", - "tvshow", - "vos" - ], - "settings": [ - { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "perfil", - "type": "list", - "label": "Perfil de color", - "default": 3, - "enabled": true, - "visible": true, - "lvalues": [ - "Sin color", - "Perfil 5", - "Perfil 4", - "Perfil 3", - "Perfil 2", - "Perfil 1" - ] - }, - { - "id": "orden_episodios", - "type": "bool", - "label": "Mostrar los episodios de las series en orden descendente", - "default": false, - "enabled": true, - "visible": true - }, - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_peliculas", - "type": "bool", - "label": "Incluir en Novedades - Peliculas", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_infantiles", - "type": "bool", - "label": "Incluir en Novedades - Infantiles", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_terror", - "type": "bool", - "label": "Incluir en Novedades - terror", - "default": true, - "enabled": true, - "visible": true - } - ] -} diff --git a/plugin.video.alfa/channels/canalpelis.py b/plugin.video.alfa/channels/canalpelis.py deleted file mode 100644 index d827b70f..00000000 --- a/plugin.video.alfa/channels/canalpelis.py +++ /dev/null @@ -1,424 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- Channel CanalPelis -*- -# -*- Created for Alfa-addon -*- -# -*- By the Alfa Develop Group -*- - -import re -import sys -import urllib -import urlparse - -from core import httptools -from core import scrapertools -from core import servertools -from core.item import Item -from core import channeltools -from 
core import tmdb -from platformcode import config, logger -from channelselector import get_thumb - -__channel__ = "canalpelis" - -host = "http://www.canalpelis.com/" - -try: - __modo_grafico__ = config.get_setting('modo_grafico', __channel__) - __perfil__ = int(config.get_setting('perfil', __channel__)) -except: - __modo_grafico__ = True - __perfil__ = 0 - -# Fijar perfil de color -perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'], - ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'], - ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']] -if __perfil__ < 3: - color1, color2, color3, color4, color5 = perfil[__perfil__] -else: - color1 = color2 = color3 = color4 = color5 = "" - -headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], - ['Referer', host]] - -parameters = channeltools.get_channel_parameters(__channel__) -fanart_host = parameters['fanart'] -thumbnail_host = parameters['thumbnail'] - -thumbnail = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png" - - -def mainlist(item): - logger.info() - itemlist = [] - - itemlist.append(item.clone(title="Peliculas", action="peliculas", thumbnail=get_thumb('movies', auto=True), - text_blod=True, page=0, viewcontent='movies', - url=host + 'movies/', viewmode="movie_with_plot")) - - itemlist.append(item.clone(title="Géneros", action="generos", thumbnail=get_thumb('genres', auto=True), - text_blod=True, page=0, viewcontent='movies', - url=host + 'genre/', viewmode="movie_with_plot")) - - itemlist.append(item.clone(title="Año de Estreno", action="year_release", thumbnail=get_thumb('year', auto=True), - text_blod=True, page=0, viewcontent='movies', url=host + 'release/', - viewmode="movie_with_plot")) - - itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True), - text_blod=True, url=host, page=0)) - - itemlist.append(item.clone(title="Series", action="series", extra='serie', url=host + 'tvshows/', - viewmode="movie_with_plot", text_blod=True, viewcontent='movies', - thumbnail=get_thumb('tvshows', auto=True), page=0)) - - return itemlist - - -def search(item, texto): - logger.info() - - texto = texto.replace(" ", "+") - item.url = urlparse.urljoin(item.url, "?s={0}".format(texto)) - - try: - return sub_search(item) - - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - -def sub_search(item): - logger.info() - - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |
    ", "", data) - # logger.info(data) - patron = '
    .*?' # url - patron += '([^.*?' # img and title - patron += '\d+') - - if paginacion: - itemlist.append(Item(channel=item.channel, action="sub_search", - title="» Siguiente »", url=paginacion)) - - tmdb.set_infoLabels(itemlist) - - return itemlist - - -def newest(categoria): - logger.info() - itemlist = [] - item = Item() - try: - if categoria == 'peliculas': - item.url = host + 'movies/' - elif categoria == 'infantiles': - item.url = host + "genre/cine-animacion/" - elif categoria == 'terror': - item.url = host + "genre/cine-terror/" - else: - return [] - - itemlist = peliculas(item) - if itemlist[-1].title == "» Siguiente »": - itemlist.pop() - - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) - - patron = '
    (.*?)
    ') - item.plot = scrapertools.htmlclean(item.plot) - item.infoLabels['director'] = scrapertools.find_single_match( - datas, '
    ([^<]+)') - item.infoLabels['genre'] = scrapertools.find_single_match( - datas, 'rel="tag">[^<]+') - - return itemlist - - -def generos(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - - patron = '
  • ([^<]+) ([^<]+)
  • ' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedtitle, cantidad in matches: - if cantidad != '0' and scrapedtitle != '# Próximamente': - title = "%s (%s)" % (scrapedtitle, cantidad) - itemlist.append(item.clone(channel=item.channel, action="peliculas", title=title, page=0, - url=scrapedurl, text_color=color3, viewmode="movie_with_plot")) - - return itemlist - - -def year_release(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - # logger.info(data) - patron = '
  • ([^<]+)
  • ' # url, title - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedtitle in matches: - itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0, - url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next')) - - return itemlist - - -def series(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\(.*?\)| |
    ", "", data) - patron = '
    ([^.*?.*?' - patron += '
    ([^<]+)
    ' - - matches = scrapertools.find_multiple_matches(data, patron) - - for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches[item.page:item.page + 30]: - if plot == '': - plot = scrapertools.find_single_match(data, '
    ([^<]+)
    ') - scrapedtitle = scrapedtitle.replace('Ver ', '').replace( - ' Online HD', '').replace('ver ', '').replace(' Online', '').replace(' (Serie TV)', '').strip() - itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas", - contentSerieName=scrapedtitle, show=scrapedtitle, plot=plot, - thumbnail=scrapedthumbnail, contentType='tvshow')) - - # url_next_page = scrapertools.find_single_match(data, '') - - tmdb.set_infoLabels(itemlist, __modo_grafico__) - - if item.page + 30 < len(matches): - itemlist.append(item.clone(page=item.page + 30, - title="» Siguiente »", text_color=color3)) - else: - next_page = scrapertools.find_single_match( - data, '') - - if next_page: - itemlist.append(item.clone(url=next_page, page=0, - title="» Siguiente »", text_color=color3)) - - return itemlist - - -def temporadas(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - datas = re.sub(r"\n|\r|\t| |
    ", "", data) - patron = "([^<]+).*?" # numeros de temporadas - patron += "" # capitulos - # logger.info(datas) - matches = scrapertools.find_multiple_matches(datas, patron) - if len(matches) > 1: - for scrapedseason, scrapedthumbnail in matches: - scrapedseason = " ".join(scrapedseason.split()) - temporada = scrapertools.find_single_match(scrapedseason, '(\d+)') - new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='temporadas') - new_item.infoLabels['season'] = temporada - new_item.extra = "" - itemlist.append(new_item) - - tmdb.set_infoLabels(itemlist, __modo_grafico__) - - for i in itemlist: - i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle']) - if i.infoLabels['title']: - # Si la temporada tiene nombre propio añadirselo al titulo del item - i.title += " - %s" % (i.infoLabels['title']) - if i.infoLabels.has_key('poster_path'): - # Si la temporada tiene poster propio remplazar al de la serie - i.thumbnail = i.infoLabels['poster_path'] - - itemlist.sort(key=lambda it: it.title) - - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, - action="add_serie_to_library", extra="episodios", show=item.show, category="Series", - text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) - - return itemlist - else: - return episodios(item) - - -def episodios(item): - logger.info() - itemlist = [] - - data = httptools.downloadpage(item.url).data - datas = re.sub(r"\n|\r|\t| |
    ", "", data) - patron = "
    .*?" - patron += "
    (.*?)
    .*?" - patron += "
    ([^<]+)" - - matches = scrapertools.find_multiple_matches(datas, patron) - - for scrapedtitle, scrapedurl, scrapedname in matches: - scrapedtitle = scrapedtitle.replace('--', '0') - patron = '(\d+) - (\d+)' - match = re.compile(patron, re.DOTALL).findall(scrapedtitle) - season, episode = match[0] - - if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season): - continue - - title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname)) - new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title, - contentType="episode") - if 'infoLabels' not in new_item: - new_item.infoLabels = {} - - new_item.infoLabels['season'] = season - new_item.infoLabels['episode'] = episode.zfill(2) - - itemlist.append(new_item) - - # TODO no hacer esto si estamos añadiendo a la videoteca - if not item.extra: - # Obtenemos los datos de todos los capitulos de la temporada mediante multihilos - tmdb.set_infoLabels(itemlist, __modo_grafico__) - for i in itemlist: - if i.infoLabels['title']: - # Si el capitulo tiene nombre propio añadirselo al titulo del item - i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[ - 'episode'], i.infoLabels['title']) - if i.infoLabels.has_key('poster_path'): - # Si el capitulo tiene imagen propia remplazar al poster - i.thumbnail = i.infoLabels['poster_path'] - - itemlist.sort(key=lambda it: int(it.infoLabels['episode']), - reverse=config.get_setting('orden_episodios', __channel__)) - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - # Opción "Añadir esta serie a la videoteca" - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, - action="add_serie_to_library", extra="episodios", show=item.show, category="Series", - text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) - - return itemlist - - -def findvideos(item): - logger.info() - import base64 - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data) - - patron = "data-post='(\d+)' data-nume='(\d+)'.*?img src='([^']+)'>" - matches = re.compile(patron, re.DOTALL).findall(data) - for id, option, lang in matches: - lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png') - lang = lang.lower().strip() - idioma = {'mx': '[COLOR cornflowerblue](LAT)[/COLOR]', - 'es': '[COLOR green](CAST)[/COLOR]', - 'en': '[COLOR red](VOSE)[/COLOR]', - 'gb': '[COLOR red](VOSE)[/COLOR]'} - if lang in idioma: - lang = idioma[lang] - post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type': 'movie'} - post = urllib.urlencode(post) - test_url = '%swp-admin/admin-ajax.php' % host - new_data = httptools.downloadpage(test_url, post=post, headers={'Referer': item.url}).data - hidden_url = scrapertools.find_single_match(new_data, "src='([^']+)'") - new_data = httptools.downloadpage(hidden_url, follow_redirects=False) - - try: - b64_url = scrapertools.find_single_match(new_data.headers['location'], "y=(.*)") - url = base64.b64decode(b64_url) - except: - url = hidden_url - if url != '': - itemlist.append( - Item(channel=item.channel, action='play', language=lang, infoLabels=item.infoLabels, - url=url, title='Ver en: ' + '[COLOR yellowgreen]%s [/COLOR]' + lang)) - - itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize()) - itemlist.sort(key=lambda it: it.language, reverse=False) - - if 
config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios': - itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos", - title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - thumbnail=thumbnail_host, contentTitle=item.contentTitle)) - - return itemlist diff --git a/plugin.video.alfa/channels/cartoonlatino.json b/plugin.video.alfa/channels/cartoonlatino.json deleted file mode 100755 index 5075a77f..00000000 --- a/plugin.video.alfa/channels/cartoonlatino.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "cartoonlatino", - "name": "Cartoon-Latino", - "active": true, - "adult": false, - "language": ["esp", "lat"], - "thumbnail": "http://i.imgur.com/wk6fRDZ.png", - "banner": "http://i.imgur.com/115c59F.png", - "categories": [ - "tvshow" - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/cartoonlatino.py b/plugin.video.alfa/channels/cartoonlatino.py deleted file mode 100644 index c96dcc0c..00000000 --- a/plugin.video.alfa/channels/cartoonlatino.py +++ /dev/null @@ -1,174 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from channelselector import get_thumb -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from platformcode import config, logger -from channels import autoplay - -host = "http://www.cartoon-latino.com/" -from channels import autoplay - -IDIOMAS = {'latino': 'Latino'} -list_language = IDIOMAS.values() -list_servers = ['openload', - 'vimple', - 'gvideo', - 'rapidvideo' - ] -list_quality = ['default'] - -def mainlist(item): - logger.info() - thumb_series = get_thumb('tvshows', auto=True) - autoplay.init(item.channel, list_servers, list_quality) - itemlist = list() - itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host, - thumbnail=thumb_series)) - autoplay.show_option(item.channel, itemlist) - return itemlist - - -def lista_gen(item): - logger.info() - - itemlist = [] - - data1 = httptools.downloadpage(item.url).data - data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data1) - patron_sec = '
    .+?<\/section>' - data = scrapertools.find_single_match(data1, patron_sec) - patron = '
    ' - patron += '
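The channels removed above all funnel their findvideos results through the same Alfa helpers (servertools.get_servers_itemlist, filtertools.get_links and autoplay.start), with autoplay.init and autoplay.show_option called from mainlist. The outline below is a minimal sketch of that shared wiring, assuming the helper signatures used in the removed files; the host value and the scraping regex are placeholders, not values taken from any real channel.

# -*- coding: utf-8 -*-
# Minimal sketch of the mainlist/findvideos wiring shared by the channels removed
# in this diff. The host and the regex below are placeholders, not real values.

from channels import autoplay, filtertools
from core import httptools, scrapertools, servertools
from core.item import Item
from platformcode import logger

IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload']          # placeholder server list
list_quality = ['default']

host = "https://example.invalid/"    # placeholder host


def mainlist(item):
    logger.info()
    # AutoPlay must be initialised before its settings entry is offered.
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [Item(channel=item.channel, action="findvideos", title="Ejemplo", url=host)]
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Placeholder pattern: each removed channel scraped its own embed/iframe URLs here.
    for url in scrapertools.find_multiple_matches(data, 'src="([^"]+)"'):
        itemlist.append(Item(channel=item.channel, action="play", url=url,
                             title="Ver en %s", language="latino",
                             infoLabels=item.infoLabels))
    # Resolve the server name for each link and fill the "%s" in the title.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools.
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay.
    autoplay.start(itemlist, item)
    return itemlist

Apart from this boilerplate, the removed channels differ mainly in their host URLs and scraping patterns, which is what most of the deleted code above implements.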