diff --git a/plugin.video.alfa/channels/asialiveaction.json b/plugin.video.alfa/channels/asialiveaction.json
index 051d2359..23ed268c 100644
--- a/plugin.video.alfa/channels/asialiveaction.json
+++ b/plugin.video.alfa/channels/asialiveaction.json
@@ -8,6 +8,7 @@
     "banner": "https://imgur.com/B1IOAu4.png",
     "categories": [
         "movie",
-        "tvshow"
+        "tvshow",
+        "vos"
     ]
 }
diff --git a/plugin.video.alfa/channels/asialiveaction.py b/plugin.video.alfa/channels/asialiveaction.py
index 1913d9ed..e995972f 100644
--- a/plugin.video.alfa/channels/asialiveaction.py
+++ b/plugin.video.alfa/channels/asialiveaction.py
@@ -6,6 +6,7 @@ import urlparse
 from core import httptools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from core.item import Item
 from platformcode import config, logger

@@ -17,15 +18,21 @@ def mainlist(item):
     itemlist = list()

-    itemlist.append(Item(channel=item.channel, action="estrenos", title="Estrenos", url=host))
     itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
-                         url=urlparse.urljoin(host, "p/peliculas.html")))
+                         url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0))
+
     itemlist.append(Item(channel=item.channel, action="lista", title="Series",
-                         url=urlparse.urljoin(host, "p/series.html")))
-    itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host))
-    itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host))
-    itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host))
-    #itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "/search?q=")))
+                         url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0))
+
+    itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
+
+    itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
+
+    itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
+
+    itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
+
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))

     return itemlist

@@ -34,155 +41,175 @@ def category(item):
     itemlist = list()
     data = httptools.downloadpage(host).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    patron_generos = ""+item.title+"<\/h2>"
+    if item.cat == 'abc':
+        data = scrapertools.find_single_match(data, '...')
+    elif item.cat == 'genre':
+        data = scrapertools.find_single_match(data, 'Géneros.*?')
+    elif item.cat == 'year':
+        data = scrapertools.find_single_match(data, 'Año.*?')
+    elif item.cat == 'quality':
+        data = scrapertools.find_single_match(data, 'Calidad.*?')
+
+    patron = "([^<]+)"
+
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedtitle, scrapedurl in matches:
         if scrapedtitle != 'Próximas Películas':
-            itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl))
+            itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', first=0))
+
     return itemlist

+def search_results(item):
+    logger.info()
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+
+    patron = '([^<]+).*?class="poster-bg" src="([^"]+)"/>.*?'
+    patron += ">(\d{4}).*?([^<]+)"

     if config.get_videolibrary_support() and len(itemlist) > 0:
-        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
-                             action="add_serie_to_library", extra="episodios", show=item.show))
+        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
+                             url=item.url, action="add_serie_to_library", extra="episodios",
+                             contentSerieName=item.contentSerieName))

     return itemlist

    .*?") + + patron = '.*?
    (\d{4})
    ' + patron += '
    (.*?)
    ([^<]+)
    ' + matches = scrapertools.find_multiple_matches(data, patron) - for scrapedtype,scrapedquality,scrapedthumbnail,scrapedtitle,scrapedyear,scrapedurl in matches: + + first = int(item.first) + last = first + 19 + if last > len(matches): + last = len(matches) + next = False + + + for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle ,scrapedurl in matches[first:last]: patron_quality="(.+?)" quality = scrapertools.find_multiple_matches(scrapedquality, patron_quality) qual="" + for calidad in quality: qual=qual+"["+calidad+"] " + title="%s [%s] %s" % (scrapedtitle,scrapedyear,qual) - if item.title =="Series": - itemlist.append(item.clone(title=title, url=host+scrapedurl, extra=scrapedtitle, plot=scrapedtitle, - show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="serie", action="capitulos")) - elif scrapedtype != 'serie': - itemlist.append( - item.clone(title=title, url=host+scrapedurl, action="findvideos", extra=scrapedtype, plot=scrapedtitle, - show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"])) + new_item= Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail, + type=scrapedtype, infoLabels={'year':scrapedyear}) + if scrapedtype.strip() == 'sr': + new_item.contentSerieName = scrapedtitle + new_item.action = 'episodios' + else: + new_item.contentTitle = scrapedtitle + new_item.action = 'findvideos' - # Paginacion - patron_genero = '
-    # Paginacion
-    patron_genero = '([^"]+)<\/h1>'
-    genero = scrapertools.find_single_match(data, patron_genero)
-    if genero == "Romance" or genero == "Drama":
-        patron = "..."
+        g_url = '%s%s' % ('https://drive.google.com', video_id)
+        g_url = g_url.replace('&amp;', '&')
+        g_data = httptools.downloadpage(g_url, follow_redirects=False, only_headers=True).headers
+        url = g_data['location']
+        dl_links.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))
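The new Google Drive handling above never follows the redirect itself: it issues a header-only request with redirects disabled and reads the final stream URL from the location header. A condensed sketch of that trick; video_id is assumed to be the Drive path fragment scraped earlier:

# Sketch: resolve a Google Drive link to its final URL without downloading the body.
from core import httptools

def resolve_drive(video_id):
    g_url = 'https://drive.google.com' + video_id
    g_url = g_url.replace('&amp;', '&')          # the scraped path arrives HTML-escaped
    headers = httptools.downloadpage(g_url, follow_redirects=False, only_headers=True).headers
    return headers.get('location', g_url)        # fall back to the original URL if no redirect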
    .*?", "", data) - data = data.decode('cp1252') realplot = '' - patron = '.*?<\/a>' + patron = ' ", "", data) - data = data.decode('cp1252') - data = scrapertools.find_single_match(data, '<\/form><\/table><\/div>.*?<\/ul>') - - patron = '
  • (.*?)<\/a><\/li>' + patron = '
  • ([^<]+)<\/a><\/li>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: @@ -356,36 +351,40 @@ def findvideos(item): logger.info() itemlist = [] - new_url = get_link(get_source(item.url)) - new_url = get_link(get_source(new_url)) - video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)') - new_url = '%s%s' % (host, 'playeropstream/api.php') - post = {'h': video_id} - post = urllib.urlencode(post) - data = httptools.downloadpage(new_url, post=post).data - json_data = jsontools.load(data) - url = json_data['url'] - server = servertools.get_server_from_url(url) - title = '%s [%s]' % (server, item.language) - itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=item.language, - server=server, infoLabels=item.infoLabels)) + try: + new_url = get_link(get_source(item.url)) + new_url = get_link(get_source(new_url)) + video_id = scrapertools.find_single_match(new_url, 'http.*?h=(\w+)') + new_url = '%s%s' % (host, 'playeropstream/api.php') + post = {'h': video_id} + post = urllib.urlencode(post) + data = httptools.downloadpage(new_url, post=post).data + json_data = jsontools.load(data) + url = json_data['url'] + server = servertools.get_server_from_url(url) + title = '%s [%s]' % (server, item.language) + itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=item.language, + server=server, infoLabels=item.infoLabels)) - # Requerido para FilterTools + # Requerido para FilterTools - itemlist = filtertools.get_links(itemlist, item, list_language) + itemlist = filtertools.get_links(itemlist, item, list_language) - # Requerido para AutoPlay + # Requerido para AutoPlay - autoplay.start(itemlist, item) + autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle + )) + except: + pass - if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': - itemlist.append(Item(channel=item.channel, - title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url=item.url, - action="add_pelicula_to_library", - extra="findvideos", - contentTitle=item.contentTitle - )) return itemlist diff --git a/plugin.video.alfa/channels/peliculasyseries.json b/plugin.video.alfa/channels/peliculasyseries.json new file mode 100644 index 00000000..fd0c878a --- /dev/null +++ b/plugin.video.alfa/channels/peliculasyseries.json @@ -0,0 +1,73 @@ +{ +"id": "peliculasyseries", + "name": "PeliculasySeries", + "active": true, + "adult": false, + "language": ["lat", "cast"], + "thumbnail": "https://s22.postimg.cc/xy1burkep/peliculasyseries.png", + "banner": "", + "categories": [ + "movie", + "tvshow", + "vos" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "Castellano", + "VOSE", + "VOS", + "VO" + ] + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + 
"id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "comprueba_enlaces", + "type": "bool", + "label": "Verificar si los enlaces existen", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "comprueba_enlaces_num", + "type": "list", + "label": "Número de enlaces a verificar", + "default": 1, + "enabled": true, + "visible": "eq(-1,true)", + "lvalues": [ "5", "10", "15", "20" ] + } + ] +} diff --git a/plugin.video.alfa/channels/peliculasyseries.py b/plugin.video.alfa/channels/peliculasyseries.py new file mode 100644 index 00000000..ac83a4be --- /dev/null +++ b/plugin.video.alfa/channels/peliculasyseries.py @@ -0,0 +1,345 @@ +# -*- coding: utf-8 -*- +# -*- Channel PeliculasySeries -*- +# -*- Created for Alfa-addon -*- +# -*- By the Alfa Develop Group -*- + +import re +import urllib +import base64 + +from channelselector import get_thumb +from core import httptools +from core import jsontools +from core import scrapertools +from core import servertools +from core import tmdb +from lib import jsunpack +from core.item import Item +from channels import filtertools +from channels import autoplay +from platformcode import config, logger + + +IDIOMAS = {'la': 'Latino', 'lat':'Latino', 'cas':'Castellano','es': 'Castellano', 'vs': 'VOSE', 'vos':'VOSE', 'vo':'VO', + 'ori':'VO', 'so':'VOS', 'sor':'VOS'} +list_language = IDIOMAS.values() + +list_quality = ['TS','Screener','DVDRip','HDRip', 'HDTV', 'micro720', 'micro1080'] + +list_servers = ['openload', 'rapidvideo', 'powvideo', 'gamovideo', 'streamplay', 'flashx', 'clipwatching', 'vidoza', + 'thevideome'] + +__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculasyseries') +__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculasyseries') + +host = 'https://peliculasyseries.org/' + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + + itemlist = [] + + itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies', + thumbnail= get_thumb('movies', auto=True))) + itemlist.append(Item(channel=item.channel, title='Series', url=host+'series', action='list_all', type='tvshows', + thumbnail= get_thumb('tvshows', auto=True))) + itemlist.append( + item.clone(title="Buscar", action="search", url=host + 'buscar/q/', thumbnail=get_thumb("search", auto=True), + extra='movie')) + + itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality) + autoplay.show_option(item.channel, itemlist) + + return itemlist + +def menu_movies(item): + logger.info() + + itemlist=[] + + itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all', + thumbnail=get_thumb('all', auto=True), type='movies')) + itemlist.append(Item(channel=item.channel, title='Genero', action='section', + thumbnail=get_thumb('genres', auto=True), type='movies')) + + return itemlist + +def get_source(url): + logger.info() + data = httptools.downloadpage(url).data + data = re.sub(r'\n|\r|\t| |
    |\s{2,}', "", data) + return data + + +def get_language(lang_data): + logger.info() + language = [] + lang_data = lang_data.replace('language-ES', '').replace('medium', '').replace('serie', '').replace('-','') + if 'class' in lang_data: + lang_list = scrapertools.find_multiple_matches(lang_data, 'class=" ([^"]+)"') + else: + return lang_data.strip() + + for lang in lang_list: + if lang not in IDIOMAS: + lang = 'VOS' + if lang not in language: + language.append(IDIOMAS[lang]) + return language + +def section(item): + logger.info() + itemlist=[] + duplicados=[] + data = get_source(host) + data = scrapertools.find_single_match(data, 'data-toggle="dropdown">Géneros.*?multi-column-dropdown">.*?"clearfix"') + if 'Genero' in item.title: + patron = '
  • ([^<]+)' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + title = scrapedtitle + if title not in duplicados: + itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all', + type=item.type)) + duplicados.append(title) + + return itemlist + + +def list_all(item): + logger.info() + itemlist = [] + + data = get_source(item.url) + if item.type == 'movies': + patron = '
    ' + patron += '»") + if url_next_page: + itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all')) + + return itemlist + +def seasons(item): + logger.info() + + itemlist=[] + + data=get_source(item.url) + patron='[^' + patron += '
    Temporada (\d+)
    ' + matches = re.compile(patron, re.DOTALL).findall(data) + + infoLabels = item.infoLabels + for scrapedurl, scrapedthumbnail, season in matches: + infoLabels['season']=season + title = 'Temporada %s' % season + itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons', + thumbnail=scrapedthumbnail, infoLabels=infoLabels)) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, + action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) + + return itemlist + +def episodios(item): + logger.info() + itemlist = [] + templist = seasons(item) + for tempitem in templist: + itemlist += episodesxseasons(tempitem) + + return itemlist + +def episodesxseasons(item): + logger.info() + + itemlist = [] + + data=get_source(item.url) + patron ='class="row-serie-item">
    .*?([^' + patron += '(.*?)
    %s+x(\d+)
    ' % item.infoLabels['season'] + matches = re.compile(patron, re.DOTALL).findall(data) + + infoLabels = item.infoLabels + + for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, scrapedepisode in matches: + + infoLabels['episode'] = scrapedepisode + url = scrapedurl + language = get_language(lang_data) + title = '%sx%s - %s %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle, language) + + itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', + thumbnail=scrapedthumbnail, language=language, infoLabels=infoLabels)) + + itemlist = filtertools.get_links(itemlist, item, list_language) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + + +def findvideos(item): + logger.info() + from lib import generictools + itemlist = [] + data = get_source(item.url) + patron = '
    .*?' + patron += 'data-data="([^"]+)".*?([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + + for lang_data, scrapedurl, quality in matches: + lang = get_language(lang_data) + if 'screener' in quality.lower(): + quality = 'Screener' + + quality = quality + title = '%s [%s] [%s]' + url = base64.b64decode(scrapedurl[1:]) + + itemlist.append( + Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang], + infoLabels=item.infoLabels)) + + itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.quality, x.language)) + + # Requerido para Filtrar enlaces + + if __comprueba_enlaces__: + itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language, list_quality) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + itemlist = sorted(itemlist, key=lambda it: it.language) + + if item.contentType != 'episode': + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, + action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) + + return itemlist + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + + if texto != '': + return search_results(item) + else: + return [] + +def search_results(item): + logger.info() + + itemlist=[] + + data=get_source(item.url) + patron = '
  • .*?' + patron += '(Pelicula|Serie) del año([^<]+)

    ' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle, scrapedthumb, content_type, year in matches: + + title = scrapedtitle + if len(year)==0: + year = '-' + url = scrapedurl + thumbnail = scrapedthumb + if not '/serie' in url: + action = 'findvideos' + else: + action = 'seasons' + + new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, action=action, + infoLabels={'year':year}) + if new_item.action == 'findvideos': + new_item.contentTitle = new_item.title + else: + new_item.contentSerieName = new_item.title + + itemlist.append(new_item) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria in ['peliculas']: + item.url = host + 'movies' + elif categoria == 'infantiles': + item.url = host + 'genero/animation/' + item.type='movies' + itemlist = list_all(item) + if itemlist[-1].title == 'Siguiente >>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/pelisr.py b/plugin.video.alfa/channels/pelisr.py index c7679eb0..cdcdacb3 100644 --- a/plugin.video.alfa/channels/pelisr.py +++ b/plugin.video.alfa/channels/pelisr.py @@ -72,7 +72,7 @@ def menu_movies(item): def get_source(url): logger.info() data = httptools.downloadpage(url).data - data = re.sub(r'"|\n|\r|\t| |
    |\s{2,}', "", data) + data = re.sub(r'\n|\r|\t| |
    |\s{2,}', "", data) return data @@ -93,9 +93,9 @@ def section(item): duplicados=[] data = get_source(host+'/'+item.type) if 'Genero' in item.title: - patron = '
  • (.*?)/i>' + patron = '
  • (.*?) (.*?)<') - title = scrapertools.find_single_match(scrapedtitle,'(.*?) ([^<]+)<') + title = scrapertools.find_single_match(scrapedtitle,'([^<]+)
  • .*?([^<]+)
    (.*?).*?' - patron += '.*?<\/h3>(.*?)<\/span><\/div>' + patron = '