diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml
index b8de206d..68d81bb4 100755
--- a/plugin.video.alfa/addon.xml
+++ b/plugin.video.alfa/addon.xml
@@ -1,5 +1,5 @@
-
+
@@ -19,10 +19,13 @@
 [B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Arreglos[/B][/COLOR]
-        ¤ maxipelis24 ¤ thevid ¤ gamovideo
-        ¤ pack +18
-
-        Agradecimientos a @chivmalev por colaborar en ésta versión
+        ¤ pack +18 ¤ cinehindi ¤ anonfile
+        ¤ fembed ¤ doomtv ¤ vk
+        ¤ vshare ¤ CineCalidad ¤ seriesblanco
+        ¤ dospelis
+
+        [COLOR green][B]Novedades[/B][/COLOR]
+        ¤ cineonline ¤ pelix
 Navega con Kodi por páginas web para ver sus videos de manera fácil.
diff --git a/plugin.video.alfa/channels/cinecalidad.py b/plugin.video.alfa/channels/cinecalidad.py
index ec18bc95..2c11d1ad 100644
--- a/plugin.video.alfa/channels/cinecalidad.py
+++ b/plugin.video.alfa/channels/cinecalidad.py
@@ -130,7 +130,7 @@ def anyos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    patron = '([^<]+)([^<]+)<\/a>'
+    patron = ''
     matches = re.compile(patron, re.DOTALL).findall(data)
     for scrapedurl, scrapedtitle in matches:
         url = urlparse.urljoin(item.url, scrapedurl)
@@ -206,8 +206,8 @@ def peliculas(item):
     data = httptools.downloadpage(item.url).data
-    patron = '
.*?""") - patron = 'class="item">.*?' # Todos los items de peliculas (en esta web) empiezan con esto - patron += '""") + patron = '
' # Todos los items de peliculas (en esta web) empiezan con esto + patron += '
' # scrapedurl + patron += '.*?'Año de estreno(.*?)') + patron = '
  • (\d+)' + else: + patron = '
  • ([^"]+) (\d+)' + matches = re.compile(patron,re.DOTALL).findall(data) + for scrapedurl, scrapedtitle, cantidad in matches: + scrapedplot = "" + scrapedthumbnail = "" + title = scrapedtitle + " %s" % cantidad + itemlist.append(item.clone(channel=item.channel, action="lista", title=title , url=scrapedurl , + thumbnail=scrapedthumbnail , plot=scrapedplot) ) + return itemlist + + +def lista(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '
    Siguiente') + if next_page_url!="": + next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append(item.clone(channel=item.channel , action="lista" , title="Next page >>" , + text_color="blue", url=next_page_url) ) + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '(\d+)' + matches = re.compile(patron, re.DOTALL).findall(data) + for numtempo in matches: + itemlist.append(item.clone( action='episodesxseason', title='Temporada %s' % numtempo, url = item.url, + contentType='season', contentSeason=numtempo )) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, + action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) + # return sorted(itemlist, key=lambda it: it.title) + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + templist = temporadas(item) + for tempitem in templist: + itemlist += episodesxseason(tempitem) + return itemlist + + +def episodesxseason(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '
    %s x (\d+)
    .*?' % item.contentSeason + patron += '([^"]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + for episode, url, title in matches: + titulo = '%sx%s %s' % (item.contentSeason, episode, title) + itemlist.append(item.clone( action='findvideos', url=url, title=titulo, + contentType='episode', contentEpisodeNumber=episode )) + tmdb.set_infoLabels(itemlist) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + data = re.sub(r"\n|\r|\t| |
    ", "", data) + patron = 'id="plays-(\d+)">\s*([^<]+)([^<]+)' % xnumber) + else: + lang = scrapertools.find_single_match(data, '#div%s">([^<]+)<' % xnumber) + if "lat" in lang.lower(): lang= "Lat" + if 'cast' in lang.lower(): lang= "Cast" + if 'sub' in lang.lower(): lang= "Sub" + if lang in IDIOMAS: + lang = IDIOMAS[lang] + post= {"nombre":xname} + url= httptools.downloadpage("https://www.cine-online.eu/ecrypt", post=urllib.urlencode(post)).data + url = scrapertools.find_single_match(url,'<(?:IFRAME SRC|iframe src)="([^"]+)"') + + if not config.get_setting('unify'): + title = ' (%s)' % (lang) + else: + title = '' + if url != '': + itemlist.append(item.clone(action="play", title='%s'+title, url=url, language=lang )) + itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + + # Requerido para Filtrar enlaces + if __comprueba_enlaces__: + itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__) + # Requerido para FilterTools + itemlist = filtertools.get_links(itemlist, item, list_language) + # Requerido para AutoPlay + autoplay.start(itemlist, item) + if not "/episodios/" in item.url: + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos': + itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, + extra="findvideos", contentTitle=item.contentTitle)) + return itemlist + + diff --git a/plugin.video.alfa/channels/doomtv.py b/plugin.video.alfa/channels/doomtv.py index dff3215f..6a233c27 100644 --- a/plugin.video.alfa/channels/doomtv.py +++ b/plugin.video.alfa/channels/doomtv.py @@ -15,12 +15,10 @@ from core.item import Item from platformcode import config, logger from channelselector import get_thumb -IDIOMAS = {'latino': 'Latino'} +IDIOMAS = {'Latino': 'Latino'} list_language = IDIOMAS.values() - -CALIDADES = {'1080p': '1080p', '720p': '720p', '480p': '480p', '360p': '360p'} -list_quality = CALIDADES.values() -list_servers = ['directo', 'openload'] +list_quality = [] +list_servers = ['dostream', 'openload'] host = 'http://doomtv.net/' @@ -28,6 +26,8 @@ host = 'http://doomtv.net/' def mainlist(item): logger.info() + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] itemlist.append( @@ -65,6 +65,8 @@ def mainlist(item): fanart='https://s30.postimg.cc/pei7txpa9/buscar.png' )) + autoplay.show_option(item.channel, itemlist) + return itemlist @@ -75,7 +77,6 @@ def get_source(url, referer=None): else: data = httptools.downloadpage(url, headers={'Referer':referer}).data data = re.sub(r'\n|\r|\t| |
    |\s{2,}', "", data) - logger.debug(data) return data def lista(item): @@ -98,9 +99,9 @@ def lista(item): for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]: - url = 'http:'+scrapedurl - thumbnail = scrapedthumbnail - filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "") + url = host+scrapedurl + thumbnail = 'https:'+scrapedthumbnail.strip() + filtro_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w185", "") filtro_list = {"poster_path": filtro_thumb.strip()} filtro_list = filtro_list.items() title = scrapedtitle @@ -144,7 +145,7 @@ def seccion(item): matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: - url = 'http:'+ scrapedurl + url = host+scrapedurl title = scrapedtitle thumbnail = '' if url not in duplicado: @@ -196,22 +197,36 @@ def findvideos(item): itemlist = [] data = get_source(item.url) - patron = 'id="(tab\d+)">
    .*?src="([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(data) for option, urls in matches: + language = 'Latino' if 'http' not in urls: urls = 'https:'+urls + if not config.get_setting('unify'): + title = ' [%s]' % language + else: + title = '%s' new_item = Item( channel=item.channel, url=urls, - title=item.title, + title= '%s'+ title, contentTitle=item.title, action='play', + language = IDIOMAS[language], + infoLabels = item.infoLabels ) itemlist.append(new_item) - itemlist = servertools.get_servers_itemlist(itemlist) + itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': itemlist.append( @@ -223,4 +238,5 @@ def findvideos(item): contentTitle=item.contentTitle, )) + return itemlist diff --git a/plugin.video.alfa/channels/dospelis.json b/plugin.video.alfa/channels/dospelis.json index 13130b10..b311ee3b 100644 --- a/plugin.video.alfa/channels/dospelis.json +++ b/plugin.video.alfa/channels/dospelis.json @@ -4,7 +4,7 @@ "active": true, "adult": false, "language": ["lat", "cast"], - "thumbnail": "https://www.dospelis.com/wp-content/uploads/2018/07/dospelislogo.png", + "thumbnail": "https://www.dospelis.net/wp-content/uploads/2019/02/logodospelisamor.png", "banner": "", "categories": [ "movie", diff --git a/plugin.video.alfa/channels/dospelis.py b/plugin.video.alfa/channels/dospelis.py index 4b02891a..ae332456 100644 --- a/plugin.video.alfa/channels/dospelis.py +++ b/plugin.video.alfa/channels/dospelis.py @@ -90,11 +90,11 @@ def section(item): logger.info() itemlist=[] duplicados=[] - data = get_source(host+'/'+item.type) + data = get_source(host+item.type) if 'Genero' in item.title: - patron = '
  • (.*?)/i>' + patron = '(.*?)/i>' elif 'Año' in item.title: - patron = '
  • ([^<]+)' + patron = '
  • ([^<]+)' matches = re.compile(patron, re.DOTALL).findall(data) @@ -102,7 +102,7 @@ def section(item): title = scrapedtitle plot='' if 'Genero' in item.title: - quantity = scrapertools.find_single_match(scrapedtitle,' (.*?)<') + quantity = scrapertools.find_single_match(scrapedtitle,'(.*?)<') title = scrapertools.find_single_match(scrapedtitle,'(.*?)
    .?([^.*?' - patron +='"quality">([^<]+)<\/div>.?.*?' - patron +='<\/h3>.?([^"]+)<\/span><\/div>.*?"flags"(.*?)metadata' + patron = '.?.*?' + patron += 'quality>([^<]+)<.*?]+)>.*?<\/h3>([^<]+)<.*?flags(.*?)metadata' matches = re.compile(patron, re.DOTALL).findall(data) @@ -148,8 +147,8 @@ def list_all(item): infoLabels={'year':year})) elif item.type == 'tvshows': - patron = '
    .?
    .?') + url_next_page = scrapertools.find_single_match(data,']+)>') if url_next_page: itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all')) @@ -180,7 +179,7 @@ def seasons(item): itemlist=[] data=get_source(item.url) - patron='Temporada.?\d+' + patron='title>Temporada.?(\d+)' matches = re.compile(patron, re.DOTALL).findall(data) infoLabels = item.infoLabels @@ -214,7 +213,7 @@ def episodesxseasons(item): itemlist = [] data=get_source(item.url) - patron='class="numerando">%s - (\d+)
    .?
    .?.?]+)>([^<]+)<' % item.infoLabels['season'] matches = re.compile(patron, re.DOTALL).findall(data) infoLabels = item.infoLabels @@ -236,12 +235,15 @@ def findvideos(item): logger.info() itemlist = [] data = get_source(item.url) - patron = 'id="option-(\d+)".*?rptss" src="([^"]+)" frameborder' + patron = 'id=option-(\d+).*?src=([^ ]+) frameborder' matches = re.compile(patron, re.DOTALL).findall(data) lang='' for option, scrapedurl in matches: lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option) quality = '' + if 'goo.gl' in scrapedurl: + new_data = httptools.downloadpage(scrapedurl, follow_redirects=False).headers + scrapedurl = new_data['location'] if lang not in IDIOMAS: lang = 'en' title = '%s %s' @@ -291,8 +293,7 @@ def search_results(item): itemlist=[] data=get_source(item.url) - patron = '
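
The dospelis findvideos() change above avoids scraping the goo.gl interstitial page by disabling redirects and reading the final target from the Location header. The same idea outside Alfa, with `requests` in place of httptools; only the goo.gl guard is taken from the hunk:

```python
# Hedged sketch: resolve a URL shortener by reading the redirect target instead of
# downloading the destination page. 'requests' stands in for Alfa's httptools.
import requests

def resolve_short_link(url):
    if 'goo.gl' not in url:          # same guard as the dospelis hunk
        return url
    resp = requests.get(url, allow_redirects=False, timeout=10)
    # 301/302 responses carry the real target in the Location header.
    return resp.headers.get('Location', url)

# Example call (hypothetical short link):
# print(resolve_short_link('https://goo.gl/abc123'))
```
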
    .*?([^.*?meta.*?' - patron += '"year">([^<]+)<(.*?)

    ([^<]+)<\/p>' + patron = '

    .*?]+)>.*?year>([^<]+)<(.*?)

    ([^<]+)<\/p>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches: diff --git a/plugin.video.alfa/channels/mastorrents.json b/plugin.video.alfa/channels/mastorrents.json deleted file mode 100644 index 6ebae2fc..00000000 --- a/plugin.video.alfa/channels/mastorrents.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "id": "mastorrents", - "name": "MasTorrents", - "active": true, - "adult": false, - "language": ["cast","lat"], - "thumbnail": "https://s33.postimg.cc/3y8720l9b/mastorrents.png", - "banner": "", - "version": 1, - "categories": [ - "movie", - "tvshow", - "torrent" - ], - "settings": [ - { - "id": "include_in_newest_peliculas", - "type": "bool", - "label": "Incluir en Novedades - Peliculas", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_newest_torrent", - "type": "bool", - "label": "Incluir en Novedades - Torrent", - "default": true, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/mastorrents.py b/plugin.video.alfa/channels/mastorrents.py deleted file mode 100644 index df53aece..00000000 --- a/plugin.video.alfa/channels/mastorrents.py +++ /dev/null @@ -1,323 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- Channel MasTorrents -*- -# -*- Created for Alfa-addon -*- -# -*- By the Alfa Develop Group -*- - -import re -from channelselector import get_thumb -from platformcode import logger -from platformcode import config -from core import scrapertools -from core.item import Item -from core import servertools -from core import httptools -from core import tmdb - -host = 'http://www.mastorrents.com/' - -def mainlist(item): - logger.info() - - itemlist = [] - - itemlist.append(item.clone(title="Peliculas", - action="movie_list", - thumbnail=get_thumb("channels_movie.png") - )) - - itemlist.append(item.clone(title="Series", - action="series_list", - thumbnail=get_thumb("channels_tvshow.png") - )) - return itemlist - - -def movie_list(item): - logger.info() - - itemlist = [] - - itemlist.append(item.clone(title="Todas", - action="lista", - url=host+'peliculas', - extra='movie', - thumbnail=get_thumb('all', auto=True) - )) - - itemlist.append(item.clone(title="Generos", - action="genres", - url=host, - extra='movie', - thumbnail=get_thumb('genres', auto=True) - )) - - itemlist.append(item.clone(title="Buscar", - action="search", - url=host + '?pTit=', thumbnail=get_thumb('search', auto=True), - extra='movie' - )) - return itemlist - - -def series_list(item): - logger.info() - - itemlist = [] - - itemlist.append(item.clone(title="Todas", - action="lista", - url=host + 'series', - extra='serie', - thumbnail=get_thumb('all', auto=True) - )) - - itemlist.append(item.clone(title="Generos", - action="genres", - url=host + 'series/', - extra='serie', - thumbnail=get_thumb('genres', auto=True) - )) - - itemlist.append(item.clone(title="Buscar", - action="search", - url=host + 'series/?pTit=', - extra='serie', - thumbnail=get_thumb('search', auto=True) - )) - return itemlist - - -def get_source(url): - logger.info() - data = httptools.downloadpage(url).data - data = re.sub(r'"|\n|\r|\t| |
    |\s{2,}', "", data) - return data - -def lista (item): - logger.info () - itemlist = [] - infoLabels = dict() - data = get_source(item.url) - patron = "

    .*?
    ') - - patron = '' - - matches = re.compile(patron,re.DOTALL).findall(data) - - for value, title in matches: - url = item.url + value - title = title.decode('latin1').encode('utf8') - itemlist.append(Item(channel=item.channel, title=title, url=url, action='lista')) - return itemlist - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = item.url + texto - - if texto != '': - return lista(item) - else: - return [] - - -def seasons(item): - logger.info() - itemlist=[] - infoLabels = item.infoLabels - data=get_source(item.url) - patron ='href=javascript:showSeasson\(.*?\); id=.*?>Temporada (.*?)<\/a>' - matches = re.compile(patron, re.DOTALL).findall(data) - - for season in matches: - title='Temporada %s' % season - infoLabels['season'] = season - itemlist.append(Item(channel=item.channel, - title= title, - url=item.url, - action='episodesxseasons', - contentSeasonNumber=season, - contentSerieName=item.contentSerieName, - infoLabels=infoLabels - )) - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - itemlist = itemlist[::-1] - if config.get_videolibrary_support() and len(itemlist) > 0: - itemlist.append( - Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, - action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName)) - - return itemlist - -def all_episodes(item): - logger.info() - itemlist = [] - templist = seasons(item) - for tempitem in templist: - itemlist += episodesxseasons(tempitem) - - return itemlist - -def episodesxseasons(item): - logger.info() - - itemlist=[] - - data=get_source(item.url) - patron = "
    %sx(.\d+)<\/div>.*?" % item.contentSeasonNumber - patron += "image:url\('(.*?)'.*?href.*?>(%s)<" % item.contentSerieName - matches = re.compile(patron, re.DOTALL).findall(data) - infoLabels=item.infoLabels - for episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches: - contentEpisodeNumber=episode - season = item.contentSeasonNumber - url=scrapedurl - thumbnail=scrapedthumbnail - infoLabels['episode']=episode - title = '%sx%s - %s' % (season, episode, item.contentSerieName) - itemlist.append(Item(channel=item.channel, - action='findvideos', - title=title, - url=url, - thumbnail=thumbnail, - contentSerieName=item.contentSerieName, - contentEpisodeNumber=contentEpisodeNumber, - infoLabels=infoLabels - )) - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - - return itemlist[::-1] - - -def findvideos(item): - logger.info() - itemlist=[] - data = get_source(item.url) - patron = "showDownload\(([^\)]+)\);.*?alt=.*?torrent (.*?) " - matches = re.compile(patron, re.DOTALL).findall(data) - - for extra_info, quality in matches: - extra_info= extra_info.replace(",'",'|') - extra_info= extra_info.split('|') - title = '%s [%s]' % ('Torrent', quality.strip()) - if item.extra == 'movie': - url = extra_info[2].strip("'") - else: - url = extra_info[3].strip("'") - server = 'torrent' - - if not '.torrent' in url: - if 'tvsinpagar' in url: - url = url.replace('http://','http://www.') - try: - from_web = httptools.downloadpage(url, follow_redirects=False) - url = from_web.headers['location'] - except: - pass - - if '.torrent' in url: - itemlist.append(Item(channel=item.channel, - title=title, - contentTitle= item.title, - url=url, - action='play', - quality=quality, - server=server, - thumbnail = item.infoLabels['thumbnail'], - infoLabels=item.infoLabels - )) - - if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': - itemlist.append(Item(channel=item.channel, - title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url=item.url, - action="add_pelicula_to_library", - extra="findvideos", - contentTitle=item.contentTitle - )) - - return itemlist - - -def newest(category): - logger.info() - item = Item() - try: - if category in ['peliculas', 'torrent']: - item.url = host + 'estrenos-de-cine' - item.extra='movie' - itemlist = lista(item) - if itemlist[-1].title == 'Siguiente >>>': - itemlist.pop() - if category == 'torrent': - - item.url = host+'series' - item.extra = 'serie' - itemlist.extend(lista(item)) - - if itemlist[-1].title == 'Siguiente >>>': - itemlist.pop() - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - return itemlist diff --git a/plugin.video.alfa/channels/pelismagnet.py b/plugin.video.alfa/channels/pelismagnet.py index 414e772a..70bec39e 100644 --- a/plugin.video.alfa/channels/pelismagnet.py +++ b/plugin.video.alfa/channels/pelismagnet.py @@ -348,7 +348,7 @@ def listado(item): title = re.sub(r'- $', '', title) #Limpiamos el título de la basura innecesaria - title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE) + title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title) #Terminamos de limpiar el título title = re.sub(r'\??\s?\d*?\&.*', '', 
title) diff --git a/plugin.video.alfa/channels/pelix.json b/plugin.video.alfa/channels/pelix.json new file mode 100644 index 00000000..56e91975 --- /dev/null +++ b/plugin.video.alfa/channels/pelix.json @@ -0,0 +1,78 @@ +{ +"id": "pelix", + "name": "Pelix", + "active": true, + "adult": false, + "language": ["lat", "cast"], + "thumbnail": "https://pelix.tv/build/images/logo.png", + "banner": "", + "categories": [ + "movie", + "tvshow" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "Latino", + "Castellano", + "VOSE" + ] + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_terror", + "type": "bool", + "label": "Incluir en Novedades - terror", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "comprueba_enlaces", + "type": "bool", + "label": "Verificar si los enlaces existen", + "default": false, + "enabled": true, + "visible": true + }, + { + "id": "comprueba_enlaces_num", + "type": "list", + "label": "Número de enlaces a verificar", + "default": 1, + "enabled": true, + "visible": "eq(-1,true)", + "lvalues": [ "5", "10", "15", "20" ] + } + ] +} diff --git a/plugin.video.alfa/channels/pelix.py b/plugin.video.alfa/channels/pelix.py new file mode 100644 index 00000000..ee58bd00 --- /dev/null +++ b/plugin.video.alfa/channels/pelix.py @@ -0,0 +1,352 @@ +# -*- coding: utf-8 -*- +# -*- Channel Pelix -*- +# -*- Created for Alfa-addon -*- +# -*- By the Alfa Develop Group -*- + +import re +import urllib +import base64 + +from channelselector import get_thumb +from core import httptools +from core import jsontools +from core import scrapertools +from core import servertools +from core import tmdb +from lib import jsunpack +from core.item import Item +from channels import filtertools +from channels import autoplay +from platformcode import config, logger + + +IDIOMAS = {'6': 'Latino', '7': 'Castellano'} +list_language = IDIOMAS.values() +CALIDADES = {'1': '1080p', '3': '720p', '4':'720p'} +list_quality = CALIDADES.values() + +list_servers = [ + 'openload', + 'streamango', + 'fastplay', + 'rapidvideo', + 'netutv' +] + +__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'pelix') +__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'pelix') + +host = 'https://pelix.tv/' + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + + itemlist = [] + + itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies', + thumbnail= get_thumb('movies', auto=True), page=0)) + itemlist.append(Item(channel=item.channel, title='Series', url=host+'home/genero/5', action='list_all', + type='tvshows', thumbnail= get_thumb('tvshows', auto=True), page=0)) + itemlist.append( + item.clone(title="Buscar", action="search", url=host + 'movies/headserach', thumbnail=get_thumb("search", auto=True), + extra='movie')) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + +def 
menu_movies(item): + logger.info() + + itemlist=[] + + itemlist.append(Item(channel=item.channel, title='Ultimas', url=host, path='home/newest?show=', action='list_all', + thumbnail=get_thumb('last', auto=True), type='movies', page=0)) + + #itemlist.append(Item(channel=item.channel, title='Mas Vistas', url=host, path='home/views?show=', action='list_all', + # thumbnail=get_thumb('all', auto=True), type='movies', page=0)) + + itemlist.append(Item(channel=item.channel, title='Genero', action='section', + thumbnail=get_thumb('genres', auto=True), type='movies')) + itemlist.append(Item(channel=item.channel, title='Por Año', action='section', + thumbnail=get_thumb('year', auto=True), type='movies')) + + return itemlist + +def get_source(url): + logger.info() + data = httptools.downloadpage(url).data + data = re.sub(r'\n|\r|\t| |
    |\s{2,}', "", data) + return data + + +def get_language(lang_data): + logger.info() + language = [] + lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)') + for lang in lang_list: + if lang == 'en': + lang = 'vose' + if lang not in language: + language.append(lang) + return language + +def section(item): + logger.info() + itemlist=[] + data = get_source(host) + if 'Genero' in item.title: + data = scrapertools.find_single_match(data, '
    Género(.*?)') + elif 'Año' in item.title: + data = scrapertools.find_single_match(data, 'Año(.*?)') + + patron = '([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + itemlist.append(Item(channel=item.channel, url=scrapedurl, title=scrapedtitle, action='list_all', + type=item.type, page=0)) + + return itemlist + + +def list_all(item): + logger.info() + import urllib + itemlist = [] + if item.page == 0: + data = get_source(item.url+item.path) + else: + post = {'page': str(item.page)} + post = urllib.urlencode(post) + data = httptools.downloadpage(host+'home/%sAjax/%s' % ('newest', str(item.page)), post=post).data + data = re.sub(r'\n|\r|\t| |
    |\s{2,}', "", data) + + patron = '
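
The pelix list_all() hunk above fetches page 0 as plain HTML and every later page by POSTing the page number to an Ajax endpoint (home/newestAjax/<page>), then scrapes the returned fragment with the same regexes. A standalone sketch of that two-path fetch, with `requests` replacing httptools; the host and endpoint layout are read from the hunk and should be treated as assumptions about the site:

```python
# Hedged sketch of pelix-style pagination: the first page is static HTML, later pages
# come from an Ajax endpoint that expects the page number in the URL and as form data.
import requests

HOST = 'https://pelix.tv/'  # host used by the new channel

def fetch_listing(path, page=0):
    if page == 0:
        resp = requests.get(HOST + path, timeout=10)
    else:
        resp = requests.post(HOST + 'home/newestAjax/%s' % page,
                             data={'page': str(page)}, timeout=10)
    return resp.text  # HTML (or HTML fragment) to be scraped with the channel's regexes

# Example calls:
# html = fetch_listing('home/newest?show=', page=0)
# more = fetch_listing('home/newest?show=', page=1)
```
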
    .*?.*? 0: + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, + action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName)) + + return itemlist + +def episodios(item): + logger.info() + itemlist = [] + templist = seasons(item) + for tempitem in templist: + itemlist += episodesxseasons(tempitem) + + return itemlist + +def episodesxseasons(item): + logger.info() + + itemlist = [] + duplicados = [] + data=get_source(item.url) + patron='data-id="(\d+)" season="%s" id_lang="(\d+)" id_movies_types="\d".*?' \ + 'block;">([^<]+)' % item.infoLabels['season'] + matches = re.compile(patron, re.DOTALL).findall(data) + + infoLabels = item.infoLabels + + for scrapedepisode, lang, scrapedtitle in matches: + + infoLabels['episode'] = scrapedepisode + url = item.url + title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle) + + if scrapedepisode not in duplicados: + itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels)) + duplicados.append(scrapedepisode) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + return itemlist + + +def findvideos(item): + logger.info() + + itemlist = [] + + data = get_source(item.url) + if 'episode="0" season="0"' not in data and item.contentType != 'episode': + item.contentSerieName = item.contentTitle + item.contentTitle = None + item.contentType = None + item.infoLabels = None + tmdb.set_infoLabels_item(item, seekTmdb=True) + return seasons(item) + + if 'episode="0" season="0"' not in data: + season = item.infoLabels['season'] + episode = item.infoLabels['episode'] + else: + season = '0' + episode = '0' + + patron = ' 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, + action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) + + return itemlist + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + post = 'search=%s' % texto + item.post = post + item.url = item.url + + if texto != '': + return search_results(item) + else: + return [] + +def search_results(item): + logger.info() + + itemlist=[] + + headers = {'Referer': host, 'X-Requested-With': 'XMLHttpRequest'} + data = httptools.downloadpage(item.url, headers=headers, post=item.post).data + data = re.sub(r'\n|\r|\t| |
    |\s{2,}', "", data) + patron = 'class="results\d+".*?([^<]+)<' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumb, scrapedtitle in matches: + + if '(' in scrapedtitle: + title = scrapertools.find_single_match(scrapedtitle, '(.*?)\(').strip() + year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)') + else: + title = scrapedtitle + year = '-' + url = scrapedurl + thumbnail = scrapedthumb + + new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, + action='findvideos', infoLabels={'year':year}) + + itemlist.append(new_item) + + return itemlist + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + item.type = 'movies' + item.page = 0 + if categoria in ['peliculas']: + item.url = host + 'home/newest?show=' + elif categoria == 'infantiles': + item.url = host + 'home/genero/54' + elif categoria == 'terror': + item.url = host + 'home/genero/49' + itemlist = list_all(item) + if itemlist[-1].title == 'Siguiente >>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist diff --git a/plugin.video.alfa/channels/rarbg.py b/plugin.video.alfa/channels/rarbg.py index 9c885e61..374b36b5 100644 --- a/plugin.video.alfa/channels/rarbg.py +++ b/plugin.video.alfa/channels/rarbg.py @@ -336,8 +336,8 @@ def listado(item): item_local.season_colapse = True #Muestra las series agrupadas por temporadas #Limpiamos el título de la basura innecesaria - title = re.sub(r'TV|Online', '', title, flags=re.IGNORECASE).strip() - item_local.quality = re.sub(r'proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality, flags=re.IGNORECASE).strip() + title = re.sub(r'(?i)TV|Online', '', title).strip() + item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip() #Analizamos el año. Si no está claro ponemos '-' try: @@ -472,7 +472,7 @@ def findvideos(item): item_local.quality = '' title = title.replace('.', ' ') item_local.quality = item_local.quality.replace('.', ' ') - item_local.quality = re.sub(r'proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality, flags=re.IGNORECASE).strip() + item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip() #Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent size = scrapedsize diff --git a/plugin.video.alfa/channels/seriesblanco.py b/plugin.video.alfa/channels/seriesblanco.py index 0eed39d9..aebecc79 100644 --- a/plugin.video.alfa/channels/seriesblanco.py +++ b/plugin.video.alfa/channels/seriesblanco.py @@ -114,6 +114,49 @@ def list_all(item): )) return itemlist +def list_from_genre(item): + logger.info() + + itemlist = [] + data = get_source(item.url) + contentSerieName = '' + + patron = '
    .*?src="([^"]+)"' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail in matches: + url = scrapedurl + thumbnail = scrapedthumbnail + title = scrapertools.find_single_match(scrapedurl, 'https://seriesblanco.org/capitulos/([^/]+)/') + title = title.replace('-', ' ').capitalize() + + itemlist.append(Item(channel=item.channel, + action='seasons', + title=title, + url=url, + thumbnail=thumbnail, + contentSerieName=title, + context=filtertools.context(item, list_language, list_quality), + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + # #Paginacion + + if itemlist != []: + next_page = scrapertools.find_single_match(data, '

    (.*?)/a>

    ') + data = scrapertools.get_match(data,'--more-->(.*?)/a>') data = re.sub(r"\n|\r|\t| |
    ", "", data) patron = '
    (.*?)<' matches = re.compile(patron,re.DOTALL).findall(data) diff --git a/plugin.video.alfa/channels/youjizz.py b/plugin.video.alfa/channels/youjizz.py index 9ca28e61..dfc31771 100644 --- a/plugin.video.alfa/channels/youjizz.py +++ b/plugin.video.alfa/channels/youjizz.py @@ -38,9 +38,8 @@ def categorias(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data - data = scrapertools.get_match(data,'

    Trending Categories

    (.*?)') + data = scrapertools.get_match(data,'

    Trending(.*?)') data = re.sub(r"\n|\r|\t| |
    ", "", data) - itemlist.append( Item(channel=item.channel, action="lista", title="big tits", url= host + "/search/big-tits-1.html?") ) patron = '
  • ([^"]+)' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle in matches: diff --git a/plugin.video.alfa/channels/zonatorrent.py b/plugin.video.alfa/channels/zonatorrent.py index 3f08b208..f73bd7e1 100644 --- a/plugin.video.alfa/channels/zonatorrent.py +++ b/plugin.video.alfa/channels/zonatorrent.py @@ -353,7 +353,7 @@ def listado(item): item_local.quality += " 3D" else: item_local.quality = "3D" - title = re.sub('3D', '', title, flags=re.IGNORECASE) + title = re.sub('(?i)3D', '', title) title = title.replace('[]', '') if item_local.quality: item_local.quality += ' %s' % scrapertools.find_single_match(title, '\[(.*?)\]') @@ -418,7 +418,7 @@ def listado(item): title = re.sub(r'- $', '', title) #Limpiamos el título de la basura innecesaria - title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE) + title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title) title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "").replace("LATINO", "").replace("Spanish", "").replace("Trailer", "").replace("Audio", "") title = title.replace("HDTV-Screener", "").replace("DVDSCR", "").replace("TS ALTA", "").replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("HDRip", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "").replace(" 480p", "").replace(" 480P", "").replace(" 720p", "").replace(" 720P", "").replace(" 1080p", "").replace(" 1080P", "").replace("DVDRip", "").replace(" Dvd", "").replace(" DVD", "").replace(" V.O", "").replace(" Unrated", "").replace(" UNRATED", "").replace(" unrated", "").replace("screener", "").replace("TS-SCREENER", "").replace("TSScreener", "").replace("HQ", "").replace("AC3 5.1", "").replace("Telesync", "").replace("Line Dubbed", "").replace("line Dubbed", "").replace("LineDuB", "").replace("Line", "").replace("XviD", "").replace("xvid", "").replace("XVID", "").replace("Mic Dubbed", "").replace("HD", "").replace("V2", "").replace("CAM", "").replace("VHS.SCR", "").replace("Dvd5", "").replace("DVD5", "").replace("Iso", "").replace("ISO", "").replace("Reparado", "").replace("reparado", "").replace("DVD9", "").replace("Dvd9", "") diff --git a/plugin.video.alfa/lib/generictools.py b/plugin.video.alfa/lib/generictools.py index 2c07d74c..bd2284fb 100644 --- a/plugin.video.alfa/lib/generictools.py +++ b/plugin.video.alfa/lib/generictools.py @@ -280,7 +280,9 @@ def post_tmdb_listado(item, itemlist): item.category_new = '' for item_local in itemlist: #Recorremos el Itemlist generado por el canal - title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip() + item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip() + #item_local.title = 
re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
+        title = item_local.title
         #logger.debug(item_local)
         item_local.last_page = 0
@@ -375,11 +377,13 @@ def post_tmdb_listado(item, itemlist):
                 item_local.contentSerieName = item_local.from_title
             if item_local.contentType == 'season':
                 item_local.title = item_local.from_title
-            title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
+            item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip()
+            title = item_local.title
         #Limpiamos calidad de títulos originales que se hayan podido colar
         if item_local.infoLabels['originaltitle'].lower() in item_local.quality.lower():
-            item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
+            item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality)
+            #item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
         # Preparamos el título para series, con los núm. de temporadas, si las hay
         if item_local.contentType in ['season', 'tvshow', 'episode']:
@@ -775,7 +779,7 @@ def post_tmdb_episodios(item, itemlist):
             del item_local.totalItems
         item_local.unify = 'xyz'
         del item_local.unify
-        item_local.title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
+        item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip()
         #logger.debug(item_local)
@@ -851,7 +855,8 @@ def post_tmdb_episodios(item, itemlist):
         #Limpiamos calidad de títulos originales que se hayan podido colar
         if item_local.infoLabels['originaltitle'].lower() in item_local.quality.lower():
-            item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
+            item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality)
+            #item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
         #Si no está el título del episodio, pero sí está en "title", lo rescatamos
         if not item_local.infoLabels['episodio_titulo'] and item_local.infoLabels['title'].lower() != item_local.infoLabels['tvshowtitle'].lower():
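
Several hunks in this patch (pelismagnet, rarbg, zonatorrent and the generictools changes just above) swap `flags=re.IGNORECASE` for the inline `(?i)` modifier at the start of the pattern. The match behaviour is identical; the flag simply travels inside the pattern string, which presumably keeps the call working on older Python 2 builds where re.sub() has no flags keyword and removes any risk of the flag being taken as the positional count argument. A quick equivalence check on a made-up title:

```python
# Hedged check that the inline (?i) modifier behaves like the flags=re.IGNORECASE
# form used before this patch. The sample title is illustrative only.
import re

title = 'Juego de Tronos ONLINE Torrent'

cleaned_flag   = re.sub(r'TV|Online|Torrent', '', title, flags=re.IGNORECASE).strip()
cleaned_inline = re.sub(r'(?i)TV|Online|Torrent', '', title).strip()

assert cleaned_flag == cleaned_inline
print(cleaned_inline)  # -> 'Juego de Tronos'
```
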