diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml
index 6eb64831..e91f3a40 100644
--- a/plugin.video.alfa/addon.xml
+++ b/plugin.video.alfa/addon.xml
@@ -1,5 +1,5 @@
-
+
@@ -19,12 +19,17 @@
     [B]Estos son los cambios para esta versión:[/B]
     [COLOR green][B]Arreglos[/B][/COLOR]
-    ¤ allcalidad ¤ animeflv ¤ streamcloud
-    ¤ pack +18 ¤ divxtotal ¤ elitetorrent
-    ¤ estrenosgo ¤ mejortorrent ¤ mejortorrent1
-    ¤ newpct1 ¤ pelismagnet
+    ¤ animeshd ¤ gamovideo ¤ elitetorrent
+    ¤ newpct1 ¤ cinetux ¤ asialiveaction
+    ¤ gnula ¤ fembed ¤ hdfilmologia
+    ¤ gvideo ¤ vidlox ¤ javtasty
+    ¤ qwertyy
+
+    [COLOR green][B]Novedades[/B][/COLOR]
+    ¤ watchseries ¤ xstreamcdn ¤ videobb
+    ¤ animespace ¤ tvanime
-    Agradecimientos a @shlibidon y @nyicris por colaborar con esta versión
+    Agradecimientos a @shlibidon por colaborar con esta versión
     Navega con Kodi por páginas web para ver sus videos de manera fácil.
diff --git a/plugin.video.alfa/channels/animejl.json b/plugin.video.alfa/channels/animejl.json
index a84a6ce9..9f28c47c 100644
--- a/plugin.video.alfa/channels/animejl.json
+++ b/plugin.video.alfa/channels/animejl.json
@@ -3,8 +3,8 @@
   "name": "AnimeJL",
   "active": true,
   "adult": false,
-  "language": ["esp", "lat", "cast"],
-  "thumbnail": "https://www.animejl.net/img/Logo.png",
+  "language": ["esp", "cast", "lat"],
+  "thumbnail": "https://i.imgur.com/S6foTE9.png",
   "banner": "",
   "categories": [
     "anime"
@@ -27,4 +27,4 @@
       "visible": true
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/plugin.video.alfa/channels/animeshd.py b/plugin.video.alfa/channels/animeshd.py
index 18d53b81..12c4778b 100644
--- a/plugin.video.alfa/channels/animeshd.py
+++ b/plugin.video.alfa/channels/animeshd.py
@@ -113,9 +113,10 @@ def lista(item):
     patron = 'class="anime">'
     patron +='
.*?

([^<]+)<\/h2>' matches = re.compile(patron, re.DOTALL).findall(data) - context = renumbertools.context(item) - context2 = autoplay.context - context.extend(context2) + if item.extra != "next": + context = renumbertools.context(item) + context2 = autoplay.context + context.extend(context2) for scrapedurl, scrapedthumbnail, scrapedtitle in matches: url = scrapedurl thumbnail = host + scrapedthumbnail @@ -133,12 +134,12 @@ def lista(item): '.*?') + + if next_page != "": + actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?') + itemlist.append(Item(channel=item.channel, + action="list_all", + title=">> Página siguiente", + url=actual_page + next_page, + thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png' + )) + tmdb.set_infoLabels(itemlist, seekTmdb=True) + return itemlist + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + try: + if texto != '': + return list_all(item) + else: + return [] + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + +def new_episodes(item): + logger.info() + + itemlist = [] + + full_data = get_source(item.url) + data = scrapertools.find_single_match(full_data, '
.*?
') + patron = '.*?src="([^"]+)".*?' + patron += '.*?([^<]+).*?

([^<]+)

'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedthumbnail, epi, scrapedtitle in matches:
+        url = scrapedurl
+        lang = 'VOSE'
+        title = '%s - %s' % (scrapedtitle, epi)
+        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
+                             action='findvideos', language=lang))
+
+    return itemlist
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+
+    data = get_source(item.url)
+    patron = '
' + matches = re.compile(patron, re.DOTALL).findall(data) + + infoLabels = item.infoLabels + for scrapedurl in matches: + episode = scrapertools.find_single_match(scrapedurl, '.*?capitulo-(\d+)') + lang = 'VOSE' + season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode)) + title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName) + url = scrapedurl + infoLabels['season'] = season + infoLabels['episode'] = episode + + itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url, + action='findvideos', language=lang, infoLabels=infoLabels)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + itemlist = itemlist[::-1] + if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, + action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName, + extra1='library')) + + return itemlist + + +def findvideos(item): + import urllib + logger.info() + + itemlist = [] + + data = get_source(item.url) + patron = 'id="Opt\d+">.*?src=(.*?) frameborder' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl in matches: + server = '' + scrapedurl = scrapedurl.replace('"', '') + new_data = get_source(scrapedurl) + + if "/stream/" in scrapedurl: + scrapedurl = scrapertools.find_single_match(new_data, '19: - itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', pag=pag)) + url_next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)"') + if len(itemlist)>0 and url_next_page: + itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista')) return itemlist @@ -189,14 +185,16 @@ def findvideos(item): data1 = httptools.downloadpage(url, headers={"Referer":url1}).data url = scrapertools.find_single_match(data1, 'src: "([^"]+)"') if "embed.php" not in url: - itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url)) + if url: + itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url)) continue data1 = httptools.downloadpage(url).data packed = scrapertools.find_single_match(data1, "(?is)eval\(function\(p,a,c,k,e.*?") unpack = jsunpack.unpack(packed) urls = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+).*?label":"([^"]+)') for url2, quality in urls: - itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2)) + if url2: + itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2)) # Segundo grupo de enlaces matches = scrapertools.find_multiple_matches(data, '(.*?)
') - item.plot = scrapertools.htmlclean(item.plot).strip() - item.contentPlot = item.plot + #item.plot = scrapertools.find_single_match(data, '
(.*?)
') + #item.plot = scrapertools.htmlclean(item.plot).strip() + #item.contentPlot = item.plot patron = 'Ver película online.*?>.*?>([^<]+)' scrapedopcion = scrapertools.find_single_match(data, patron) titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper() @@ -167,14 +172,12 @@ def findvideos(item): urls = scrapertools.find_multiple_matches(datos, '(?:src|href)="([^"]+)') titulo = "Ver en %s " + titulo_opcion for url in urls: - itemlist.append(Item(channel = item.channel, - action = "play", - contentThumbnail = item.thumbnail, - fulltitle = item.contentTitle, + itemlist.append(item.clone(action = "play", title = titulo, url = url )) itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + #tmdb.set_infoLabels_itemlist(itemlist, True) if itemlist: if config.get_videolibrary_support(): itemlist.append(Item(channel = item.channel, action = "")) diff --git a/plugin.video.alfa/channels/hdfilmologia.json b/plugin.video.alfa/channels/hdfilmologia.json index 18b641b9..4324a844 100644 --- a/plugin.video.alfa/channels/hdfilmologia.json +++ b/plugin.video.alfa/channels/hdfilmologia.json @@ -5,7 +5,7 @@ "adult": false, "language": ["esp", "lat", "cast", "vose"], "fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg", - "thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png", + "thumbnail": "https://hdfilmologia.com/templates/hdfilmologia/images/logo.png", "banner": "", "categories": [ "movie", diff --git a/plugin.video.alfa/channels/hdfilmologia.py b/plugin.video.alfa/channels/hdfilmologia.py index b553bb55..ec1d8775 100644 --- a/plugin.video.alfa/channels/hdfilmologia.py +++ b/plugin.video.alfa/channels/hdfilmologia.py @@ -179,7 +179,7 @@ def genres(item): logger.info() itemlist = [] - data = httptools.downloadpage(item.url).data + data = httptools.downloadpage(item.url) data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron = '
  • ([^<]+)' @@ -221,12 +221,11 @@ def findvideos(item): data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}| ", "", data) - - patron = '(\w+)src\d+="([^"]+)"' + patron = '>([^<]+)
  • ", "", data) patron = '
    Next') if next_page=="": next_page = scrapertools.find_single_match(data,'
  • .*?
  • ') @@ -82,7 +85,17 @@ def play(item): url = scrapertools.find_single_match(data,'|\s{2,}', "", data) + return data + + +def list_all(item): + logger.info() + + itemlist = [] + + data = get_source(item.url) + patron = '.*?src="([^"]+)".*?' + patron += '

    ([^<]+)

    .*?"fecha">([^<]+)<.*?([^<]+)' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches: + type = type.strip().lower() + url = scrapedurl + thumbnail = scrapedthumbnail + if 'latino' in scrapedtitle.lower(): + lang = 'Latino' + elif 'castellano' in scrapedtitle.lower(): + lang = 'Castellano' + else: + lang = 'VOSE' + title = re.sub('Audio|Latino|Castellano', '', scrapedtitle) + context = renumbertools.context(item) + context2 = autoplay.context + context.extend(context2) + new_item= Item(channel=item.channel, + action='episodios', + title=title, + url=url, + thumbnail=thumbnail, + language = lang, + infoLabels={'year':year} + ) + if type != 'anime': + new_item.contentTitle=title + else: + new_item.plot=type + new_item.contentSerieName=title + new_item.context = context + itemlist.append(new_item) + + # Paginacion + next_page = scrapertools.find_single_match(data, + '"page-item active">.*?
    .*?') + + if next_page != "": + actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?') + itemlist.append(Item(channel=item.channel, + action="list_all", + title=">> Página siguiente", + url=actual_page + next_page, + thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png' + )) + tmdb.set_infoLabels(itemlist, seekTmdb=True) + return itemlist + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + try: + if texto != '': + return list_all(item) + else: + return [] + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + +def new_episodes(item): + logger.info() + + itemlist = [] + + full_data = get_source(item.url) + data = scrapertools.find_single_match(full_data, '
    .*?
    ') + patron = '.*?src="([^"]+)".*?' + patron += '.*?([^<]+).*?

    ([^<]+)

'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedthumbnail, epi, scrapedtitle in matches:
+        url = scrapedurl
+        if 'latino' in scrapedtitle.lower():
+            lang = 'Latino'
+        elif 'castellano' in scrapedtitle.lower():
+            lang = 'Castellano'
+        else:
+            lang = 'VOSE'
+        scrapedtitle = re.sub('Audio|Latino|Castellano', '', scrapedtitle)
+        title = '%s - Episodio %s' % (scrapedtitle, epi)
+        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
+                             action='findvideos', language=lang))
+
+    return itemlist
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+
+    data = get_source(item.url)
+    patron = '
    ' + matches = re.compile(patron, re.DOTALL).findall(data) + + infoLabels = item.infoLabels + for scrapedurl in matches: + episode = scrapertools.find_single_match(scrapedurl, '.*?episodio-(\d+)') + lang = item.language + season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode)) + title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName) + url = scrapedurl + infoLabels['season'] = season + infoLabels['episode'] = episode + + itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url, + action='findvideos', language=lang, infoLabels=infoLabels)) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + itemlist = itemlist[::-1] + if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append( + Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url, + action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName, + extra1='library')) + + return itemlist + + +def findvideos(item): + import urllib + logger.info() + + itemlist = [] + + data = get_source(item.url) + patron = 'id="Opt\d+">.*?src=(.*?) frameborder' + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl in matches: + server = '' + scrapedurl = scrapedurl.replace('"', '') + new_data = get_source(scrapedurl) + + if "/stream/" in scrapedurl: + scrapedurl = scrapertools.find_single_match(new_data, ' (.*?)
  • [\s\S]+?[\s\S]+?', re.DOTALL).findall(data) + itemlist = [] + for episode, url, thumbnail,season in matches: + + if item.extra == "watch-series": + scrapedinfo = season.split(' - ') + scrapedtitle = scrapedinfo[0] + season = scrapertools.find_single_match(scrapedinfo[1], 'Season (\d+)') + episode = scrapertools.find_single_match(episode, 'Episode (\d+)') + title = scrapedtitle + " %sx%s" % (season, episode) + else: + scrapedtitle = season + title = scrapedtitle + ' - ' + episode + url = urlparse.urljoin(host, url) + + new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, + contentSerieName=scrapedtitle,) + itemlist.append(new_item) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en') + if url_pagination: + url = urlparse.urljoin(host + item.extra, url_pagination) + title = ">> Pagina Siguiente" + itemlist.append(Item(channel=item.channel, action="novedades_episodios", title=title, url=url, extra=item.extra)) + return itemlist + + +def novedades_cine(item): + logger.info() + data = httptools.downloadpage(item.url).data + url_pagination = scrapertools.find_single_match(data, "
    [\s\S]+?[\s\S]+?', re.DOTALL).findall(data) + itemlist = [] + for episode, url, thumbnail,season in matches: + scrapedyear = '-' + title = "%s [%s]" % (season, episode) + url = urlparse.urljoin(host, url) + new_item = Item(channel=item.channel, action="findvideos",title=title, url=url, contentTitle=season, thumbnail=thumbnail,infoLabels={'year':scrapedyear}) + itemlist.append(new_item) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en') + if url_pagination: + url = urlparse.urljoin(host + item.extra, url_pagination) + title = ">> Pagina Siguiente" + itemlist.append(Item(channel=item.channel, action="novedades_cine", title=title, url=url)) + return itemlist + +def popular(item): + logger.info() + data = httptools.downloadpage(item.url).data + url_pagination = scrapertools.find_single_match(data, "

    " in data1: + item.url = item.url.replace("-episode-0", "-episode-1") + + data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data) + matches = scrapertools.find_multiple_matches(data, 'data-video="(.*?)"') + url = '' + urlsub = '' + urlsub = scrapertools.find_single_match(data, "&sub=(.*?)&cover") + if urlsub != '': + urlsub = base64.b64decode(urlsub) + urlsub = 'https://sub.movie-series.net' + urlsub + for source in matches: + if '/streaming.php' in source: + new_data = httptools.downloadpage("https:" + source).data + url = scrapertools.find_single_match(new_data, "file: '(https://redirector.*?)'") + thumbnail= "https://martechforum.com/wp-content/uploads/2015/07/drive-300x300.png" + if url == "": + source = source.replace("streaming.php", "load.php") + elif '/load.php' in source: + new_data = httptools.downloadpage("https:" + source).data + url = scrapertools.find_single_match(new_data, "file: '(https://[A-z0-9]+.cdnfile.info/.*?)'") + thumbnail= "https://vidcloud.icu/img/logo_vid.png" + else: + url = source + thumbnail= "" + if "https://redirector." in url or "cdnfile.info" in url: + url = url+"|referer=https://vidcloud.icu/" + + if url != "": + itemlist.append(Item(channel=item.channel, url=url, title='%s', action='play',plot=item.plot, thumbnail=thumbnail, subtitle=urlsub)) + + itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server) + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra == 'film': + itemlist.append(Item(channel=item.channel, title="Añadir a la Videoteca", text_color="yellow", + action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail, + contentTitle = item.contentTitle + )) + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + return itemlist diff --git a/plugin.video.alfa/core/cloudflare.py b/plugin.video.alfa/core/cloudflare.py index a5379380..0b024fbb 100755 --- a/plugin.video.alfa/core/cloudflare.py +++ b/plugin.video.alfa/core/cloudflare.py @@ -10,7 +10,6 @@ import urlparse from platformcode import logger from decimal import Decimal -from js2py.internals import seval class Cloudflare: @@ -47,25 +46,50 @@ class Cloudflare: logger.debug("Metodo #2 (headers): NO disponible") self.header_data = {} - def solve_cf(self, body, domain): - k = re.compile('