From c43162cbc2234c94388ac6c134bae494d5550e62 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 28 Oct 2017 08:40:11 -0500
Subject: [PATCH 1/5] flashx: as I said!!!

---
 plugin.video.alfa/servers/flashx.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/plugin.video.alfa/servers/flashx.py b/plugin.video.alfa/servers/flashx.py
index 7326085a..fd654da7 100644
--- a/plugin.video.alfa/servers/flashx.py
+++ b/plugin.video.alfa/servers/flashx.py
@@ -33,11 +33,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
                'Cookie': ''}
     data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
     data = data.replace("\n","")
-    cgi_counter = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/counter.cgi.*?fx=[0-9a-zA-Z=]+)')
+    cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
     cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
     playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
     # Para obtener el f y el fxfx
-    js_fxfx = scrapertools.find_single_match(data, '(?is)src=.(https://www.flashx.tv/js/code.js.*?=[0-9]+)')
+    js_fxfx = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/js/code.js.*?[^(?:'|")]+)""")
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')

From 9a1effbe25a432ca1598b9ec73390ac01e55940a Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 28 Oct 2017 10:58:15 -0500
Subject: [PATCH 2/5] cinetux: show the number of movies
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 plugin.video.alfa/channels/cinetux.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/plugin.video.alfa/channels/cinetux.py b/plugin.video.alfa/channels/cinetux.py
index a2c86f67..3bcc11e5 100644
--- a/plugin.video.alfa/channels/cinetux.py
+++ b/plugin.video.alfa/channels/cinetux.py
@@ -28,9 +28,9 @@ def mainlist(item):
     itemlist = []
     item.viewmode = viewmode
-    data = httptools.downloadpage(CHANNEL_HOST).data
-    total = scrapertools.find_single_match(data, "TENEMOS\s(.*?)")
-    titulo = "Peliculas"
+    data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
+    total = scrapertools.find_single_match(data, "Películas(.*?)")
+    titulo = "Peliculas (%s)" %total
     itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
     itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
                                thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
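Note on PATCH 1/5: both regex swaps follow the same idea. Instead of anchoring
on a specific trailing token (fx=[0-9a-zA-Z=]+ or =[0-9]+), the new patterns
end in [^(?:'|")]+, a character class that consumes every character up to the
closing quote, so the match keeps working when flashx.tv reshuffles its query
strings. Below is a minimal sketch of that behaviour: find_single_match is a
simplified stand-in for the scrapertools helper (first capture group of the
first match, or "" if none), and the sample HTML is made up for illustration.

    # -*- coding: utf-8 -*-
    import re

    def find_single_match(data, patron):
        # Simplified stand-in for scrapertools.find_single_match.
        match = re.search(patron, data)
        return match.group(1) if match else ""

    data = '<script src="https://www.flashx.tv/counter.cgi?c=fx1&fx=QWJjMTIz"></script>'

    # Old pattern: only matches while the URL ends in an fx= token.
    old = find_single_match(data, r'(?is)src=.(https://www.flashx.tv/counter.cgi.*?fx=[0-9a-zA-Z=]+)')

    # New pattern: [^(?:'|")]+ is a character class, not a group, so it eats
    # every character that is not a quote (or one of ( ? : | ) ), i.e. the
    # rest of the URL up to the closing quote, whatever parameters it has.
    new = find_single_match(data, r"""(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")

    print(old)  # https://www.flashx.tv/counter.cgi?c=fx1&fx=QWJjMTIz
    print(new)  # same URL here, but still matches if the fx= token changes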
From fc58c717eb70943bddf415f0cc5611a94a2a1d22 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 28 Oct 2017 11:17:48 -0500
Subject: [PATCH 3/5] plusdede: updated findvideos

---
 plugin.video.alfa/channels/plusdede.py | 61 +-------------------------
 1 file changed, 2 insertions(+), 59 deletions(-)

diff --git a/plugin.video.alfa/channels/plusdede.py b/plugin.video.alfa/channels/plusdede.py
index 227023c6..a1467884 100644
--- a/plugin.video.alfa/channels/plusdede.py
+++ b/plugin.video.alfa/channels/plusdede.py
@@ -25,7 +25,6 @@ color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
 def login():
     url_origen = "https://www.plusdede.com/login?popup=1"
     data = httptools.downloadpage(url_origen, follow_redirects=True).data
-    logger.debug("dataPLUSDEDE=" + data)

     if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data):
         return True
@@ -34,12 +33,10 @@ def login():
     post = "_token=" + str(token) + "&email=" + str(
         config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
         config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
-    # logger.debug("dataPLUSDEDE_POST="+post)
     url = "https://www.plusdede.com/"
     headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
     data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
                                   replace_headers=False).data
-    logger.debug("PLUSDEDE_DATA=" + data)
     if "redirect" in data:
         return True
     else:
@@ -183,7 +180,6 @@ def generos(item):
     tipo = item.url.replace("https://www.plusdede.com/", "")
     # Descarga la pagina
     data = httptools.downloadpage(item.url).data
-    logger.debug("data=" + data)

     # Extrae las entradas (carpetas)
     data = scrapertools.find_single_match(data,
@@ -198,7 +194,6 @@ def generos(item):
         plot = ""
         # https://www.plusdede.com/pelis?genre_id=1
         url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
-        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
         itemlist.append(
             Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
                  fulltitle=title))
@@ -229,11 +224,9 @@ def buscar(item):
     # Descarga la pagina
     headers = {"X-Requested-With": "XMLHttpRequest"}
     data = httptools.downloadpage(item.url, headers=headers).data
-    logger.debug("data=" + data)

     # Extrae las entradas (carpetas)
     json_object = jsontools.load(data)
-    logger.debug("content=" + json_object["content"])
     data = json_object["content"]

     return parse_mixed_results(item, data)
@@ -248,7 +241,6 @@ def parse_mixed_results(item, data):
     patron += '.*?([^<]+)+'
     patron += '.*?([^<]+)'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    logger.debug("PARSE_DATA:" + data)
     if item.tipo == "lista":
         following = scrapertools.find_single_match(data, '')
         data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
@@ -286,7 +278,6 @@
             sectionStr = "docu"
         referer = urlparse.urljoin(item.url, scrapedurl)
         url = urlparse.urljoin(item.url, scrapedurl)
-        logger.debug("PELII_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
         if item.tipo != "series":
             itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
                                  thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
@@ -294,7 +285,6 @@
         else:
             referer = item.url
             url = urlparse.urljoin(item.url, scrapedurl)
-            logger.debug("SERIE_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
             if item.tipo != "pelis":
                 itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
                                      thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
@@ -304,7 +294,6 @@
                                                '')
     if next_page != "":
         url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
-        logger.debug("URL_SIGUIENTE:" + url)
         itemlist.append(
             Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
                  extra=item.extra, url=url))
@@ -323,7 +312,6 @@ def siguientes(item):  # No utilizada
     # Descarga la pagina
     data = httptools.downloadpage(item.url).data
-    logger.debug("data=" + data)

     # Extrae las entradas (carpetas)
     bloque = scrapertools.find_single_match(data, 'Siguiendo(.*?)')
@@ -358,7 +346,6 @@ def siguientes(item):  # No utilizada
                 Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
                      fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
-            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

     return itemlist
@@ -369,7 +356,6 @@ def episodio(item):

     # Descarga la pagina
     data = httptools.downloadpage(item.url).data
-    # logger.debug("data="+data)

     session = str(int(item.extra.split("|")[0]))
     episode = str(int(item.extra.split("|")[1]))
@@ -377,7 +363,6 @@ def episodio(item):
     matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)

     for bloque_episodios in matchestemporadas:
-        logger.debug("bloque_episodios=" + bloque_episodios)
         # Extrae los episodios
         patron = '' + episode + ' ([^<]+)(\s*\s*]*>]*>[^<]*]*>[^<]*]*>]*>]*>[^<]*]*>[^<]*)?'
@@ -401,7 +386,6 @@
             itemlist.append(
                 Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                      fulltitle=title, fanart=item.fanart, show=item.show))
-            logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

     itemlist2 = []
     for capitulo in itemlist:
@@ -415,11 +399,9 @@ def peliculas(item):
     # Descarga la pagina
     headers = {"X-Requested-With": "XMLHttpRequest"}
     data = httptools.downloadpage(item.url, headers=headers).data
-    logger.debug("data_DEF_PELICULAS=" + data)

     # Extrae las entradas (carpetas)
     json_object = jsontools.load(data)
-    logger.debug("html=" + json_object["content"])
     data = json_object["content"]

     return parse_mixed_results(item, data)
@@ -432,24 +414,18 @@ def episodios(item):
     # Descarga la pagina
     idserie = ''
     data = httptools.downloadpage(item.url).data
-    # logger.debug("dataEPISODIOS="+data)
     patrontemporada = '([^<]+)<(.*?)\s+'
     matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
-    logger.debug(matchestemporadas)
     idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
     token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
     if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
         itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
                              thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
     for nombre_temporada, bloque_episodios in matchestemporadas:
-        logger.debug("nombre_temporada=" + nombre_temporada)
-        logger.debug("bloque_episodios=" + bloque_episodios)
-        logger.debug("id_serie=" + idserie)
         # Extrae los episodios
         patron_episodio = ''
         # patron = '\s*\s*([^<]+)\s*([^<]+)\s*.*?"show-close-footer episode model([^"]+)"'
         matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
-        # logger.debug(matches)
         for data_episodio in matches:
             scrapeid = scrapertools.find_single_match(data_episodio, '([^<]+)')
             if (showlinks == 1 and jdown != '') or (
                     showlinks == 2 and jdown == ''):  # Descartar enlaces veronline/descargar
                 continue
             idioma_1 = ""
             idiomas = re.compile(' 1:
                 idioma_1 = idiomas[1]
@@ -670,16 +630,12 @@
             calidad_video = scrapertools.find_single_match(match, '(.*?)').replace(
                 " ", "").replace("\n", "")
-            logger.debug("calidad_video=" + calidad_video)
             calidad_audio = scrapertools.find_single_match(match, '(.*?)').replace(
                 " ", "").replace("\n", "")
-            logger.debug("calidad_audio=" + calidad_audio)
             thumb_servidor = scrapertools.find_single_match(match, '')
-            logger.debug("thumb_servidor=" + thumb_servidor)
             nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
-            logger.debug("nombre_servidor=" + nombre_servidor)
             if jdown != '':
                 title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
@@ -696,7 +652,6 @@
                 url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
             thumbnail = thumb_servidor
             plot = ""
-            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
             if sortlinks > 0:
                 # orden1 para dejar los "downloads" detras de los "ver" al ordenar
                 # orden2 segun configuración
@@ -788,13 +743,10 @@ def play(item):
     headers = {'Referer': item.extra}
     data = httptools.downloadpage(item.url, headers=headers).data
-    # logger.debug("dataLINK="+data)

     url = scrapertools.find_single_match(data, '')
     url = urlparse.urljoin("https://www.plusdede.com", url)
-    # logger.debug("DATA_LINK_FINAL:"+url)
-    logger.debug("URL_PLAY:" + url)
     headers = {'Referer': item.url}
     media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
     # logger.info("media_url="+media_url)
@@ -808,7 +760,6 @@
         videoitem.channel = item.channel

     # Marcar como visto
-    logger.debug(item)
     checkseen(item)

     return itemlist
@@ -827,7 +778,6 @@ def checkseen(item):
         tipo_str = "pelis"
     headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
                "X-CSRF-TOKEN": item.token}
-    logger.debug("Entrando a checkseen " + url_temp + item.token)
     data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data

     return True
@@ -836,7 +786,6 @@ def infosinopsis(item):
     logger.info()

     data = httptools.downloadpage(item.url).data
-    logger.debug("SINOPSISdata=" + data)
     scrapedtitle = scrapertools.find_single_match(data, '([^<]+)')
     scrapedvalue = scrapertools.find_single_match(data, '([^<]+)')
@@ -845,11 +794,8 @@
     scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data, 'Duración\s*([^<]+)').strip().replace(
         " ", "").replace("\n", ""))
-    logger.debug(scrapedduration)
     scrapedplot = scrapertools.find_single_match(data, '
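Note on PATCH 3/5: the commit only strips debug logging out of the plusdede
channel; the login handshake it touches is otherwise unchanged. For reference,
the flow that login() implements is roughly the sketch below, rewritten with
requests instead of the add-on's httptools wrapper. The form fields, the
app=2131296469 constant and the "redirect" success marker come from the diff
above; the token scrape reuses the '_token" content="..."' pattern that
episodios() uses, so treat that as an assumption about where the page exposes
the CSRF token.

    # -*- coding: utf-8 -*-
    import re
    import requests

    def plusdede_login(user, password):
        session = requests.Session()
        # 1. Load the login popup and scrape the CSRF token from the page.
        data = session.get("https://www.plusdede.com/login?popup=1").text
        match = re.search(r'_token" content="([^"]+)"', data)
        if not match:
            return False
        token = match.group(1)
        # 2. POST the credentials with the token both in the form body and
        #    in the X-CSRF-TOKEN header, as the channel code does.
        headers = {"Referer": "https://www.plusdede.com/",
                   "X-Requested-With": "XMLHttpRequest",
                   "X-CSRF-TOKEN": token}
        post = {"_token": token, "email": user,
                "password": password, "app": "2131296469"}
        data = session.post("https://www.plusdede.com/login",
                            data=post, headers=headers).text
        # 3. login() treats any response containing "redirect" as success.
        return "redirect" in data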