diff --git a/plugin.video.alfa/channels/allpeliculas.py b/plugin.video.alfa/channels/allpeliculas.py
index 6f4ca9e1..06175ca6 100644
--- a/plugin.video.alfa/channels/allpeliculas.py
+++ b/plugin.video.alfa/channels/allpeliculas.py
@@ -1,7 +1,5 @@
 # -*- coding: utf-8 -*-
 
-import urlparse
-
 from core import httptools
 from core import jsontools
 from core import scrapertools
@@ -59,6 +57,7 @@ def colecciones(item):
         title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
         itemlist.append(Item(channel = item.channel,
                              action = "listado_colecciones",
+                             page = 1,
                              thumbnail = host + scrapedthumbnail,
                              title = title,
                              url = host + scrapedurl
@@ -71,7 +70,7 @@ def listado_colecciones(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
    data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
-    post = "page=1"
+    post = "page=%s" %item.page
     data = httptools.downloadpage(host + data_url, post=post).data
     patron = 'a href="(/peli[^"]+).*?'
     patron += 'src="([^"]+).*?'
@@ -88,6 +87,16 @@ def listado_colecciones(item):
                              url = host + scrapedurl
                              ))
     tmdb.set_infoLabels(itemlist)
+    item.page += 1
+    post = "page=%s" %item.page
+    data = httptools.downloadpage(host + data_url, post=post).data
+    if len(data) > 50:
+        itemlist.append(Item(channel = item.channel,
+                             action = "listado_colecciones",
+                             title = "Pagina siguiente>>",
+                             page = item.page,
+                             url = item.url
+                             ))
 
     return itemlist
 
@@ -159,6 +168,7 @@ def lista(item):
 
     params = jsontools.dump(dict_param)
     data = httptools.downloadpage(item.url, post=params).data
+    data = data.replace("<mark>","").replace("<\/mark>","")
     dict_data = jsontools.load(data)
 
     for it in dict_data["items"]:
@@ -167,7 +177,7 @@ def lista(item):
         rating = it["imdb"]
         year = it["year"]
         url = host + "pelicula/" + it["slug"]
-        thumb = urlparse.urljoin(host, it["image"])
+        thumb = host + it["image"]
         item.infoLabels['year'] = year
         itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
                                    plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
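Reviewer note: the allpeliculas.py hunks above add look-ahead pagination. The listing now carries a page counter in the Item, and after building the current page it POSTs for the following one, only appending a "Pagina siguiente>>" entry when the probe returns a non-trivial body. A minimal sketch of that pattern, with a hypothetical fetch_page() standing in for httptools.downloadpage(host + data_url, post="page=N"):

    # -*- coding: utf-8 -*-
    # Look-ahead pagination sketch; fetch_page() is a stand-in, not Alfa API.
    def fetch_page(page):
        return "resultados..." * 20 if page <= 3 else ""  # pages 1-3 have data

    def listado(page=1):
        items = [fetch_page(page)]          # current page results
        next_data = fetch_page(page + 1)    # probe the following page
        if len(next_data) > 50:             # same heuristic as the patch
            items.append("Pagina siguiente>> (page=%s)" % (page + 1))
        return items

    print(listado(1))  # page 2 has content -> paging entry appended
    print(listado(3))  # page 4 is empty    -> no paging entry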
diff --git a/plugin.video.alfa/channels/repelis.py b/plugin.video.alfa/channels/repelis.py
index 480a5e4f..dec63bd8 100644
--- a/plugin.video.alfa/channels/repelis.py
+++ b/plugin.video.alfa/channels/repelis.py
@@ -30,11 +30,6 @@ def mainlist(item):
     itemlist.append( Item(channel=item.channel, action="menudesta", title="Destacadas",
                           url= host + "/pag/1", thumbnail="http://img.irtve.es/v/1074982/",
                           fanart=mifan))
-    itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
-                         url= host + "/archivos/proximos-estrenos/pag/1",
-                         thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
-                                   "-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
-                         fanart=mifan))
     itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
                          url= host + "/pag/1",
                          thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
@@ -70,7 +65,8 @@ def menupelis(item):
     logger.info(item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
-
+    if item.genre:
+        item.extra = item.genre
     if item.extra == '':
         section = 'Recién Agregadas'
     elif item.extra == 'year':
@@ -79,17 +75,13 @@ def menupelis(item):
         section = 'de Eróticas \+18'
     else:
         section = 'de %s'%item.extra
-
-    patronenlaces = 'Películas %s<\/h.>.*?>(.*?)<\/section>'%section
-    matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
-
+    patronenlaces = 'Películas %s.*?>(.*?)'%section
+    matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
     for bloque_enlaces in matchesenlaces:
-        patron = '.*?'
-        patron += '
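Reviewer note: repelis.py drops the direct re.compile(...).findall(...) call in favour of scrapertools.find_multiple_matches. In Alfa that helper is essentially a DOTALL findall wrapper; sketched below as an assumption about its semantics, not a verbatim copy of the library:

    import re

    def find_multiple_matches(data, patron):
        # assumed behaviour of scrapertools.find_multiple_matches:
        # every match, with '.' also matching newlines
        return re.compile(patron, re.DOTALL).findall(data)

    data = '<h2>Peliculas de Terror</h2>\n<section>bloque</section>'
    print(find_multiple_matches(data, 'Peliculas de (.*?)</h2>.*?<section>(.*?)</section>'))
    # -> [('Terror', 'bloque')]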
[...]
         chr1 = (enc1 << 2) | (enc2 >> 4)
         chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
         chr3 = ((enc3 & 3) << 6) | enc4
@@ -352,4 +326,4 @@ def decode(string):
     output = output.decode('utf8')
 
-    return output
\ No newline at end of file
+    return output
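Reviewer note: the chr1/chr2/chr3 lines in the decode() hunk above are the classic base64 unpacking step: four 6-bit values are recombined into three 8-bit bytes. A worked example with the alphabet indices for "TWFu", which decodes to "Man":

    enc1, enc2, enc3, enc4 = 19, 22, 5, 46   # 'T', 'W', 'F', 'u'

    chr1 = (enc1 << 2) | (enc2 >> 4)         # 0b01001101 = 77  -> 'M'
    chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)  # 0b01100001 = 97  -> 'a'
    chr3 = ((enc3 & 3) << 6) | enc4          # 0b01101110 = 110 -> 'n'

    print(chr(chr1) + chr(chr2) + chr(chr3))  # Man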
diff --git a/plugin.video.alfa/channels/ultrapeliculashd.json b/plugin.video.alfa/channels/ultrapeliculashd.json
index 0480994d..184a5f4a 100755
--- a/plugin.video.alfa/channels/ultrapeliculashd.json
+++ b/plugin.video.alfa/channels/ultrapeliculashd.json
@@ -34,6 +34,14 @@
         "default": true,
         "enabled": true,
         "visible": true
+    },
+    {
+        "id": "include_in_newest_terror",
+        "type": "bool",
+        "label": "Incluir en Novedades -Terror",
+        "default": true,
+        "enabled": true,
+        "visible": true
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/plugin.video.alfa/channels/ultrapeliculashd.py b/plugin.video.alfa/channels/ultrapeliculashd.py
index 73f4e78a..e1093573 100755
--- a/plugin.video.alfa/channels/ultrapeliculashd.py
+++ b/plugin.video.alfa/channels/ultrapeliculashd.py
@@ -252,10 +252,13 @@ def newest(categoria):
     item.extra = 'estrenos/'
     try:
         if categoria == 'peliculas':
-            item.url = host + '/category/estrenos/'
+            item.url = host + '/genre/estrenos/'
 
         elif categoria == 'infantiles':
-            item.url = host + '/category/infantil/'
+            item.url = host + '/genre/animacion/'
+
+        elif categoria == 'terror':
+            item.url = host + '/genre/terror/'
 
         itemlist = lista(item)
         if itemlist[-1].title == 'Siguiente >>>':
diff --git a/plugin.video.alfa/servers/flashx.py b/plugin.video.alfa/servers/flashx.py
index 9730b95d..e2191039 100644
--- a/plugin.video.alfa/servers/flashx.py
+++ b/plugin.video.alfa/servers/flashx.py
@@ -37,12 +37,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
     playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
     # Para obtener el f y el fxfx
-    js_fxfx = "https://www." + scrapertools.find_single_match(data, """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
+    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js/code.js.*?[^(?:'|")]+)""")
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
     for f, v in matches:
         pfxfx += f + "=" + v + "&"
+    logger.info("mfxfxfx1= %s" %js_fxfx)
+    logger.info("mfxfxfx2= %s" %pfxfx)
+    if pfxfx == "":
+        pfxfx = "ss=yes&f=fail&fxfx=6"
     coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
     # {f: 'y', fxfx: '6'}
     flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
diff --git a/plugin.video.alfa/servers/gvideo.py b/plugin.video.alfa/servers/gvideo.py
index 7ed9570c..8cb31f7a 100644
--- a/plugin.video.alfa/servers/gvideo.py
+++ b/plugin.video.alfa/servers/gvideo.py
@@ -30,12 +30,20 @@ def get_video_url(page_url, user="", password="", video_password=""):
 
     streams =[]
     logger.debug('page_url: %s'%page_url)
     if 'googleusercontent' in page_url:
-        data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
-        url=data.headers['location']
+
+        response = httptools.downloadpage(page_url, follow_redirects = False, cookies=False, headers={"Referer": page_url})
+        url=response.headers['location']
+        cookies = ""
+        cookie = response.headers["set-cookie"].split("HttpOnly, ")
+        for c in cookie:
+            cookies += c.split(";", 1)[0] + "; "
+        data = response.data.decode('unicode-escape')
+        data = urllib.unquote_plus(urllib.unquote_plus(data))
+        headers_string = "|Cookie=" + cookies
+
         quality = scrapertools.find_single_match (url, '.itag=(\d+).')
         streams.append((quality, url))
-        headers_string=""
 
     else:
         response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
diff --git a/plugin.video.alfa/servers/streamixcloud.py b/plugin.video.alfa/servers/streamixcloud.py
index 5f99d151..bc9d81a5 100755
--- a/plugin.video.alfa/servers/streamixcloud.py
+++ b/plugin.video.alfa/servers/streamixcloud.py
@@ -8,7 +8,6 @@ from platformcode import logger
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-
     data = httptools.downloadpage(page_url).data
     if "Not Found" in data:
         return False, "[streamixcloud] El archivo no existe o ha sido borrado"
@@ -21,7 +20,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
     video_urls = []
-    packed = scrapertools.find_single_match(data,
+    patron = "
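Reviewer note: the gvideo.py change stops relying on httptools' cookie handling and rebuilds the Cookie header by hand. googleusercontent returns every cookie in a single set-cookie header joined by "HttpOnly, "; the loop keeps only the name=value pair of each, and the result is appended to the stream URL using Kodi's "|Cookie=..." header suffix. A sketch with an illustrative (not real) set-cookie value:

    # Cookie-header assembly as in servers/gvideo.py; sample value is made up.
    set_cookie = ("AUTH_abc=v1; Path=/; Secure; HttpOnly, "
                  "DRIVE_STREAM=xyz; Domain=.google.com; Path=/; HttpOnly")

    cookies = ""
    for c in set_cookie.split("HttpOnly, "):
        cookies += c.split(";", 1)[0] + "; "  # keep just "name=value"

    headers_string = "|Cookie=" + cookies
    url = "https://r1.googleusercontent.com/videoplayback?itag=22"
    print(url + headers_string)
    # -> ...videoplayback?itag=22|Cookie=AUTH_abc=v1; DRIVE_STREAM=xyz;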