From 9e1c190c0b130ad12e60b1ba6fe4b3ebdba373dc Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Tue, 2 Jan 2018 11:58:40 -0500 Subject: [PATCH 1/7] =?UTF-8?q?ciberpeliculashd:=20agregado=20secci=C3=B3n?= =?UTF-8?q?=20series?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../channels/ciberpeliculashd.py | 150 ++++++++++++++++-- 1 file changed, 135 insertions(+), 15 deletions(-) diff --git a/plugin.video.alfa/channels/ciberpeliculashd.py b/plugin.video.alfa/channels/ciberpeliculashd.py index 37c9439c..3562697f 100644 --- a/plugin.video.alfa/channels/ciberpeliculashd.py +++ b/plugin.video.alfa/channels/ciberpeliculashd.py @@ -20,14 +20,125 @@ except: def mainlist(item): logger.info() itemlist = [] - itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host + "/?peli=1")) - itemlist.append(Item(channel = item.channel, title = "Por género", action = "filtro", url = host, extra = "categories" )) - itemlist.append(Item(channel = item.channel, title = "Por calidad", action = "filtro", url = host, extra = "qualitys")) - itemlist.append(Item(channel = item.channel, title = "Por idioma", action = "filtro", url = host, extra = "languages")) + itemlist.append(Item(channel = item.channel, title = "Películas", text_bold = True, folder = False)) + itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host + "/?peli=1")) + itemlist.append(Item(channel = item.channel, title = " Por género", action = "filtro", url = host, extra = "categories" )) + itemlist.append(Item(channel = item.channel, title = " Por calidad", action = "filtro", url = host, extra = "qualitys")) + itemlist.append(Item(channel = item.channel, title = " Por idioma", action = "filtro", url = host, extra = "languages")) + itemlist.append(Item(channel = item.channel, title = "")) + itemlist.append(Item(channel = item.channel, title = "Series", text_bold = True, folder = False)) + itemlist.append(Item(channel = item.channel, title = " Novedades", action = "series", url = host + "/series/?peli=1")) + itemlist.append(Item(channel = item.channel, title = " Nuevos Capitulos", action = "nuevos_capitulos", url = host + "/series/?peli=1")) itemlist.append(Item(channel = item.channel, title = "")) itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s=")) return itemlist +def nuevos_capitulos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = 'class="episode" href="([^"]+).*?' + patron += 'src="([^"]+).*?' + patron += 'title="([^"]+).*?' + patron += '-->([^<]+).*?' + patron += 'created_at">([^<]+)' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepisode, scrapeddays in matches: + scrapedtitle = scrapedtitle + " %s (%s)" %(scrapedepisode.strip(), scrapeddays.strip()) + itemlist.append(Item(action = "findvideos", + channel = item.channel, + title = scrapedtitle, + thumbnail = scrapedthumbnail, + url = scrapedurl + )) + return itemlist + +def series(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, 'loop-posts series.*?panel-pagination pagination-bottom') + patron = 'a href="([^"]+).*?' + patron += '((?:http|https)://image.tmdb.org[^"]+).*?' 
+ patron += 'title="([^"]+)' + matches = scrapertools.find_multiple_matches(bloque, patron) + for scrapedurl, scrapedthumbnail, scrapedtitle in matches: + itemlist.append(Item(action = "temporadas", + channel = item.channel, + thumbnail = scrapedthumbnail, + title = scrapedtitle, + contentSerieName = scrapedtitle, + url = scrapedurl + )) + if itemlist: + tmdb.set_infoLabels(itemlist) + page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1 + next_page = scrapertools.find_single_match(item.url,".*?peli=") + next_page += "%s" %page + itemlist.append(Item(action = "series", + channel = item.channel, + title = "Página siguiente", + url = next_page + )) + return itemlist + + +def temporadas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?') + matches = scrapertools.find_multiple_matches(bloque, ' (.*?[0-9]+)') + for scrapedtitle in matches: + season = scrapertools.find_single_match(scrapedtitle, '[0-9]+') + item.infoLabels["season"] = season + url = item.url + "?temporada=%s" %season + itemlist.append(item.clone(action = "capitulos", + title = scrapedtitle, + url = url + )) + tmdb.set_infoLabels(itemlist) + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title ="")) + itemlist.append(item.clone(action = "add_serie_to_library", + channel = item.channel, + extra = "episodios", + title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url = item.url + )) + return itemlist + + +def episodios(item): + logger.info() + itemlist = [] + templist = temporadas(item) + for tempitem in templist: + itemlist += capitulos(tempitem) + return itemlist + + +def capitulos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '", "") + episode = scrapertools.find_single_match(scrapedtitle, "Capitulo ([0-9]+)") + scrapedtitle = scrapedtitle.split(":")[1] + scrapedtitle = "%sx%s %s" %(item.infoLabels["season"], episode, scrapedtitle) + item.infoLabels["episode"] = episode + itemlist.append(item.clone(action = "findvideos", + title = scrapedtitle, + url = scrapedurl + )) + tmdb.set_infoLabels(itemlist) + return itemlist + + def newest(categoria): logger.info() itemlist = [] @@ -83,6 +194,7 @@ def filtro(item): def peliculas(item): logger.info() itemlist = [] + infoLabels = dict() data = httptools.downloadpage(item.url).data bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom') patron = 'a href="([^"]+)".*?' @@ -98,23 +210,31 @@ def peliculas(item): else: year = 0 fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) 
\(") - itemlist.append(Item(action = "findvideos", + if "serie" in scrapedurl: + action = "temporadas" + infoLabels ['tvshowtitle'] = scrapedtitle + else: + action = "findvideos" + infoLabels ['tvshowtitle'] = "" + infoLabels ['year'] = year + itemlist.append(Item(action = action, channel = item.channel, fulltitle = fulltitle, thumbnail = scrapedthumbnail, - infoLabels = {'year': year}, + infoLabels = infoLabels, title = scrapedtitle, url = scrapedurl )) - tmdb.set_infoLabels(itemlist) - page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1 - next_page = scrapertools.find_single_match(item.url,".*?peli=") - next_page += "%s" %page - itemlist.append(Item(action = "peliculas", - channel = item.channel, - title = "Página siguiente", - url = next_page - )) + if itemlist: + tmdb.set_infoLabels(itemlist) + page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1 + next_page = scrapertools.find_single_match(item.url,".*?peli=") + next_page += "%s" %page + itemlist.append(Item(action = "peliculas", + channel = item.channel, + title = "Página siguiente", + url = next_page + )) return itemlist From 738fb50ce9371c02e2345e312c6632b46003d0f3 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Fri, 5 Jan 2018 16:39:46 -0500 Subject: [PATCH 2/7] userscloud: actualido test_video_exists --- plugin.video.alfa/servers/userscloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin.video.alfa/servers/userscloud.py b/plugin.video.alfa/servers/userscloud.py index a2dfc668..58a572b6 100755 --- a/plugin.video.alfa/servers/userscloud.py +++ b/plugin.video.alfa/servers/userscloud.py @@ -11,7 +11,7 @@ def test_video_exists(page_url): response = httptools.downloadpage(page_url) - if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data: + if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data: return False, "[Userscloud] El fichero no existe o ha sido borrado" return True, "" From 4a0f1b5c413e603ba2b1ee1afb3ea9c82d6b753d Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Mon, 8 Jan 2018 08:30:12 -0500 Subject: [PATCH 3/7] doomtv: fix --- plugin.video.alfa/channels/doomtv.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/plugin.video.alfa/channels/doomtv.py b/plugin.video.alfa/channels/doomtv.py index f906175d..a5f1bd9c 100644 --- a/plugin.video.alfa/channels/doomtv.py +++ b/plugin.video.alfa/channels/doomtv.py @@ -222,11 +222,14 @@ def newest(categoria): def findvideos(item): logger.info() itemlist = [] - #itemlist = get_url(item) data = httptools.downloadpage(item.url).data data = re.sub(r'"|\n|\r|\t| |
<br>|\s{2,}', "", data)
-    url_m3u8 = scrapertools.find_single_match(data, '')
+    player_vip = scrapertools.find_single_match(data, 'src=(https:\/\/content.jwplatform.com\/players.*?js)')
+    data_m3u8 = httptools.downloadpage(player_vip, headers= {'referer':item.url}).data
+    data_m3u8 = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data_m3u8)
+    url_m3u8 = scrapertools.find_single_match(data_m3u8,',sources:.*?file: (.*?),')
     itemlist.append(item.clone(url=url_m3u8, action='play'))
+
     patron = 'id=(tab\d+)>
<(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)' matches = re.compile(patron, re.DOTALL).findall(data) From 02abbfcc646c2fba7bf24c54c173dd34bed563db Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Mon, 15 Jan 2018 16:02:31 -0500 Subject: [PATCH 4/7] httptools: updated --- plugin.video.alfa/core/httptools.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/plugin.video.alfa/core/httptools.py b/plugin.video.alfa/core/httptools.py index dfb404b0..82345bfb 100755 --- a/plugin.video.alfa/core/httptools.py +++ b/plugin.video.alfa/core/httptools.py @@ -3,6 +3,7 @@ # httptools # -------------------------------------------------------------------------------- +import inspect import cookielib import gzip import os @@ -15,6 +16,7 @@ from threading import Lock from core.cloudflare import Cloudflare from platformcode import config, logger +from platformcode.logger import WebErrorException cookies_lock = Lock() @@ -23,7 +25,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat") # Headers por defecto, si no se especifica nada default_headers = dict() -default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3163.100 Safari/537.36" +default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3163.100 Safari/537.36" default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8" default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3" default_headers["Accept-Charset"] = "UTF-8" @@ -205,8 +207,18 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr logger.info("Response error: %s" % (response["error"])) logger.info("Response data length: %s" % (len(response["data"]))) logger.info("Response headers:") + server_cloudflare = "" for header in response["headers"]: logger.info("- %s: %s" % (header, response["headers"][header])) + if "cloudflare" in response["headers"][header]: + server_cloudflare = "cloudflare" + + is_channel = inspect.getmodule(inspect.currentframe().f_back) + # error 4xx o 5xx se lanza excepcion + # response["code"] = 400 + if type(response["code"]) == int and "\\servers\\" not in str(is_channel): + if response["code"] > 399 and (server_cloudflare == "cloudflare" and response["code"] != 503): + raise WebErrorException(urlparse.urlparse(url)[1]) if cookies: save_cookies() From ba2a6c682ea869bf5de1c6687cdc9d8386af324d Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Mon, 15 Jan 2018 16:27:50 -0500 Subject: [PATCH 5/7] logger: updated --- plugin.video.alfa/platformcode/logger.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugin.video.alfa/platformcode/logger.py b/plugin.video.alfa/platformcode/logger.py index 7008407a..10741580 100644 --- a/plugin.video.alfa/platformcode/logger.py +++ b/plugin.video.alfa/platformcode/logger.py @@ -76,3 +76,8 @@ def error(texto=""): xbmc.log("######## ERROR #########", xbmc.LOGERROR) xbmc.log(texto, xbmc.LOGERROR) + + +class WebErrorException(Exception): + def __init__(self, *args, **kwargs): + Exception.__init__(self, *args, **kwargs) From f4238302a5d2d07db7d440a4a9fade91cf833e58 Mon Sep 17 00:00:00 2001 From: Intel1 <25161862+Intel11@users.noreply.github.com> Date: Mon, 15 Jan 2018 16:29:35 -0500 Subject: [PATCH 6/7] launcher: updated --- plugin.video.alfa/platformcode/launcher.py | 15 +++++++++++++-- 1 file 
changed, 13 insertions(+), 2 deletions(-)

diff --git a/plugin.video.alfa/platformcode/launcher.py b/plugin.video.alfa/platformcode/launcher.py
index 513576c8..d9eabd37 100644
--- a/plugin.video.alfa/platformcode/launcher.py
+++ b/plugin.video.alfa/platformcode/launcher.py
@@ -14,8 +14,7 @@ from core import videolibrarytools
 from core.item import Item
 from platformcode import config, logger
 from platformcode import platformtools
-from channelselector import get_thumb
-
+from platformcode.logger import WebErrorException
 
 
 def start():
@@ -298,7 +297,19 @@ def run(item=None):
             logger.error("Codigo de error HTTP : %d" % e.code)
             # "El sitio web no funciona correctamente (error http %d)"
             platformtools.dialog_ok("alfa", config.get_localized_string(30051) % e.code)
 
+        except WebErrorException, e:
+            import traceback
+            logger.error(traceback.format_exc())
+            patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
+                                                                                                 "\\\\") + '([^.]+)\.py"'
+            canal = scrapertools.find_single_match(traceback.format_exc(), patron)
+
+            platformtools.dialog_ok(
+                "Error en el canal " + canal,
+                "La web de la que depende parece no estar disponible, puede volver a intentarlo, "
+                "si el problema persiste verifique mediante un navegador la web: %s. "
+                "Si la web funciona correctamente informe el error en: www.alfa-addon.com" %(e))
         except:
             import traceback
             logger.error(traceback.format_exc())

From b8f66623da988eed3b6dc7e1ca1461cd8c363518 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Tue, 16 Jan 2018 08:12:02 -0500
Subject: [PATCH 7/7] Update platformtools.py

---
 plugin.video.alfa/platformcode/platformtools.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugin.video.alfa/platformcode/platformtools.py b/plugin.video.alfa/platformcode/platformtools.py
index a5b88b8a..611338ac 100644
--- a/plugin.video.alfa/platformcode/platformtools.py
+++ b/plugin.video.alfa/platformcode/platformtools.py
@@ -142,7 +142,7 @@ def render_items(itemlist, parent_item):
         if item.fanart:
             fanart = item.fanart
         else:
-            fanart = os.path.join(config.get_runtime_path(), "fanart-xmas.jpg")
+            fanart = os.path.join(config.get_runtime_path(), "fanart.jpg")
 
         # Creamos el listitem
         listitem = xbmcgui.ListItem(item.title)
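
Note on the pagination used by the new series() function in [PATCH 1/7]: the listing URLs carry a peli=N query parameter, and the next page is built by extracting the current number and incrementing it. A minimal standalone sketch of that step, using plain re instead of alfa's scrapertools helper (the example URL is made up):

import re

def next_page_url(url):
    # e.g. "https://example.com/series/?peli=3" -> "https://example.com/series/?peli=4"
    page = int(re.search(r"peli=([0-9]+)", url).group(1)) + 1
    base = re.match(r".*?peli=", url).group(0)   # everything up to and including "peli="
    return base + str(page)

print(next_page_url("https://example.com/series/?peli=3"))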
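
The capitulos() helper in the same patch derives the episode number from titles of the form "Capitulo N: nombre" and prefixes the season taken from infoLabels. A rough sketch of that string handling; the sample title is invented and the real page markup may differ:

import re

def build_episode_label(raw_title, season):
    # "Capitulo 5: El comienzo" -> ("1x5  El comienzo", 5)
    episode = re.search(r"Capitulo ([0-9]+)", raw_title).group(1)
    name = raw_title.split(":")[1]
    return "%sx%s %s" % (season, episode, name), int(episode)

label, episode = build_episode_label("Capitulo 5: El comienzo", 1)
print(label)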
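
[PATCH 2/7] only adds one more marker string to userscloud's test_video_exists(). The general pattern, fetch the page once and compare it against known "file gone" phrases, looks roughly like this outside of alfa's httptools (requests is used here just to keep the sketch self-contained):

import requests

GONE_MARKERS = ("Not Found", "File was deleted", "is no longer available")

def test_video_exists(page_url):
    response = requests.get(page_url, timeout=15)
    if not response.ok or any(marker in response.text for marker in GONE_MARKERS):
        return False, "[Userscloud] El fichero no existe o ha sido borrado"
    return True, ""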
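
[PATCH 3/7] changes doomtv's findvideos() to resolve the stream in two requests: find the jwplatform player script referenced by the page, fetch it with the page as Referer, and read the stream URL out of its sources block. A sketch of that flow with plain requests and re; the regular expressions mirror the patch but are not guaranteed against the current site markup:

import re
import requests

def find_m3u8(page_url):
    page = requests.get(page_url).text
    player = re.search(r'src="?(https://content\.jwplatform\.com/players[^"\s>]+\.js)', page)
    if not player:
        return None
    # Fetch the player script with the page as Referer, flatten it like the channel does,
    # then pull the first file entry from its "sources" block.
    js = requests.get(player.group(1), headers={"Referer": page_url}).text
    js = re.sub(r'"|\n|\r|\t|\s{2,}', "", js)
    source = re.search(r"sources:.*?file: ?(.*?),", js)
    return source.group(1) if source else None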
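
[PATCH 4/7] and [PATCH 5/7] make httptools.downloadpage() raise a dedicated WebErrorException when a channel (but not a server module) gets an HTTP error from a Cloudflare-fronted site, leaving 503 out of it, presumably so the existing Cloudflare challenge handling can still run. Stripped of the rest of downloadpage(), the mechanism is roughly this; the helper name and argument shapes are illustrative, not alfa's API:

import inspect

class WebErrorException(Exception):
    pass

def raise_if_web_error(status_code, headers, domain):
    # Look at the module that called us: server resolvers keep handling
    # errors themselves, only channels get the exception.
    caller = inspect.getmodule(inspect.currentframe().f_back)
    if "servers" in str(caller):
        return
    behind_cloudflare = any("cloudflare" in str(value).lower() for value in headers.values())
    # Same condition as the patch: only Cloudflare-fronted responses raise,
    # and 503 is excluded.
    if status_code > 399 and behind_cloudflare and status_code != 503:
        raise WebErrorException(domain)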
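
[PATCH 6/7] catches that exception in launcher.run() and names the failing channel by matching the traceback text against the channels directory. The extraction step on its own looks roughly like this; the path is illustrative, and re.escape stands in for the patch's manual backslash doubling:

import os
import re

def channel_from_traceback(tb_text, runtime_path):
    # A frame like  File ".../channels/doomtv.py", line 227  yields "doomtv";
    # anything outside the channels folder yields "".
    channels_dir = re.escape(os.path.join(runtime_path, "channels", ""))
    match = re.search('File "' + channels_dir + r'([^.]+)\.py"', tb_text)
    return match.group(1) if match else ""

sample = '  File "/storage/plugin.video.alfa/channels/doomtv.py", line 227, in findvideos'
print(channel_from_traceback(sample, "/storage/plugin.video.alfa"))   # doomtv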