From 0c891a375385a69d05dddd1403a03049d5feaf3a Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sat, 17 Feb 2018 12:01:15 -0500
Subject: [PATCH 01/26] youtube: fix

---
 plugin.video.alfa/servers/youtube.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugin.video.alfa/servers/youtube.py b/plugin.video.alfa/servers/youtube.py
index 989cfde6..5a398635 100755
--- a/plugin.video.alfa/servers/youtube.py
+++ b/plugin.video.alfa/servers/youtube.py
@@ -17,7 +17,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
         page_url = "http://www.youtube.com/watch?v=%s" % page_url
         logger.info(" page_url->'%s'" % page_url)
 
-    video_id = scrapertools.find_single_match(page_url, 'v=([A-z0-9_-]{11})')
+    video_id = scrapertools.find_single_match(page_url, '(?:v=|embed/)([A-z0-9_-]{11})')
     video_urls = extract_videos(video_id)
     video_urls.reverse()
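[Editor's note] The hunk above widens the video-id lookup so that /embed/ URLs resolve as well as watch?v= URLs. A minimal standalone sketch of the matching behaviour, using only the stdlib re module (the sample URLs are invented for illustration):

    import re

    PATTERN = r'(?:v=|embed/)([A-z0-9_-]{11})'   # pattern introduced by this patch

    for url in ("http://www.youtube.com/watch?v=dQw4w9WgXcQ",   # old case, still matches
                "http://www.youtube.com/embed/dQw4w9WgXcQ"):    # new case covered by the fix
        m = re.search(PATTERN, url)
        print(m.group(1) if m else "no match")                  # -> dQw4w9WgXcQ, twice

Side note: the class [A-z] spans ASCII 0x41-0x7A and therefore also admits "[", "\", "]", "^" and backtick; [A-Za-z0-9_-] would be the strict class for YouTube ids, but the patch keeps the original character class unchanged.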
From 82fe2b67c9ffdbc58c920691f32d596e876c8e0a Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 19 Feb 2018 11:34:27 -0500
Subject: [PATCH 02/26] flashx: fix

---
 plugin.video.alfa/servers/flashx.py | 32 +++++++++++------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

diff --git a/plugin.video.alfa/servers/flashx.py b/plugin.video.alfa/servers/flashx.py
index 6c33cac0..6efb18d0 100644
--- a/plugin.video.alfa/servers/flashx.py
+++ b/plugin.video.alfa/servers/flashx.py
@@ -13,21 +13,13 @@ from platformcode import config, logger
 
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-    data = httptools.downloadpage(page_url, cookies=False).data
-    if 'file was deleted' in data:
-        return False, "[FlashX] El archivo no existe o ha sido borrado"
-    elif 'File Not Found' in data:
-        return False, "[FlashX] El archivo no existe"
-    elif 'Video is processing now' in data:
-        return False, "[FlashX] El archivo se está procesando"
-
     return True, ""
 
 
 def get_video_url(page_url, premium=False, user="", password="", video_password=""):
     logger.info("url=" + page_url)
     pfxfx = ""
-    headers = {'Host': 'www.flashx.tv',
+    headers = {'Host': 'www.flashx.sx',
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Language': 'en-US,en;q=0.5',
@@ -35,11 +27,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
                'Cookie': ''}
     data = httptools.downloadpage(page_url, cookies=False).data
     data = data.replace("\n","")
-    cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
+    cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.sx/counter.cgi.*?[^(?:'|")]+)""")
     cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
-    playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
+    playnow = scrapertools.find_single_match(data, 'https://www.flashx.sx/dl[^"]+')
     # Para obtener el f y el fxfx
-    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js\w+/c\w+.*?[^(?:'|")]+)""")
+    js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.sx/js\w+/c\w+.*?[^(?:'|")]+)""")
     data_fxfx = httptools.downloadpage(js_fxfx).data
     mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
     matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -49,19 +41,20 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     logger.info("mfxfxfx2= %s" %pfxfx)
     if pfxfx == "":
         pfxfx = "ss=yes&f=fail&fxfx=6"
-    coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
+    coding_url = 'https://www.flashx.sx/flashx.php?%s' %pfxfx
     # {f: 'y', fxfx: '6'}
-    flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
-    fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
-    hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
-    imhuman = scrapertools.find_single_match(data, "value='([^']+)' name='imhuman'")
+    bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)[...]

[The remainder of this patch, and every patch between it and the newpct/newpct1 channel rewrite, was lost when this series was flattened from rendered HTML: angle-bracketed content was treated as markup and swallowed. Stretches lost inside the surviving lines are marked "[...]" from here on. The text resumes mid-way through the old channel code being removed.]
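[Editor's note] The loop that turns `matches` into the pfxfx query string falls between the two hunks above and is not visible in this excerpt. A minimal reconstruction of the implied behaviour, with an invented sample payload that mirrors the "# {f: 'y', fxfx: '6'}" comment in the hunk:

    import re

    data_fxfx = "$.get('/flashx.php', {f: 'y', fxfx: '6'});"   # invented sample of the downloaded JS
    mfxfx = re.search(r"get.*?({.*?})", data_fxfx).group(1)
    mfxfx = mfxfx.replace("'", "").replace(" ", "")            # -> {f:y,fxfx:6}
    pfxfx = "&".join("%s=%s" % kv for kv in re.findall(r"(\w+):(\w+)", mfxfx))
    print(pfxfx or "ss=yes&f=fail&fxfx=6")                     # -> f=y&fxfx=6
    # get_video_url() then requests 'https://www.flashx.sx/flashx.php?%s' % pfxfx,
    # falling back to "ss=yes&f=fail&fxfx=6" when nothing was matched.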
-    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
-    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-
-    patron = '[...].*?'
-    data = scrapertools.get_match(data, patron)
-
-    patron = '[...]([^>]+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '[...](.+?)<\/ul>' #Filtrado por url
+    data_cat = scrapertools.find_single_match(data, patron)
+    patron_cat='[...]<\/li>'
+    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
     for scrapedurl, scrapedtitle in matches:
-        title = scrapedtitle.strip()
-        url = scrapedurl
-
-        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
-        itemlist.append(
-            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))
-
-    return itemlist
-
-
-def alfabeto(item):
-    logger.info()
-    itemlist = []
-
-    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
-    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-
-    patron = '[...]'
-    data = scrapertools.get_match(data, patron)
-
-    patron = '[...]]+>([^>]+)'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for scrapedurl, scrapedtitle in matches:
-        title = scrapedtitle.upper()
-        url = scrapedurl
-
-        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))
-
+        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,action="listado"))
     return itemlist
 
 
 def listado(item):
     logger.info()
     itemlist = []
-    url_next_page =''
-
-    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
-    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
-    #logger.debug(data)
-    logger.debug('item.modo: %s'%item.modo)
-    logger.debug('item.extra: %s'%item.extra)
-
-    if item.modo != 'next' or item.modo =='':
-        logger.debug('item.title: %s'% item.title)
-        patron = '[...]'
-        logger.debug("patron=" + patron)
-        fichas = scrapertools.get_match(data, patron)
-        page_extra = item.extra
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron_data='[...]'
+    data_listado = scrapertools.find_single_match(data, patron_data)
+    logger.info("sadas"+data_listado)
+    patron_listado='[...]
-    [...] 30:
-        url_next_page = item.url
-        matches = matches[:30]
-        next_page = 'b'
-        modo = 'continue'
-    else:
-        matches = matches[30:]
-        next_page = 'a'
-        patron_next_page = '[...]Next<\/a>'
-        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
-        modo = 'continue'
-        if len(matches_next_page) > 0:
-            url_next_page = matches_next_page[0]
-            modo = 'next'
-
-    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
-        url = scrapedurl
-        title = scrapedtitle
-        thumbnail = scrapedthumbnail
-        action = "findvideos"
-        extra = ""
-        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
-        if "1.com/series" in url:
-            action = "episodios"
-            extra = "serie"
-
-
-        title = scrapertools.find_single_match(title, '([^-]+)')
-        title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
-                                                                                               1).strip()
+    patron_listado+='[...]>'
+    patron_listado+='(.+?)<\/h2>[...](.+?)<\/span><\/a><\/li>'
+    logger.info("sasssss"+patron_listado)
+    matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
+    for scrapedurl, scrapedthumbnail,scrapedtitle,scrapedquality in matches:
+        if 'Serie' in item.title:
+            action="episodios"
         else:
-            title = title.replace("Descargar", "", 1).strip()
-            if title.endswith("gratis"): title = title[:-7]
-
-        show = title
-        if item.extra != "buscar-list":
-            title = title + ' ' + calidad
-
-        context = ""
-        context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
-        if context_title:
-            try:
-                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
-                                                                                                          "tvshow")
-                context_title = context_title[1].replace("-", " ")
-                if re.search('\d{4}', context_title[-4:]):
-                    context_title = context_title[:-4]
-                elif re.search('\(\d{4}\)', context_title[-6:]):
-                    context_title = context_title[:-6]
-
-            except:
-                context_title = show
-        logger.debug('contxt title: %s'%context_title)
-        logger.debug('year: %s' % year)
-
-        logger.debug('context: %s' % context)
-        if not 'array' in title:
-            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
-                                 extra = extra,
-                                 show = context_title, contentTitle=context_title, contentType=context,
-                                 context=["buscar_trailer"], infoLabels= {'year':year}))
-
-    tmdb.set_infoLabels(itemlist, True)
-
-
-
-    if url_next_page:
-        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
-                             url=url_next_page, next_page=next_page, folder=True,
-                             text_color='yellow', text_bold=True, modo = modo, plot = extra,
-                             extra = page_extra))
+            action="findvideos"
+        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,thumbnail=scrapedthumbnail, action=action, quality=scrapedquality,show=scrapedtitle))
+    # Página siguiente
+    patron_pag='[...]'
-
-    match_ver = scrapertools.find_single_match(data, patron_ver)
-    match_descargar = scrapertools.find_single_match(data, patron_descargar)
-
-    patron = '[...]
-    [...]\d+)?)<.+?[...]]+>(?P<lang>.*?)\s*Calidad\s*[...]]+>" \
-              "[\[]\s*(?P<quality>.*?)\s*[\]]"
-        r = re.compile(pattern)
-        match = [m.groupdict() for m in r.finditer(info)][0]
-
-        if match["episode2"]:
-            multi = True
-            title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
-                                                str(match["episode2"]).zfill(2), match["lang"],
-                                                match["quality"])
-        else:
-            multi = False
-            title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
-                                             match["lang"], match["quality"])
-
-    else:  # old style
-        pattern = "\[(?P<lang>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
-                  "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<quality>.*?)\])?"
-
-        r = re.compile(pattern)
-        match = [m.groupdict() for m in r.finditer(info)][0]
-        # logger.debug("data %s" % match)
-
-        str_lang = ""
-        if match["lang"] is not None:
-            str_lang = "[%s]" % match["lang"]
-
-        if match["season2"] and match["episode2"]:
-            multi = True
-            if match["season"] == match["season2"]:
-
-                title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
-                                                  match["episode2"], str_lang, match["quality"])
-            else:
-                title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
-                                                     match["season2"], match["episode2"], str_lang,
-                                                     match["quality"])
-        else:
-            title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
-                                           match["quality"])
-            multi = False
-
-        season = match['season']
-        episode = match['episode']
-        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
-                             quality=item.quality, multi=multi, contentSeason=season,
-                             contentEpisodeNumber=episode, infoLabels = infoLabels))
-
-    # order list
-    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
-    if len(itemlist) > 1:
-        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
-
-    if config.get_videolibrary_support() and len(itemlist) > 0:
-        itemlist.append(
-            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
-
-    return itemlist
-
-
-def search(item, texto):
-    logger.info("search:" + texto)
-    # texto = texto.replace(" ", "+")
-
-    try:
-        item.post = "q=%s" % texto
-        item.pattern = "buscar-list"
-        itemlist = listado2(item)
-
-        return itemlist
-
-    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
-
-
-def newest(categoria):
-    logger.info()
-    itemlist = []
-    item = Item()
-    try:
-        item.extra = 'pelilist'
-        if categoria == 'torrent':
-            item.url = host+'peliculas/'
-
-            itemlist = listado(item)
-            if itemlist[-1].title == ">> Página siguiente":
-                itemlist.pop()
-            item.url = host+'series/'
-            itemlist.extend(listado(item))
-            if itemlist[-1].title == ">> Página siguiente":
-                itemlist.pop()
-
-    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("{0}".format(line))
-        return []
-
-    return itemlist
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron_data='[...](.+?)[...]'
+    data_listado = scrapertools.find_single_match(data, patron_data)
+    patron = '.+?[...].+?[...].+?[...].+?>Serie.+?>(.+?)<'
+    matches = scrapertools.find_multiple_matches(data_listado, patron)
+    for scrapedthumbnail,scrapedurl, scrapedtitle in matches:
+        if " al " in scrapedtitle:
+            #action="episodios"
+            titulo=scrapedurl.split('http')
+            scrapedurl="http"+titulo[1]
+        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,thumbnail=scrapedthumbnail, action="findvideos", show=scrapedtitle))
+    return itemlist
\ No newline at end of file
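[Editor's note] In the episodios() rewrite that closes above, the pair titulo=scrapedurl.split('http') / scrapedurl="http"+titulo[1] strips leading junk from a scraped href and keeps the first absolute URL embedded in it. Sketch with an invented href:

    href = "/enlaces/ver.php?u=http://www.newpct1.com/serie/example/capitulo-101"
    parts = href.split('http')   # ['/enlaces/ver.php?u=', '://www.newpct1.com/serie/example/capitulo-101']
    url = "http" + parts[1]
    print(url)                   # -> http://www.newpct1.com/serie/example/capitulo-101

Note that the code only uses parts[1], so an href containing more than one embedded "http" would be truncated at the second occurrence.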
scrapertools.cachePage("http://www.documaniatv.com/login.php", headers=headers, post=post) - - if "Nombre de usuario o contraseña incorrectas" in data: - logger.error("login erróneo") - return True, "" - - return False, user - - -def mainlist(item): - logger.info() - - itemlist = [] - itemlist.append(item.clone(action="novedades", title="Novedades", url="http://www.documaniatv.com/newvideos.html")) - itemlist.append( - item.clone(action="categorias", title="Categorías y Canales", url="http://www.documaniatv.com/browse.html")) - itemlist.append(item.clone(action="novedades", title="Top", url="http://www.documaniatv.com/topvideos.html")) - itemlist.append(item.clone(action="categorias", title="Series Documentales", - url="http://www.documaniatv.com/top-series-documentales-html")) - itemlist.append(item.clone(action="viendo", title="Viendo ahora", url="http://www.documaniatv.com")) - itemlist.append(item.clone(action="", title="")) - itemlist.append(item.clone(action="search", title="Buscar")) - - folder = False - action = "" - if account: - error, user = login() - if error: - title = "Playlists Personales (Error en usuario y/o contraseña)" - else: - title = "Playlists Personales (Logueado)" - action = "usuario" - folder = True - - else: - title = "Playlists Personales (Sin cuenta configurada)" - user = "" - - url = "http://www.documaniatv.com/user/%s" % user - itemlist.append(item.clone(title=title, action=action, url=url, folder=folder)) - itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", - folder=False)) - return itemlist - - -def configuracion(item): - from platformcode import platformtools - platformtools.show_channel_settings() - if config.is_xbmc(): - import xbmc - xbmc.executebuiltin("Container.Refresh") - - -def newest(categoria): - itemlist = [] - item = Item() - try: - if categoria == 'documentales': - item.url = "http://www.documaniatv.com/newvideos.html" - itemlist = novedades(item) - - if itemlist[-1].action == "novedades": - itemlist.pop() - - # Se captura la excepción, para no interrumpir al canal novedades si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("{0}".format(line)) - return [] - - return itemlist - - -def search(item, texto): - logger.info() - data = scrapertools.cachePage(host, headers=headers) - item.url = scrapertools.find_single_match(data, 'form action="([^"]+)"') + "?keywords=%s&video-id=" - texto = texto.replace(" ", "+") - item.url = item.url % texto - try: - return novedades(item) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def novedades(item): - logger.info() - itemlist = [] - - # Descarga la pagina - data = scrapertools.cachePage(item.url, headers=headers) - # Saca el plot si lo tuviese - scrapedplot = scrapertools.find_single_match(data, '
    (.*?)
    ') - if "(.*?)
  • ') - - if "Registrarse" in data or not account: - for match in bloque: - patron = '(.*?).*?»') - next_page_url = urlparse.urljoin(host, next_page_url) - itemlist.append(item.clone(action="novedades", title=">> Página siguiente", url=next_page_url)) - except: - logger.error("Siguiente pagina no encontrada") - - return itemlist - - -def categorias(item): - logger.info() - itemlist = [] - data = scrapertools.cachePage(item.url, headers=headers) - - patron = '
    .*?(?:|)(.*?)<' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - if not scrapedthumbnail.startswith("data:image"): - scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1] - else: - scrapedthumbnail = item.thumbnail - itemlist.append(item.clone(action="novedades", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail)) - - # Busca enlaces de paginas siguientes... - next_page_url = scrapertools.find_single_match(data, '') - if next_page_url != "": - itemlist.append(item.clone(action="categorias", title=">> Página siguiente", url=next_page_url)) - - return itemlist - - -def viendo(item): - logger.info() - itemlist = [] - - # Descarga la pagina - data = scrapertools.cachePage(item.url, headers=headers) - bloque = scrapertools.find_single_match(data, '
    ' \ - '.*?(.*?)' - matches = scrapertools.find_multiple_matches(data, patron) - for scrapedthumbnail, scrapedurl, scrapedtitle in matches: - scrapedthumbnail += "|" + headers[0][0] + "=" + headers[0][1] - logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") - itemlist.append(item.clone(action="play_", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - fanart=scrapedthumbnail, fulltitle=scrapedtitle, folder=False)) - - return itemlist From 6952b1a3ffedc2d0213da1e431cd4a11229c47da Mon Sep 17 00:00:00 2001 From: danielr460 Date: Fri, 23 Feb 2018 15:22:34 -0500 Subject: [PATCH 20/26] =?UTF-8?q?A=C3=B1adiendo=20Torrent=20a=20Newptc=20y?= =?UTF-8?q?=20Newptc1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/newpct.py | 21 ++++++++++++++++++--- plugin.video.alfa/channels/newpct1.py | 17 +++++++++++++++++ 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/plugin.video.alfa/channels/newpct.py b/plugin.video.alfa/channels/newpct.py index 4e8f36c7..14ea9969 100755 --- a/plugin.video.alfa/channels/newpct.py +++ b/plugin.video.alfa/channels/newpct.py @@ -4,6 +4,7 @@ import re import urllib import urlparse +from core import servertools from core import scrapertools from core.item import Item from platformcode import logger @@ -48,15 +49,12 @@ def listado(item): data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron_data='
      (.+?)
    ' data_listado = scrapertools.find_single_match(data, patron_data) - logger.info("sadas"+data_listado) patron_listado='
  • Date: Fri, 23 Feb 2018 15:24:49 -0500 Subject: [PATCH 21/26] Update --- plugin.video.alfa/channels/newpct1.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py index a8adeb50..d328e143 100644 --- a/plugin.video.alfa/channels/newpct1.py +++ b/plugin.video.alfa/channels/newpct1.py @@ -4,6 +4,7 @@ import re import urllib import urlparse +from core import servertools from core import scrapertools from core.item import Item from platformcode import logger From d29edda3a9096a6d7611881d711e923b431e0866 Mon Sep 17 00:00:00 2001 From: danielr460 Date: Fri, 23 Feb 2018 15:34:03 -0500 Subject: [PATCH 22/26] =?UTF-8?q?Eliminando=20c=C3=B3digo=20innecesario?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin.video.alfa/channels/newpct.py | 2 -- plugin.video.alfa/channels/newpct1.py | 5 ----- 2 files changed, 7 deletions(-) diff --git a/plugin.video.alfa/channels/newpct.py b/plugin.video.alfa/channels/newpct.py index 14ea9969..07e33c07 100755 --- a/plugin.video.alfa/channels/newpct.py +++ b/plugin.video.alfa/channels/newpct.py @@ -93,8 +93,6 @@ def findvideos(item): data = httptools.downloadpage(item.url).data itemlist = servertools.find_video_items(data = data) url = scrapertools.find_single_match( data, 'location.href = "([^"]+)"') - data = httptools.downloadpage(url, follow_redirects=False).headers['location'] - data = httptools.downloadpage(url).data new_item.append(Item(url = url, title = "Torrent", server = "torrent", action = "play")) itemlist.extend(new_item) for it in itemlist: diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py index d328e143..0caf19cd 100644 --- a/plugin.video.alfa/channels/newpct1.py +++ b/plugin.video.alfa/channels/newpct1.py @@ -49,15 +49,12 @@ def listado(item): data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) patron_data='
      (.+?)
    ' data_listado = scrapertools.find_single_match(data, patron_data) - logger.info("sadas"+data_listado) patron_listado='
  • Date: Fri, 23 Feb 2018 15:45:33 -0500 Subject: [PATCH 23/26] pelisultra: nuevo canal (@Paquito Porras) --- plugin.video.alfa/channels/pelisultra.json | 88 +++++++ plugin.video.alfa/channels/pelisultra.py | 282 +++++++++++++++++++++ 2 files changed, 370 insertions(+) create mode 100644 plugin.video.alfa/channels/pelisultra.json create mode 100644 plugin.video.alfa/channels/pelisultra.py diff --git a/plugin.video.alfa/channels/pelisultra.json b/plugin.video.alfa/channels/pelisultra.json new file mode 100644 index 00000000..5e4229b7 --- /dev/null +++ b/plugin.video.alfa/channels/pelisultra.json @@ -0,0 +1,88 @@ +{ + "id": "pelisultra", + "name": "PelisUltra", + "active": true, + "adult": false, + "language": ["lat"], + "thumbnail": "https://s17.postimg.org/ft51srhjj/logoultra.png", + "categories": ["movie", "tvshow"], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_series", + "type": "bool", + "label": "Incluir en Novedades - Series", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_terror", + "type": "bool", + "label": "Incluir en Novedades - terror", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_latino", + "type": "bool", + "label": "Incluir en Novedades - Latino", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "perfil", + "type": "list", + "label": "Perfil de color", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "Perfil 3", + "Perfil 2", + "Perfil 1", + "Ninguno" + ] + }, + { + "id": "episodios_x_pag", + "type": "list", + "label": "Episodios por página", + "default": 2, + "enabled": true, + "visible": true, + "lvalues": [ + "10", + "15", + "20", + "25", + "30" + ] + }, + { + "id": "temporada_o_todos", + "type": "bool", + "label": "Mostrar temporadas", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/pelisultra.py b/plugin.video.alfa/channels/pelisultra.py new file mode 100644 index 00000000..d8cb2b6f --- /dev/null +++ b/plugin.video.alfa/channels/pelisultra.py @@ -0,0 +1,282 @@ +# -*- coding: utf-8 -*- +from core import httptools +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from platformcode import config, logger + +__perfil__ = int(config.get_setting('perfil', 'pelisultra')) + +# Fijar perfil de color +perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'], + ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'], + ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']] + +if __perfil__ < 3: + color1, color2, color3 = perfil[__perfil__] +else: + color1 = color2 = color3 = "" + +host="http://www.pelisultra.com" + +def mainlist(item): + logger.info() + itemlist = [] + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png" + itemlist.append(item.clone(title="Películas:", folder=False, text_color="0xFFD4AF37", text_bold=True)) + itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host)) + itemlist.append(Item(channel = item.channel, title = " Estrenos", action = "peliculas", url = host + "/genero/estrenos/" 
)) + itemlist.append(Item(channel = item.channel, title = " Por género", action = "genero", url = host + "/genero/" )) + item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png" + itemlist.append(item.clone(title="Series:", folder=False, text_color="0xFFD4AF37", text_bold=True)) + itemlist.append(Item(channel = item.channel, title = " Todas las series", action = "series", url = host + "/series/" )) + itemlist.append(Item(channel = item.channel, title = " Nuevos episodios", action = "nuevos_episodios", url = host + "/episodio/" )) + itemlist.append(Item(channel = item.channel, title = "Buscar...", action = "search", url = host, text_color="red", text_bold=True)) + itemlist.append(item.clone(title="Configurar canal...", text_color="green", action="configuracion", text_bold=True)) + return itemlist + +def configuracion(item): + from platformcode import platformtools + ret = platformtools.show_channel_settings() + platformtools.itemlist_refresh() + return ret + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria in ["peliculas", "latino"]: + item.url = host + itemlist = peliculas(item) + elif categoria == 'terror': + item.url = host + '/genero/terror/' + itemlist = peliculas(item) + elif categoria == "series": + item.url = host + "/episodio/" + itemlist = nuevos_episodios(item) + if "Pagina" in itemlist[-1].title: + itemlist.pop() + + # Se captura la excepción, para no interrumpir al canal novedades si un canal falla + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + +def peliculas(item): + #logger.info() + logger.info(item) + itemlist = [] + data = httptools.downloadpage(item.url).data + + data2 = scrapertools.find_single_match(data,'(?s)
  • ' + matches = scrapertools.find_multiple_matches(data, patron) + # Se quita "Estrenos" de la lista porque tiene su propio menu + matches.pop(0) + + for scrapedurl, scrapedtitle in matches: + itemlist.append(Item(action = "peliculas", channel = item.channel, title = scrapedtitle, url = scrapedurl)) + + return itemlist + +def series(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + # Se saca la info + patron = '(?s)class="ml-item.*?' # base + patron += 'a href="([^"]+).*?' # url + patron += 'img src="([^"]+).*?' # imagen + patron += 'alt="([^"]+).*?' # titulo + patron += 'class="year">(\d{4})' # año + matches = scrapertools.find_multiple_matches(data, patron) + + #if config.get_setting('temporada_o_todos', 'pelisultra') == 0: + if config.get_setting('temporada_o_todos', 'pelisultra'): + accion="temporadas" + else: + accion="episodios" + + for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches: + itemlist.append(Item(action = accion, channel = item.channel, title = scrapedtitle + " (" + scrapedyear + ")", contentSerieName=scrapedtitle, contentType="tvshow", thumbnail = scrapedthumbnail, url = scrapedurl, infoLabels={'year':scrapedyear})) + + # InfoLabels: + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + # Pagina siguiente + patron_siguiente='class="pag_b"> item.page + episodios_por_pagina: + itemlist_page.append(item.clone(title = ">>> Pagina siguiente", page = item.page + episodios_por_pagina)) + + # InfoLabels: + tmdb.set_infoLabels_itemlist(itemlist_page, seekTmdb=True) + + return itemlist_page + +def nuevos_episodios(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = '(?s).*?' # base + patron += '' # url + patron += '(.*?).*?' # nombre_serie + patron += ' Date: Fri, 23 Feb 2018 16:08:00 -0500 Subject: [PATCH 24/26] v.2.5.0 --- plugin.video.alfa/addon.xml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 67e21088..418c82c2 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@ - + @@ -19,10 +19,14 @@ [B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR] - » seriesblanco » rapidvideo - » kbagi » bitertv - » doomtv » miltorrents + » newpct » newpct1 + » youtube » flashx + » kbagi » pelismagnet + » gnula » animemovil + » cinecalidad » cuelgame + » divxtotal » cinemahd ¤ arreglos internos + ¤ Agradecimientos a @Paquito Porras por PelisUltra. Navega con Kodi por páginas web para ver sus videos de manera fácil. 
 Browse web pages using Kodi

From 8873559812498877add38747f989f94c48a9ab6f Mon Sep 17 00:00:00 2001
From: Alfa <30527549+alfa-addon@users.noreply.github.com>
Date: Fri, 23 Feb 2018 16:15:34 -0500
Subject: [PATCH 25/26] newpct: limpieza de codigo

---
 plugin.video.alfa/channels/newpct.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/plugin.video.alfa/channels/newpct.py b/plugin.video.alfa/channels/newpct.py
index 07e33c07..22bf446e 100755
--- a/plugin.video.alfa/channels/newpct.py
+++ b/plugin.video.alfa/channels/newpct.py
@@ -97,6 +97,4 @@ def findvideos(item):
     itemlist.extend(new_item)
     for it in itemlist:
         it.channel = item.channel
-
-    scrapertools.printMatches(itemlist)
-    return itemlist
\ No newline at end of file
+    return itemlist

From 3af2e191c99d5ca9fcb01f80b66193722a96154d Mon Sep 17 00:00:00 2001
From: Alfa <30527549+alfa-addon@users.noreply.github.com>
Date: Fri, 23 Feb 2018 16:20:02 -0500
Subject: [PATCH 26/26] newpct1: limpieza de codigo

---
 plugin.video.alfa/channels/newpct1.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/plugin.video.alfa/channels/newpct1.py b/plugin.video.alfa/channels/newpct1.py
index 0caf19cd..b6bcc865 100644
--- a/plugin.video.alfa/channels/newpct1.py
+++ b/plugin.video.alfa/channels/newpct1.py
@@ -97,6 +97,4 @@ def findvideos(item):
     itemlist.extend(new_item)
     for it in itemlist:
         it.channel = item.channel
-
-    scrapertools.printMatches(itemlist)
-    return itemlist
\ No newline at end of file
+    return itemlist
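[Editor's note] Patches 20, 22, 25 and 26 together leave findvideos() in newpct.py and newpct1.py in essentially this final shape (assembled from the hunks above; the initialisation of new_item sits outside every visible hunk and is assumed here):

    def findvideos(item):
        logger.info()
        new_item = []                                           # assumed, not shown in any hunk
        data = httptools.downloadpage(item.url).data
        itemlist = servertools.find_video_items(data=data)      # regular hosters found in the page
        # the torrent link is exposed through a javascript redirect in the page
        url = scrapertools.find_single_match(data, 'location.href = "([^"]+)"')
        new_item.append(Item(url=url, title="Torrent", server="torrent", action="play"))
        itemlist.extend(new_item)
        for it in itemlist:
            it.channel = item.channel                           # stamp the channel on every result
        return itemlist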