diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml
index 7d27e2ce..b44f5691 100755
--- a/plugin.video.alfa/addon.xml
+++ b/plugin.video.alfa/addon.xml
@@ -1,5 +1,5 @@
-
+
@@ -19,10 +19,14 @@
 [B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
- ¤ mejortorrent ¤ mejortorrent1
- ¤ newpct1 ¤ inkaseries
- ¤ wikiseries ¤ powvideo
- ¤ mega
+ ¤ doomtv ¤ locopelis
+ ¤ veseriesonline ¤ hdfull
+ ¤ flashx ¤ powvideo
+ ¤ vidoza ¤ alltorrent
+ ¤ elitetorrent ¤ grantorrent
+ ¤ mejortorrent ¤ newpct1
+ ¤ poseidonhd
+ ¤ arreglos internos
 ¤ Agradecimientos a @angedam por colaborar en ésta versión
diff --git a/plugin.video.alfa/channels/alltorrent.json b/plugin.video.alfa/channels/alltorrent.json
index c627ebbb..e2188b7b 100755
--- a/plugin.video.alfa/channels/alltorrent.json
+++ b/plugin.video.alfa/channels/alltorrent.json
@@ -4,7 +4,8 @@
     "active": true,
     "adult": false,
     "language": ["cast"],
-    "thumbnail": "http://imgur.com/sLaXHvp.png",
+    "thumbnail": "altorrent.png",
+    "fanart": "altorrent.jpg",
     "categories": [
         "torrent",
         "movie"
@@ -22,7 +23,7 @@
             "id": "include_in_global_search",
             "type": "bool",
             "label": "Incluir en busqueda global",
-            "default": true,
+            "default": false,
             "enabled": true,
             "visible": true
         },
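Two alltorrent.json changes are worth calling out: the artwork now ships with the addon (altorrent.png / altorrent.jpg) instead of being hotlinked from imgur, and include_in_global_search now defaults to false, so the channel stays out of the global search until the user opts back in. A minimal sketch of how that flag is read, using the config.get_setting(name, channel) accessor that appears elsewhere in this diff (the helper itself is illustrative, not part of the commit):

    # Hypothetical helper: how a searcher could honor the JSON flag above.
    # config.get_setting(name, channel) is the accessor used elsewhere in this diff.
    from platformcode import config

    def searchable(channel):
        return config.get_setting('include_in_global_search', channel)

    # searchable('alltorrent') -> False with the new default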
diff --git a/plugin.video.alfa/channels/alltorrent.py b/plugin.video.alfa/channels/alltorrent.py
index 34869367..505f5059 100755
--- a/plugin.video.alfa/channels/alltorrent.py
+++ b/plugin.video.alfa/channels/alltorrent.py
@@ -1,93 +1,261 @@
 # -*- coding: utf-8 -*-
-import os
 import re
-import unicodedata
-from threading import Thread
+import sys
+import urllib
+import urlparse
+from channelselector import get_thumb
 from core import httptools
 from core import scrapertools
 from core import servertools
-from core import tmdb
 from core.item import Item
 from platformcode import config, logger
-
-__modo_grafico__ = config.get_setting('modo_grafico', "ver-pelis")
-
-
-# Para la busqueda en bing evitando baneos
-
-
-def browser(url):
-    import mechanize
-
-    # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing
-    br = mechanize.Browser()
-    # Browser options
-    br.set_handle_equiv(False)
-    br.set_handle_gzip(True)
-    br.set_handle_redirect(True)
-    br.set_handle_referer(False)
-    br.set_handle_robots(False)
-    # Follows refresh 0 but not hangs on refresh > 0
-    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
-    # Want debugging messages?
-    # br.set_debug_http(True)
-    # br.set_debug_redirects(True)
-    # br.set_debug_responses(True)
-
-    # User-Agent (this is cheating, ok?)
-    # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
-    # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
-    # Open some site, let's pick a random one, the first that pops in mind
-    r = br.open(url)
-    response = r.read()
-    print response
-    if "img,divreturn" in response:
-        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
-        print "prooooxy"
-        response = r.read()
-
-    return response
-
-
-api_key = "2e2160006592024ba87ccdf78c28f49f"
-api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
+from core import tmdb
+from lib import generictools

 host = 'http://alltorrent.net/'

+__modo_grafico__ = config.get_setting('modo_grafico', 'alltorrent')
+

 def mainlist(item):
     logger.info()
     itemlist = []
-    i = 0
-    global i
-    itemlist.append(item.clone(title="[COLOR springgreen][B]Todas Las Películas[/B][/COLOR]", action="scraper",
-                               url="http://alltorrent.net/", thumbnail="http://imgur.com/XLqPZoF.png",
-                               fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie"))
-    itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 1080p[/COLOR]", action="scraper",
-                               url="http://alltorrent.net/rezolucia/1080p/", thumbnail="http://imgur.com/XLqPZoF.png",
-                               fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie"))
-    itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 720p[/COLOR]", action="scraper",
-                               url="http://alltorrent.net/rezolucia/720p/", thumbnail="http://imgur.com/XLqPZoF.png",
-                               fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie"))
-    itemlist.append(item.clone(title="[COLOR springgreen] Incluyen Hdrip[/COLOR]", action="scraper",
-                               url="http://alltorrent.net/rezolucia/hdrip/", thumbnail="http://imgur.com/XLqPZoF.png",
-                               fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie"))
-    itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 3D[/COLOR]", action="scraper",
-                               url="http://alltorrent.net/rezolucia/3d/", thumbnail="http://imgur.com/XLqPZoF.png",
-                               fanart="http://imgur.com/v3ChkZu.jpg", contentType="movie"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR floralwhite][B]Buscar[/B][/COLOR]", action="search",
-                                       thumbnail="http://imgur.com/5EBwccS.png", fanart="http://imgur.com/v3ChkZu.jpg",
-                                       contentType="movie", extra="titulo"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR oldlace] Por Título[/COLOR]", action="search",
-                                       thumbnail="http://imgur.com/5EBwccS.png", fanart="http://imgur.com/v3ChkZu.jpg",
-                                       contentType="movie", extra="titulo"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR oldlace] Por Año[/COLOR]", action="search",
-                                       thumbnail="http://imgur.com/5EBwccS.png", fanart="http://imgur.com/v3ChkZu.jpg",
-                                       contentType="movie", extra="año"))
-    itemlist.append(itemlist[-1].clone(title="[COLOR oldlace] Por Rating Imdb[/COLOR]", action="search",
-                                       thumbnail="http://imgur.com/5EBwccS.png", fanart="http://imgur.com/v3ChkZu.jpg",
-                                       contentType="movie", extra="rating"))
+
+    thumb_pelis = get_thumb("channels_movie.png")
+    thumb_pelis_hd = get_thumb("channels_movie_hd.png")
+    thumb_series = get_thumb("channels_tvshow.png")
+    thumb_series_hd = get_thumb("channels_tvshow_hd.png")
+    thumb_buscar = get_thumb("search.png")
+
+    itemlist.append(item.clone(title="[COLOR springgreen][B]Todas Las Películas[/B][/COLOR]", action="listado",
+                               url=host, thumbnail=thumb_pelis, extra="pelicula"))
itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 1080p[/COLOR]", action="listado", + url=host + "rezolucia/1080p/", thumbnail=thumb_pelis_hd, extra="pelicula")) + itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 720p[/COLOR]", action="listado", + url=host + "rezolucia/720p/", thumbnail=thumb_pelis_hd, extra="pelicula")) + itemlist.append(item.clone(title="[COLOR springgreen] Incluyen Hdrip[/COLOR]", action="listado", + url=host + "rezolucia/hdrip/", thumbnail=thumb_pelis, extra="pelicula")) + itemlist.append(item.clone(title="[COLOR springgreen] Incluyen 3D[/COLOR]", action="listado", + url=host + "rezolucia/3d/", thumbnail=thumb_pelis_hd, extra="pelicula")) + itemlist.append(item.clone(title="[COLOR floralwhite][B]Buscar[/B][/COLOR]", action="search", thumbnail=thumb_buscar, + extra="titulo")) + itemlist.append(item.clone(title="[COLOR oldlace] Por Título[/COLOR]", action="search", thumbnail=thumb_buscar, + extra="titulo")) + itemlist.append(item.clone(title="[COLOR oldlace] Por Año[/COLOR]", action="search", thumbnail=thumb_buscar, + extra="año")) + itemlist.append(item.clone(title="[COLOR oldlace] Por Rating Imdb[/COLOR]", action="search", thumbnail=thumb_buscar, + extra="rating")) + + return itemlist + + +def listado(item): + logger.info() + itemlist = [] + + # Descarga la página + data = '' + try: + data = re.sub(r"\n|\r|\t|\s{2}|()", "", httptools.downloadpage(item.url).data) + except: + pass + + if not data: #Si la web está caída salimos sin dar error + logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data) + itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log')) + return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + + patron = '
([^"]+)<\/a>\s?<\/div>
(.*?)<\/div><\/div><\/div>' + #data = scrapertools.find_single_match(data, patron) + matches = re.compile(patron, re.DOTALL).findall(data) + if not matches and not '
') except: - logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + video_section) - itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log')) - return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos + pass + cnt_next += 1 if not data: #Si la web está caída salimos sin dar error logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + video_section) @@ -178,6 +196,11 @@ def listado(item): matches_alt = re.compile(patron, re.DOTALL).findall(video_section) if not matches_alt and not '
     if not matches_alt and not '0 resultados' in data:  #error
+        item = generictools.web_intervenida(item, data)  #Verificamos que no haya sido clausurada
+        if item.intervencion:  #Sí ha sido clausurada judicialmente
+            item, itemlist = generictools.post_tmdb_listado(item, itemlist)  #Llamamos al método para el pintado del error
+            return itemlist  #Salimos
+
         logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
         itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
         return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos
@@ -349,9 +372,13 @@ def findvideos(item):
     itemlist = []

     #Bajamos los datos de la página
+    data = ''
     try:
         data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
     except:
+        pass
+
+    if not data:
         logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data)
         itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
         return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos
@@ -361,6 +388,11 @@ def findvideos(item):
     patron = '\/icono_.*?png" title="(?P.*?)?" [^>]+><\/td>(?P.*?)?.*?(?P.*?)?<\/td>
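The five added lines in the @@ -178 hunk are the new channel-wide pattern for judicially seized domains: before declaring a scraper broken, ask generictools whether the site was taken down. A minimal sketch of that flow, using only the calls visible in this diff (web_intervenida sets item.intervencion, post_tmdb_listado paints the explanatory entry); the wrapper function itself is illustrative:

    # Sketch of the seizure check added above; the three generictools calls
    # are taken from the diff, the wrapper name is hypothetical.
    from lib import generictools

    def check_intervencion(item, data, itemlist):
        item = generictools.web_intervenida(item, data)  # domain seized?
        if item.intervencion:
            # explain the takedown to the user instead of a generic error
            item, itemlist = generictools.post_tmdb_listado(item, itemlist)
        return item, itemlist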
 (.+?)"
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-    scrapertools.printMatches(matches)
-
-    for match in matches:
-        scrapedurl = match[0]
-        # Se debe quitar saltos de linea en match[1]
-        scrapedtitle = match[1][1:-1] + " (" + match[2] + ")"
-        # ~ scrapedtitle = match[1]
-        scrapedthumbnail = ""
-        scrapedplot = ""
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-        itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
-
-    return itemlist
-
-
-def DocuARCHIVO(item):
-    logger.info()
-    itemlist = []
-
-    # Descarga la página
-    data = scrapertools.cache_page(item.url)
-    patronvideos = "([^<]+)[^<]+"
-    patronvideos += "(.+?)"
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-    scrapertools.printMatches(matches)
-
-    for match in matches:
-        scrapedurl = match[0]
-        scrapedtitle = match[1] + " " + match[2]
-        scrapedthumbnail = ""
-        scrapedplot = ""
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-        itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
-
-    return itemlist
-
-
-def listvideos(item):
-    logger.info()
-    itemlist = []
-
-    scrapedthumbnail = ""
-    scrapedplot = ""
-
-    # Descarga la página
-    data = scrapertools.cache_page(item.url)
-    patronvideos = "
-        scrapedtitle = re.sub("<[^>]+>", " ", scrapedtitle)
-        scrapedtitle = scrapertools.unescape(scrapedtitle)[1:-1]
-        scrapedurl = match[0]
-        regexp = re.compile(r'src="(http[^"]+)"')
-
-        matchthumb = regexp.search(match[2])
-        if matchthumb is not None:
-            scrapedthumbnail = matchthumb.group(1)
-        matchplot = re.compile('(', re.DOTALL).findall(match[2])
-
-        if len(matchplot) > 0:
-            scrapedplot = matchplot[0]
-            # print matchplot
-        else:
-            scrapedplot = ""
-
-        scrapedplot = re.sub("<[^>]+>", " ", scrapedplot)
-        scrapedplot = scrapertools.unescape(scrapedplot)
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-        itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
-
-    # Extrae la marca de siguiente página
-    patronvideos = "
-    if len(matches) > 0:
-        scrapedtitle = "Página siguiente"
-        scrapedurl = urlparse.urljoin(item.url, matches[0])
-        scrapedthumbnail = ""
-        scrapedplot = ""
-        itemlist.append(Item(channel=item.channel, action="listvideos", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
-
-    return itemlist
-
-    # ~ return itemlist
-
-
-def findvideos(item):
-    logger.info()
-    data = scrapertools.cachePage(item.url)
-
-    # Busca los enlaces a los videos
-
-    listavideos = servertools.findvideos(data)
-
-    if item is None:
-        item = Item()
-
-    itemlist = []
-    for video in listavideos:
-        scrapedtitle = video[0].strip() + " - " + item.title.strip()
-        scrapedurl = video[1]
-        server = video[2]
-
-        itemlist.append(Item(channel=item.channel, title=scrapedtitle, action="play", server=server, url=scrapedurl,
-                             thumbnail=item.thumbnail, show=item.show, plot=item.plot, folder=False))
-
-    return itemlist
diff --git a/plugin.video.alfa/channels/hdfull.py b/plugin.video.alfa/channels/hdfull.py
index f02dff6f..a05cd914 100644
--- a/plugin.video.alfa/channels/hdfull.py
+++ b/plugin.video.alfa/channels/hdfull.py
@@ -28,16 +28,12 @@ def settingCanal(item):

 def login():
     logger.info()
-
     data = agrupa_datos(httptools.downloadpage(host).data)
-
     patron = ""
     sid = scrapertools.find_single_match(data, patron)
-
     post = urllib.urlencode({'__csrf_magic': sid}) + "&username=" + config.get_setting('hdfulluser', 'hdfull') + "&password=" + config.get_setting(
         'hdfullpassword', 'hdfull') + "&action=login"
-
     httptools.downloadpage(host, post=post)
@@ -56,15 +52,12 @@ def mainlist(item):
     else:
         login()
     itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
-
     return itemlist


 def menupeliculas(item):
     logger.info()
-
     itemlist = []
-
     if account:
         itemlist.append(Item(channel=item.channel, action="items_usuario",
                              title="[COLOR orange][B]Favoritos[/B][/COLOR]",
@@ -72,7 +65,6 @@
         itemlist.append(Item(channel=item.channel, action="items_usuario",
                              title="[COLOR orange][B]Pendientes[/B][/COLOR]",
                              url=host + "/a/my?target=movies&action=pending&start=-28&limit=28", folder=True))
-
     itemlist.append(Item(channel=item.channel, action="fichas", title="ABC", url=host + "/peliculas/abc", folder=True))
     itemlist.append(
         Item(channel=item.channel, action="fichas", title="Últimas películas", url=host + "/peliculas", folder=True))
@@ -89,15 +81,12 @@
     itemlist.append(Item(channel=item.channel, action="items_usuario",
                          title="[COLOR orange][B]Vistas[/B][/COLOR]",
                          url=host + "/a/my?target=movies&action=seen&start=-28&limit=28", folder=True))
-
     return itemlist


 def menuseries(item):
     logger.info()
-
     itemlist = []
-
     if account:
         itemlist.append(Item(channel=item.channel, action="items_usuario",
                              title="[COLOR orange][B]Siguiendo[/B][/COLOR]",
@@ -105,9 +94,7 @@
         itemlist.append(Item(channel=item.channel, action="items_usuario",
                              title="[COLOR orange][B]Para Ver[/B][/COLOR]",
                              url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True))
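hdfull's login() and search() both hinge on the site's anti-CSRF token: scrape the __csrf_magic sid from the landing page, then echo it back as the first field of the POST body. The literal patron in login() was lost in this excerpt, so the sketch below borrows the equivalent pattern visible in search(); the settings names come straight from the diff:

    # Sketch of the CSRF handshake in login(); the regex is borrowed from
    # search() below because login()'s own pattern is elided in this excerpt,
    # and the agrupa_datos() whitespace normalization is omitted.
    import urllib
    from core import httptools
    from core import scrapertools
    from platformcode import config

    def login_sketch(host):
        data = httptools.downloadpage(host).data
        sid = scrapertools.find_single_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
        post = urllib.urlencode({'__csrf_magic': sid}) \
               + "&username=" + config.get_setting('hdfulluser', 'hdfull') \
               + "&password=" + config.get_setting('hdfullpassword', 'hdfull') \
               + "&action=login"
        # assumes httptools keeps the session cookie for the later requests
        httptools.downloadpage(host, post=post)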
url=host + "/a/my?target=shows&action=watch&start=-28&limit=28", folder=True)) - itemlist.append(Item(channel=item.channel, action="series_abc", title="A-Z", folder=True)) - itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos Emitidos", url=host + "/a/episodes?action=latest&start=-24&limit=24&elang=ALL", folder=True)) itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Episodios Estreno", @@ -132,20 +119,16 @@ def menuseries(item): itemlist.append(Item(channel=item.channel, action="items_usuario", title="[COLOR orange][B]Vistas[/B][/COLOR]", url=host + "/a/my?target=shows&action=seen&start=-28&limit=28", folder=True)) - return itemlist def search(item, texto): logger.info() - data = agrupa_datos(httptools.downloadpage(host).data) - sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"') item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto item.title = "Buscar..." item.url = host + "/buscar" - try: return fichas(item) # Se captura la excepción, para no interrumpir al buscador global si un canal falla @@ -158,59 +141,44 @@ def search(item, texto): def series_abc(item): logger.info() - itemlist = [] - az = "ABCDEFGHIJKLMNOPQRSTUVWXYZ#" - for l in az: itemlist.append( Item(channel=item.channel, action='fichas', title=l, url=host + "/series/abc/" + l.replace('#', '9'))) - return itemlist def items_usuario(item): logger.info() - itemlist = [] ## Carga estados status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data) - ## Fichas usuario url = item.url.split("?")[0] post = item.url.split("?")[1] - old_start = scrapertools.get_match(post, 'start=([^&]+)&') limit = scrapertools.get_match(post, 'limit=(\d+)') start = "%s" % (int(old_start) + int(limit)) - post = post.replace("start=" + old_start, "start=" + start) next_page = url + "?" 
-
     ## Carga las fichas de usuario
     data = httptools.downloadpage(url, post=post).data
     fichas_usuario = jsontools.load(data)
-
     for ficha in fichas_usuario:
-
         try: title = ficha['title']['es'].strip()
         except: title = ficha['title']['en'].strip()
-
         try: title = title.encode('utf-8')
         except: pass
-
         show = title
-
         try: thumbnail = host + "/thumbs/" + ficha['thumbnail']
         except: thumbnail = host + "/thumbs/" + ficha['thumb']
-
         try:
             url = urlparse.urljoin(host, '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1"
             action = "episodios"
@@ -237,37 +205,26 @@
             action = "findvideos"
             str = get_status(status, 'movies', ficha['id'])
             if str != "": title += str
-
-        # try: title = title.encode('utf-8')
-        # except: pass
-
         itemlist.append(
             Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url, thumbnail=thumbnail,
                  show=show, folder=True))
-
     if len(itemlist) == int(limit):
         itemlist.append(
             Item(channel=item.channel, action="items_usuario", title=">> Página siguiente", url=next_page,
                  folder=True))
-
     return itemlist


 def listado_series(item):
     logger.info()
-
     itemlist = []
-
     data = agrupa_datos(httptools.downloadpage(item.url).data)
-
     patron = ''
     matches = re.compile(patron, re.DOTALL).findall(data)
-
     for scrapedurl, scrapedtitle in matches:
         url = scrapedurl + "###0;1"
         itemlist.append(
             Item(channel=item.channel, action="episodios", title=scrapedtitle, fulltitle=scrapedtitle, url=url,
                  show=scrapedtitle, contentType="tvshow"))
-
     return itemlist
@@ -278,22 +235,19 @@ def fichas(item):
     infoLabels=dict()

     ## Carga estados
     status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
-
     if item.title == "Buscar...":
         data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
         s_p = scrapertools.get_match(data, '(.*?)
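The excerpt cuts off inside fichas(), but the pattern it shares with items_usuario() is already visible: user state is downloaded once as JSON from /a/status/all, and get_status() turns it into a suffix appended to each title. A sketch of that decoration step (get_status() is defined outside this excerpt, so its return shape — a displayable tag or an empty string — is inferred from the call sites above):

    # Sketch of the status decoration shared by items_usuario() and fichas().
    # get_status(status, kind, id) lives outside this excerpt; per the call
    # sites above it is assumed to return a displayable suffix or "".
    from core import httptools
    from core import jsontools

    def decorated_title(title, kind, ficha_id, host):
        status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
        suffix = get_status(status, kind, ficha_id)  # "" when the ficha has no state
        if suffix != "":
            title += suffix
        return title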