diff --git a/plugin.video.alfa/channels/animemovil.py b/plugin.video.alfa/channels/animemovil.py
index fe586fa3..e26f0c97 100644
--- a/plugin.video.alfa/channels/animemovil.py
+++ b/plugin.video.alfa/channels/animemovil.py
@@ -96,7 +96,6 @@ def recientes(item):
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
- action ="findvideos"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
diff --git a/plugin.video.alfa/channels/bajui.py b/plugin.video.alfa/channels/bajui.py
index 0baf7190..017562e3 100644
--- a/plugin.video.alfa/channels/bajui.py
+++ b/plugin.video.alfa/channels/bajui.py
@@ -1,8 +1,9 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
import re
import urlparse
+from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -33,8 +34,7 @@ def menupeliculas(item):
Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
- #
- data = scrapertools.cache_page(item.url)
+ data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '')
patron = '([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -78,7 +78,6 @@ def menudocumentales(item):
return itemlist
-# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro
def search(item, texto, categoria=""):
logger.info(item.url + " search " + texto)
itemlist = []
@@ -101,9 +100,7 @@ def search(item, texto, categoria=""):
def peliculas(item, paginacion=True):
logger.info()
url = item.url
-
- # Descarga la página
- data = scrapertools.cache_page(url)
+ data = httptools.downloadpage(url).data
patron = 'Ficha\: ([^<]+)[^<]+'
@@ -118,16 +115,11 @@ def peliculas(item, paginacion=True):
scrapedtitle = title
scrapedplot = clean_plot(plot)
scrapedurl = urlparse.urljoin(item.url, url)
- scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
- logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
- # Añade al listado de XBMC
+ scrapedthumbnail = urlparse.urljoin("http://bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
itemlist.append(
Item(channel=item.channel, action="enlaces", title=scrapedtitle, fulltitle=title, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedtitle, context="4|5",
fanart=item.fanart, viewmode="movie_with_plot"))
-
- # Extrae el paginador
patron = 'Siguiente \»\;'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -187,7 +179,7 @@ def enlaces(item):
logger.info()
itemlist = []
- data = scrapertools.cache_page(item.url)
+ data = httptools.downloadpage(item.url).data
try:
item.plot = scrapertools.get_match(data, '(.*?)')
@@ -201,18 +193,6 @@ def enlaces(item):
except:
pass
- '''
-
-
-
-
-
Actualizado: Hace 8 minutos
-
-
-
- '''
-
patron = '
Enlaces[^<]+'
patron += '
([^<]+)[^<]+'
@@ -222,19 +202,15 @@ def enlaces(item):
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
- logger.debug("matches=" + repr(matches))
for thumbnail, usuario, fecha, id, id2, servidores in matches:
- #








patronservidores = '

]+>
]+>
- #
- #
- #
- # docs
- #
- #
- #
-
- # busca series
patron = "]+>(.*?)"
patron += ".*?([^']+)"
patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"
matches = scrapertools.find_multiple_matches(data, patron)
- scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode(
@@ -119,10 +101,7 @@ def buscador(item):
# busca pelis
patron = "]+>(.*?)"
patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html"
-
matches = re.compile(patron, re.DOTALL).findall(data)
- scrapertools.printMatches(matches)
-
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8')
url = urlparse.urljoin(item.url, scrapedurl)
@@ -135,10 +114,7 @@ def buscador(item):
patron += "(.*?).*?"
patron += "(.*?) | "
patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html"
-
matches = re.compile(patron, re.DOTALL).findall(data)
- scrapertools.printMatches(matches)
-
for scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapedtitle.decode('iso-8859-1').encode('utf8') + " " + scrapedinfo.decode('iso-8859-1').encode('utf8')
url = urlparse.urljoin(item.url, scrapedurl)
@@ -154,23 +130,7 @@ def buscador(item):
def getlist(item):
logger.info()
itemlist = []
-
data = httptools.downloadpage(item.url).data
-
- # pelis
- #
- # 
- #

- #
- # docs
- #
- #
- # 
-
if item.url.find("peliculas") > -1:
patron = '
[^<]+'
patron += '
'
@@ -202,27 +162,18 @@ def getlist(item):
action = "episodios"
folder = True
extra = "docus"
-
matches = re.compile(patron, re.DOTALL).findall(data)
- scrapertools.printMatches(matches)
-
for scrapedurl, scrapedthumbnail in matches:
title = scrapertools.get_match(scrapedurl, patron_enlace)
title = title.replace("-", " ")
url = urlparse.urljoin(item.url, scrapedurl)
- thumbnail = urlparse.urljoin(item.url, urllib.quote(scrapedthumbnail))
+ thumbnail = host + urllib.quote(scrapedthumbnail)
plot = ""
- logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
folder=folder, extra=extra))
matches = re.compile(patron_title, re.DOTALL).findall(data)
- scrapertools.printMatches(matches)
- # Cambia el título sacado de la URL por un título con más información.
- # esta implementación asume que va a encontrar las mismas coincidencias
- # que en el bucle anterior, lo cual técnicamente es erróneo, pero que
- # funciona mientras no cambien el formato de la página
cnt = 0
for scrapedtitle, notused, scrapedinfo in matches:
title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
@@ -244,7 +195,6 @@ def getlist(item):
# Extrae el paginador
patronvideos = " Siguiente >>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
- scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
@@ -267,18 +217,11 @@ def episodios(item):
item.thumbnail = scrapertools.find_single_match(data,
"src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
- item.thumbnail = 'http://www.mejortorrent.com' + urllib.quote(item.thumbnail)
+ item.thumbnail = host + urllib.quote(item.thumbnail)
# ")
- '''
- 4x01 - Episodio en V.O. Sub Esp. |
-
Fecha: 2014-04-07 |
-
-
- '''
-
if item.extra == "series":
patron = " | ]+>]+>([^>]+) | [^<]+"
else:
@@ -289,7 +232,6 @@ def episodios(item):
patron += "
link = scrapertools.get_match(torrent_data, "
")
link = urlparse.urljoin(url, link)
-
logger.debug("link=" + link)
-
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
@@ -402,26 +339,12 @@ def play(item):
data = httptools.downloadpage(item.url, post=item.extra).data
logger.debug("data=" + data)
- # series
- #
- # El sueo de todos.
-
params = dict(urlparse.parse_qsl(item.extra))
-
patron = '
>":
itemlist.pop()
- item.url = 'http://www.mejortorrent.com/torrents-de-series.html'
+ item.url = host + "/torrents-de-series.html"
itemlist.extend(getlist(item))
if itemlist[-1].title == "Pagina siguiente >>":
itemlist.pop()
diff --git a/plugin.video.alfa/channels/pelisplus.py b/plugin.video.alfa/channels/pelisplus.py
index 3e06e516..93dd1fa9 100644
--- a/plugin.video.alfa/channels/pelisplus.py
+++ b/plugin.video.alfa/channels/pelisplus.py
@@ -238,23 +238,12 @@ def lista(item):
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
-
- #Encuentra los elementos que no tienen plot y carga las paginas correspondientes para obtenerlo#
- for item in itemlist:
- if item.infoLabels['plot'] == '':
- data = httptools.downloadpage(item.url).data
- item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
- item.plot = scrapertools.find_single_match(data,
- 'Sinopsis:<\/span>.([^<]+)<\/span>.<\/p>')
-
- # Paginacion
if item.title != 'Buscar' and actual != '':
if itemlist != []:
next_page = str(int(actual) + 1)
next_page_url = item.extra + 'pag-' + next_page
if not next_page_url.startswith("http"):
- next_page_url = host + next_page_url
+ next_page_url = host + next_page_url
itemlist.append(
Item(channel=item.channel,
action="lista",
@@ -441,9 +430,8 @@ def get_vip(url):
else:
id = scrapertools.find_single_match(item,'episodes\/(\d+)')
new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
- data=httptools.downloadpage(new_url, follow_redirects=False).headers
- itemlist.extend(servertools.find_video_items(data=str(data)))
-
+ data=httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
+ itemlist.append(Item(url=data))
return itemlist
@@ -463,22 +451,17 @@ def findvideos(item):
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
- # videoitem.infoLabels = item.infoLabels
videoitem.channel = item.channel
+ videoitem.infoLabels = item.infoLabels
if videoitem.quality == '' or videoitem.language == '':
videoitem.quality = 'default'
videoitem.language = 'Latino'
- if videoitem.server != '':
- videoitem.thumbnail = item.thumbnail
- else:
- videoitem.thumbnail = item.thumbnail
- videoitem.server = 'directo'
videoitem.action = 'play'
videoitem.fulltitle = item.title
-
if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
- videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
+ videoitem.title = item.contentTitle + ' (%s)'
+ itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
n = 0
for videoitem in itemlist:
if 'youtube' in videoitem.url:
@@ -490,7 +473,7 @@ def findvideos(item):
itemlist.pop(1)
# Requerido para FilterTools
-
+ tmdb.set_infoLabels_itemlist(itemlist, True)
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
@@ -511,6 +494,11 @@ def findvideos(item):
return itemlist
+def play(item):
+ item.thumbnail = item.contentThumbnail
+ return [item]
+
+
def newest(categoria):
logger.info()
itemlist = []
diff --git a/plugin.video.alfa/channels/yaske.py b/plugin.video.alfa/channels/yaske.py
index 2f63fccc..3ae191b6 100644
--- a/plugin.video.alfa/channels/yaske.py
+++ b/plugin.video.alfa/channels/yaske.py
@@ -11,6 +11,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
+idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
HOST = 'http://www.yaske.ro'
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
@@ -119,15 +120,9 @@ def peliculas(item):
matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
idiomas_disponibles = []
for idioma in matchesidiomas:
- if idioma.endswith("/la.png"):
- idiomas_disponibles.append("LAT")
- elif idioma.endswith("/en.png"):
- idiomas_disponibles.append("VO")
- elif idioma.endswith("/en_es.png"):
- idiomas_disponibles.append("VOSE")
- elif idioma.endswith("/es.png"):
- idiomas_disponibles.append("ESP")
-
+ for lang in idiomas1.keys():
+ if idioma.endswith(lang):
+ idiomas_disponibles.append(idiomas1[lang])
if idiomas_disponibles:
idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
contentTitle = scrapertoolsV2.htmlclean(scrapedtitle.strip())
@@ -179,36 +174,28 @@ def findvideos(item):
logger.info()
itemlist = []
sublist = []
-
- # Descarga la página
- url = "http://widget.olimpo.link/playlist/?tmdb=" + scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
+ data = httptools.downloadpage(item.url).data
+ mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
+ patron = '(?s)id="online".*?server="([^"]+)"'
+ mserver = scrapertools.find_single_match(data, patron)
+ url = "http://olimpo.link/?tmdb=%s&server=%s" %(mtmdb, mserver)
data = httptools.downloadpage(url).data
- if not item.plot:
- item.plot = scrapertoolsV2.find_single_match(data, '>Sinopsis ([^<]+)')
- item.plot = scrapertoolsV2.decodeHtmlentities(item.plot)
-
- patron = '(/embed/[^"]+).*?'
- patron += 'quality text-overflow ">([^<]+).*?'
- patron += 'title="([^"]+)'
+ patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
+ patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
matches = scrapertools.find_multiple_matches(data, patron)
-
- for url, calidad, idioma in matches:
- if 'embed' in url:
- url = "http://widget.olimpo.link" + url
- data = httptools.downloadpage(url).data
- url = scrapertools.find_single_match(data, 'iframe src="([^"]+)')
- sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
- language=idioma.strip()))
- sublist = servertools.get_servers_itemlist(sublist, lambda i: "Ver en %s %s" % (i.server, i.quality), True)
-
- # Añadir servidores encontrados, agrupandolos por idioma
+ for server, url, idioma, calidad in matches:
+ sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
+ language=idioma.strip(),
+ title="Ver en %s %s" %(server, calidad)
+ ))
for k in ["Español", "Latino", "Subtitulado", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
- itemlist.append(Item(channel=item.channel, title=k, fanart=item.fanart, folder=False,
+ itemlist.append(item.clone(title=k, folder=False,
text_color=color2, text_bold=True, thumbnail=thumbnail_host))
itemlist.extend(lista_idioma)
+ tmdb.set_infoLabels(itemlist, True)
# Insertar items "Buscar trailer" y "Añadir a la videoteca"
if itemlist and item.extra != "library":
title = "%s [Buscar trailer]" % (item.contentTitle)
@@ -221,3 +208,12 @@ def findvideos(item):
contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
return itemlist
+
+def play(item):
+ logger.info()
+ itemlist = []
+ ddd = httptools.downloadpage(item.url).data
+ url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '