Merge remote-tracking branch 'alfa-addon/master'

Authored by: unknown
Date: 2017-11-28 09:02:06 -03:00
17 changed files with 168 additions and 873 deletions

View File

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.4.2" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.4.3" provider-name="Alfa Addon">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,12 @@
     </assets>
     <news>[B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» cartoonlatino » clasicofilm
-» hdfull » yaske
-» ver-peliculas » thevideome
-» powvideo ¤ arreglos internos
-[COLOR green]Gracias a [COLOR yellow]caperucitaferoz[/COLOR] por su colaboración en esta versión[/COLOR]
+» cinetux » descargasmix
+» hdfull » peliculasdk
+» pelisfox » yaske
+» gvideo » powvideo
+» yourupload ¤ arreglos internos
     </news>
     <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
     <summary lang="en">Browse web pages using Kodi</summary>
     <description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>

View File

@@ -367,6 +367,9 @@ def play(item):
     scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
         "location", "")
     item.url = scrapedurl
-    item.thumbnail = item.contentThumbnail
-    item.server = servertools.get_server_from_url(item.url)
-    return [item]
+    item.server = ""
+    itemlist.append(item.clone())
+    itemlist = servertools.get_servers_itemlist(itemlist)
+    for i in itemlist:
+        i.thumbnail = i.contentThumbnail
+    return itemlist
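This hunk is the first of several in this merge that replace per-item `get_server_from_url` calls with a single `servertools.get_servers_itemlist` pass. A minimal sketch of the resulting `play()` shape, assuming (as the calls in this diff suggest) that `get_servers_itemlist` fills in `item.server` by matching each item's `url`:

```python
from core import servertools

def play(item):
    # Sketch only: clear the server and let servertools identify it from the
    # URL in one pass over the whole list, instead of guessing per item.
    itemlist = []
    item.server = ""
    itemlist.append(item.clone())
    itemlist = servertools.get_servers_itemlist(itemlist)
    for i in itemlist:
        # reuse the content artwork for the playable entries
        i.thumbnail = i.contentThumbnail
    return itemlist
```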

View File

@@ -42,12 +42,12 @@ def mainlist(item):
                                fanart="http://i.imgur.com/ggFFR8o.png"))
     itemlist.append(item.clone(title="", action=""))
     itemlist.append(item.clone(title="Buscar...", action="search"))
-    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
+    itemlist.append(item.clone(action="setting_channel", title="Configurar canal...", text_color="gold", folder=False))
     return itemlist


-def configuracion(item):
+def setting_channel(item):
     from platformcode import platformtools
     ret = platformtools.show_channel_settings()
     platformtools.itemlist_refresh()
@@ -108,7 +108,7 @@ def busqueda(item):

 def lista(item):
     logger.info()
-    itemlist = []
+    itemlist = list()
     itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/peliculas" % host))
     itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host))
@@ -125,7 +125,7 @@ def lista(item):

 def lista_series(item):
     logger.info()
-    itemlist = []
+    itemlist = list()
     itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/series/" % host))
     itemlist.append(item.clone(title="Miniseries", action="entradas", url="%s/series/miniseries" % host))
@@ -254,7 +254,7 @@ def episodios(item):
     return itemlist


-def epienlaces(item):
+def episode_links(item):
     logger.info()
     itemlist = []
     item.text_color = color3
@@ -286,7 +286,7 @@ def epienlaces(item):
             else:
                 if servertools.is_server_enabled(scrapedserver):
                     try:
-                        servers_module = __import__("servers." + scrapedserver)
+                        # servers_module = __import__("servers." + scrapedserver)
                         lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                                         extra=item.url))
                     except:
@@ -302,13 +302,14 @@ def epienlaces(item):

 def findvideos(item):
     logger.info()
-    if item.contentSeason!='':
-        return epienlaces(item)
+    if item.contentSeason != '':
+        return episode_links(item)
     itemlist = []
     item.text_color = color3
     data = get_data(item.url)
     item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
     year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
     if year:
@@ -346,9 +347,9 @@ def findvideos(item):
     patron = 'make_links.*?,[\'"]([^"\']+)["\']'
     matches = scrapertools.find_multiple_matches(data_online, patron)
     for i, code in enumerate(matches):
-        enlace = mostrar_enlaces(code)
-        enlaces = servertools.findvideos(data=enlace[0])
-        if enlaces and "peliculas.nu" not in enlaces:
+        enlace = show_links(code)
+        links = servertools.findvideos(data=enlace[0])
+        if links and "peliculas.nu" not in links:
             if i == 0:
                 extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
                 size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()
@@ -362,8 +363,8 @@ def findvideos(item):
                     new_item.title += " +INFO"
                 itemlist.append(new_item)
-            title = " Ver vídeo en " + enlaces[0][2]
-            itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))
+            title = " Ver vídeo en " + links[0][2]
+            itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
     scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
     if scriptg:
         gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
@@ -419,9 +420,9 @@ def findvideos(item):
                 continue
             if servertools.is_server_enabled(scrapedserver):
                 try:
-                    servers_module = __import__("servers." + scrapedserver)
+                    # servers_module = __import__("servers." + scrapedserver)
                     # Saca numero de enlaces
-                    urls = mostrar_enlaces(scrapedurl)
+                    urls = show_links(scrapedurl)
                     numero = str(len(urls))
                     titulo = " %s - Nº enlaces: %s" % (titulo, numero)
                     itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
@@ -449,12 +450,13 @@ def play(item):
                                         headers=headers, follow_redirects=False).data
         url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace("\\", "")
-    if "enlacesmix" in url:
+    if "enlacesmix" in url or "enlacesws.com" in url:
         data = httptools.downloadpage(url, headers={'Referer': item.extra}, follow_redirects=False).data
         url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
-    enlaces = servertools.findvideosbyserver(url, item.server)
-    if enlaces:
-        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
+    links = servertools.findvideosbyserver(url, item.server)
+    if links:
+        itemlist.append(item.clone(action="play", server=links[0][2], url=links[0][1]))
     else:
         itemlist.append(item.clone())
@@ -465,13 +467,13 @@ def enlaces(item):
     logger.info()
     itemlist = []
-    urls = mostrar_enlaces(item.extra)
+    urls = show_links(item.extra)
     numero = len(urls)
-    for enlace in urls:
-        enlaces = servertools.findvideos(data=enlace)
-        if enlaces:
-            for link in enlaces:
-                if "/folder/" in enlace:
+    for url in urls:
+        links = servertools.findvideos(data=url)
+        if links:
+            for link in links:
+                if "/folder/" in url:
                     titulo = link[0]
                 else:
                     titulo = "%s - Enlace %s" % (item.title.split("-")[0], str(numero))
@@ -482,7 +484,7 @@ def enlaces(item):
     return itemlist


-def mostrar_enlaces(data):
+def show_links(data):
     import base64
     data = data.split(",")
     len_data = len(data)
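Only the opening lines of the renamed `show_links` helper appear in this hunk; the `base64` import and the comma split suggest it decodes a comma-joined base64 payload into plain links. A hypothetical sketch of that idea (the decoding details are illustrative, not the channel's verbatim logic):

```python
import base64

def show_links(data):
    # Hypothetical reconstruction: treat each comma-separated chunk
    # as a base64-encoded link and decode it.
    urls = []
    for chunk in data.split(","):
        try:
            urls.append(base64.b64decode(chunk))
        except TypeError:  # Python 2 raises TypeError on bad padding
            pass
    return urls
```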
@@ -536,6 +538,7 @@ def get_data(url_orig, get_host=False):
     return response.data

 def newest(categoria):
     logger.info()
     itemlist = []
@@ -558,7 +561,6 @@ def newest(categoria):
         itemlist.extend(entradas(item))
         if itemlist[-1].title == ">> Siguiente":
             itemlist.pop()
@@ -566,7 +568,7 @@ def newest(categoria):
     except:
         import sys
         for line in sys.exc_info():
-            logger.error("{0}".format(line))
+            logger.error("%s" % line)
         return []
     return itemlist

View File

@@ -310,7 +310,8 @@ def fichas(item):
     for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:
-        thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
+        #thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
+        thumbnail = scrapedthumbnail
         language = ''
         title = scrapedtitle.strip()
         show = title

View File

@@ -2,6 +2,7 @@
 import re

+from core import httptools
 from core import scrapertools
 from core import servertools
 from core.item import Item
@@ -10,74 +11,29 @@ from platformcode import logger
 from platformcode import config
 from core import tmdb

-try:
-    import xbmc
-    import xbmcgui
-except:
-    pass
-import unicodedata
-
-ACTION_SHOW_FULLSCREEN = 36
-ACTION_GESTURE_SWIPE_LEFT = 511
-ACTION_SELECT_ITEM = 7
-ACTION_PREVIOUS_MENU = 10
-ACTION_MOVE_LEFT = 1
-ACTION_MOVE_RIGHT = 2
-ACTION_MOVE_DOWN = 4
-ACTION_MOVE_UP = 3
-OPTION_PANEL = 6
-OPTIONS_OK = 5
-
-host = "http://www.peliculasdk.com/"
-
-
-def bbcode_kodi2html(text):
-    if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
-        import re
-        text = re.sub(r'\[COLOR\s([^\]]+)\]',
-                      r'<span style="color: \1">',
-                      text)
-        text = text.replace('[/COLOR]', '</span>')
-        text = text.replace('[CR]', '<br>')
-        text = text.replace('[B]', '<b>')
-        text = text.replace('[/B]', '</b>')
-        text = text.replace('"color: yellow"', '"color: gold"')
-        text = text.replace('"color: white"', '"color: auto"')
-    return text
+host = "http://www.peliculasdk.com"


 def mainlist(item):
     logger.info()
     itemlist = []
-    title = "Estrenos"
-    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
     itemlist.append(
-        Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/ver/estrenos",
+        Item(channel=item.channel, title="[COLOR orange]Estrenos[/COLOR]", action="peliculas", url=host + "/ver/estrenos",
             fanart="http://s24.postimg.org/z6ulldcph/pdkesfan.jpg",
             thumbnail="http://s16.postimg.org/st4x601d1/pdkesth.jpg"))
-    title = "PelisHd"
-    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
     itemlist.append(
-        Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-720/",
+        Item(channel=item.channel, title="[COLOR orange]PelisHd[/COLOR]", action="peliculas", url=host + "/calidad/HD-720/",
             fanart="http://s18.postimg.org/wzqonq3w9/pdkhdfan.jpg",
             thumbnail="http://s8.postimg.org/nn5669ln9/pdkhdthu.jpg"))
-    title = "Pelis HD-Rip"
-    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
     itemlist.append(
-        Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-320",
+        Item(channel=item.channel, title="[COLOR orange]Pelis HD-Rip[/COLOR]", action="peliculas", url=host + "/calidad/HD-320",
             fanart="http://s7.postimg.org/3pmnrnu7f/pdkripfan.jpg",
             thumbnail="http://s12.postimg.org/r7re8fie5/pdkhdripthub.jpg"))
-    title = "Pelis Audio español"
-    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
     itemlist.append(
-        Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/idioma/Espanol/",
+        Item(channel=item.channel, title="[COLOR orange]Pelis Audio español[/COLOR]", action="peliculas", url=host + "/idioma/Espanol/",
             fanart="http://s11.postimg.org/65t7bxlzn/pdkespfan.jpg",
             thumbnail="http://s13.postimg.org/sh1034ign/pdkhsphtub.jpg"))
-    title = "Buscar..."
-    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
     itemlist.append(
-        Item(channel=item.channel, title=title, action="search", url="http://www.peliculasdk.com/calidad/HD-720/",
+        Item(channel=item.channel, title="[COLOR orange]Buscar...[/COLOR]", action="search", url=host + "/calidad/HD-720/",
             fanart="http://s14.postimg.org/ceqajaw2p/pdkbusfan.jpg",
             thumbnail="http://s13.postimg.org/o85gsftyv/pdkbusthub.jpg"))
@@ -88,7 +44,7 @@ def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
-    item.url = "http://www.peliculasdk.com/index.php?s=%s&x=0&y=0" % (texto)
+    item.url = host + "/index.php?s=%s&x=0&y=0" % (texto)
     try:
         return buscador(item)
@@ -103,11 +59,8 @@ def search(item, texto):
 def buscador(item):
     logger.info()
     itemlist = []
-    # Descarga la página
-    data = scrapertools.cache_page(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     patron = '<div class="karatula".*?'
     patron += 'src="([^"]+)".*?'
     patron += '<div class="tisearch"><a href="([^"]+)">'
@@ -115,57 +68,38 @@ def buscador(item):
     patron += 'Audio:(.*?)</a>.*?'
     patron += 'Género:(.*?)</a>.*?'
     patron += 'Calidad:(.*?),'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    scrapertools.printMatches(matches)
     for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedlenguaje, scrapedgenero, scrapedcalidad in matches:
-        try:
-            year = scrapertools.get_match(scrapedtitle, '\((\d+)\)')
-        except:
-            year = ""
-        title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "",
-                           scrapedtitle).strip()
+        year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
         scrapedcalidad = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedcalidad).strip()
         scrapedlenguaje = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedlenguaje).strip()
         if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad:
-            scrapedcalidad = scrapedcalidad.replace(scrapedcalidad,
-                                                    bbcode_kodi2html("[COLOR orange]" + scrapedcalidad + "[/COLOR]"))
-            scrapedlenguaje = scrapedlenguaje.replace(scrapedlenguaje,
-                                                      bbcode_kodi2html("[COLOR orange]" + scrapedlenguaje + "[/COLOR]"))
-            scrapedtitle = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")"
-            scrapedtitle = scrapedtitle.replace(scrapedtitle,
-                                                bbcode_kodi2html("[COLOR white]" + scrapedtitle + "[/COLOR]"))
-            extra = year + "|" + title_fan
-            itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart",
-                                 thumbnail=scrapedthumbnail, extra=extra,
+            scrapedcalidad = "[COLOR orange]" + scrapedcalidad + "[/COLOR]"
+            scrapedlenguaje = "[COLOR orange]" + scrapedlenguaje + "[/COLOR]"
+            scrapedtitle = scrapedtitle.split("(")[0].strip()
+            title = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")"
+            title = "[COLOR white]" + title + "[/COLOR]"
+            itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action="findvideos",
+                                 thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, infoLabels={'year': year},
                                  fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True))
+    tmdb.set_infoLabels(itemlist, True)
     try:
         next_page = scrapertools.get_match(data,
                                            '<span class="current">.*?<a href="(.*?)".*?>Siguiente &raquo;</a></div>')
-        title = "siguiente>>"
-        title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]"))
-        itemlist.append(Item(channel=item.channel, action="buscador", title=title, url=next_page,
+        itemlist.append(Item(channel=item.channel, action="buscador", title="[COLOR red]siguiente>>[/COLOR]", url=next_page,
                              thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png",
                              fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True))
     except:
         pass
     return itemlist
 def peliculas(item):
     logger.info()
     itemlist = []
     # Descarga la página
-    data = scrapertools.cache_page(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|&#.*?;", "", data)
     patron = 'style="position:relative;"> '
     patron += '<a href="([^"]+)" '
     patron += 'title="([^<]+)">'
@@ -173,363 +107,64 @@ def peliculas(item):
     patron += 'Audio:(.*?)</br>.*?'
     patron += 'Calidad:(.*?)</br>.*?'
     patron += 'Género:.*?tag">(.*?)</a>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlenguaje, scrapedcalidad, scrapedgenero in matches:
-        try:
-            year = scrapertools.get_match(scrapedtitle, '\((\d+)\)')
-        except:
-            year = ""
-        title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", scrapedtitle)
+        year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
         scrapedtitle = re.sub(r"\(\d+\)", "", scrapedtitle).strip()
         scrapedcalidad = re.sub(r"<a href.*?>|</a>", "", scrapedcalidad).strip()
         scrapedlenguaje = re.sub(r"<a href.*?>|</a>", "", scrapedlenguaje).strip()
         scrapedlenguaje = scrapedlenguaje.split(',')
         if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad:
             scrapedtitle = scrapedtitle
-            extra = year + "|" + title_fan
-            new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart",
-                            thumbnail=scrapedthumbnail, extra=extra,
-                            fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True,
-                            language=scrapedlenguaje, quality=scrapedcalidad, contentTitle=scrapedtitle, infoLabels={
-                            'year':year})
-            #TODO Dividir los resultados antes
-            #if year:
-            #    tmdb.set_infoLabels_item(new_item)
-            itemlist.append(new_item)
+            itemlist.append(Item(channel=item.channel,
+                                 title=scrapedtitle,
+                                 url=scrapedurl,
+                                 action="findvideos",
+                                 thumbnail=scrapedthumbnail,
+                                 fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True,
+                                 language=scrapedlenguaje,
+                                 quality=scrapedcalidad,
+                                 contentTitle=scrapedtitle,
+                                 infoLabels={'year': year}
+                                 ))
+    tmdb.set_infoLabels(itemlist)
     ## Paginación
     next_page = scrapertools.get_match(data, '<span class="current">.*?<a href="(.*?)".*?>Siguiente &raquo;</a></div>')
-    title = "siguiente>>"
-    title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]"))
-    itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=next_page,
+    itemlist.append(Item(channel=item.channel, action="peliculas", title="[COLOR red]siguiente>>[/COLOR]", url=next_page,
                          thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png",
                          fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True))
     return itemlist
def fanart(item):
logger.info()
itemlist = []
url = item.url
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
title_fan = item.extra.split("|")[1]
title = re.sub(r'Serie Completa|Temporada.*?Completa', '', title_fan)
fulltitle = title
title = title.replace(' ', '%20')
title = ''.join(
(c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn'))
try:
sinopsis = scrapertools.find_single_match(data, '<span class="clms">Sinopsis: <\/span>(.*?)<\/div>')
except:
sinopsis = ""
year = item.extra.split("|")[0]
if not "series" in item.url:
# filmafinity
url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(
title, year)
data = scrapertools.downloadpage(url)
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
if url_filmaf:
url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
data = scrapertools.downloadpage(url_filmaf)
else:
try:
url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year)
data = browser(url_bing)
data = re.sub(r'\n|\r|\t|\s{2}|&nbsp;', '', data)
if "myaddrproxy.php" in data:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
else:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"')
url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
if not "http" in url_filma:
data = scrapertools.cachePage("http://" + url_filma)
else:
data = scrapertools.cachePage(url_filma)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
except:
pass
if sinopsis == " ":
try:
sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>')
sinopsis = sinopsis.replace("<br><br />", "\n")
sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis)
except:
pass
try:
rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">')
except:
rating_filma = "Sin puntuacion"
critica = ""
patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"'
matches_reviews = scrapertools.find_multiple_matches(data, patron)
if matches_reviews:
for review, autor, valoracion in matches_reviews:
review = dhe(scrapertools.htmlclean(review))
review += "\n" + autor + "[CR]"
review = re.sub(r'Puntuac.*?\)', '', review)
if "positiva" in valoracion:
critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review
elif "neutral" in valoracion:
critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review
else:
critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review
else:
critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]"
print "ozuu"
print critica
url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&year=" + year + "&language=es&include_adult=false"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
title = re.sub(r":.*|\(.*?\)", "", title)
url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&language=es&include_adult=false"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica
show = item.fanart + "|" + "" + "|" + sinopsis
posterdb = item.thumbnail
fanart_info = item.fanart
fanart_3 = ""
fanart_2 = item.fanart
category = item.thumbnail
id_scraper = ""
itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos",
thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show,
category=category, library=item.library, fulltitle=fulltitle, folder=True))
for id, fan in matches:
fan = re.sub(r'\\|"', '', fan)
try:
rating = scrapertools.find_single_match(data, '"vote_average":(.*?),')
except:
rating = "Sin puntuación"
id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica
try:
posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"')
posterdb = "https://image.tmdb.org/t/p/original" + posterdb
except:
posterdb = item.thumbnail
if "null" in fan:
fanart = item.fanart
else:
fanart = "https://image.tmdb.org/t/p/original" + fan
item.extra = fanart
url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=2e2160006592024ba87ccdf78c28f49f"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
fanart_info = item.extra
fanart_3 = ""
fanart_2 = item.extra
for fanart_info, fanart_3, fanart_2 in matches:
fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info
fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3
fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2
if fanart == item.fanart:
fanart = fanart_info
# clearart, fanart_2 y logo
url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=dffe90fba4d02c199ae7a9e71330c987"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"hdmovielogo":.*?"url": "([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if '"moviedisc"' in data:
disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"')
if '"movieposter"' in data:
poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"')
if '"moviethumb"' in data:
thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"')
if '"moviebanner"' in data:
banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"')
if len(matches) == 0:
extra = posterdb
# "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png"
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
category = posterdb
itemlist.append(
Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent",
thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category,
library=item.library, fulltitle=fulltitle, folder=True))
for logo in matches:
if '"hdmovieclearart"' in data:
clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"')
if '"moviebackground"' in data:
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
else:
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
if '"moviebackground"' in data:
if '"hdmovieclearart"' in data:
clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"')
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
else:
extra = logo
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = logo
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
if not '"hdmovieclearart"' in data and not '"moviebackground"' in data:
extra = logo
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = item.extra
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
thumbnail=logo, fanart=item.extra, extra=extra, show=show,
category=category, library=item.library, fulltitle=fulltitle, folder=True))
title_info = "Info"
if posterdb == item.thumbnail:
if '"movieposter"' in data:
thumbnail = poster
else:
thumbnail = item.thumbnail
else:
thumbnail = posterdb
id = id_scraper
extra = extra + "|" + id + "|" + title.encode('utf8')
title_info = title_info.replace(title_info, bbcode_kodi2html("[COLOR skyblue]" + title_info + "[/COLOR]"))
itemlist.append(Item(channel=item.channel, action="info", title=title_info, url=item.url, thumbnail=thumbnail,
fanart=fanart_info, extra=extra, category=category, show=show, folder=False))
    return itemlist


 def findvideos(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cache_page(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"<!--.*?-->", "", data)
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     bloque_tab = scrapertools.find_single_match(data, '<div id="verpelicula">(.*?)<div class="tab_container">')
     patron = '<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
     check = re.compile(patron, re.DOTALL).findall(bloque_tab)
     servers_data_list = []
     patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     if len(matches) == 0:
         patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
         matches = re.compile(patron, re.DOTALL).findall(data)
     for check_tab, server, id in matches:
-        scrapedplot = scrapertools.get_match(data, '<span class="clms">(.*?)</div></div>')
-        plotformat = re.compile('(.*?:) </span>', re.DOTALL).findall(scrapedplot)
-        scrapedplot = scrapedplot.replace(scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]"))
-        for plot in plotformat:
-            scrapedplot = scrapedplot.replace(plot, bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]"))
-        scrapedplot = scrapedplot.replace("</span>", "[CR]")
-        scrapedplot = scrapedplot.replace(":", "")
         if check_tab in str(check):
             idioma, calidad = scrapertools.find_single_match(str(check), "" + check_tab + "', '(.*?)', '(.*?)'")
             servers_data_list.append([server, id, idioma, calidad])
-    url = "http://www.peliculasdk.com/Js/videod.js"
-    data = scrapertools.cachePage(url)
+    url = host + "/Js/videod.js"
+    data = httptools.downloadpage(url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     data = data.replace('<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')
     patron = 'function (\w+)\(id\).*?'
     patron += 'data-src="([^"]+)"'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches = scrapertools.find_multiple_matches(data, patron)
     for server, url in matches:
         for enlace, id, idioma, calidad in servers_data_list:
             if server == enlace:
                 video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "", str(url))
                 video_url = re.sub(r"'\+codigo\+'", "", video_url)
                 video_url = video_url.replace('embed//', 'embed/')
@@ -541,21 +176,13 @@ def findvideos(item):
                     video_url = scrapertools.get_match(str(url), "u'([^']+)'")
                 except:
                     continue
-                servertitle = scrapertools.get_match(video_url, 'http.*?://(.*?)/')
-                servertitle = servertitle.replace("embed.", "")
-                servertitle = servertitle.replace("player.", "")
-                servertitle = servertitle.replace("api.video.", "")
-                servertitle = re.sub(r"hqq.tv|hqq.watch", "netutv", servertitle)
-                servertitle = servertitle.replace("anonymouse.org", "netu")
-                title = servertitle
-                logger.debug('servertitle: %s' % servertitle)
-                server = servertools.get_server_name(servertitle)
-                logger.debug('server: %s' % server)
+                title = "Ver en: %s [" + idioma + "][" + calidad + "]"
                 itemlist.append(
-                    Item(channel=item.channel, title=title, url=video_url, action="play",
-                         thumbnail=item.category,
-                         plot=scrapedplot, fanart=item.show, server=server, language=idioma, quality=calidad))
+                    item.clone(title=title, url=video_url, action="play",
+                               thumbnail=item.category,
+                               language=idioma, quality=calidad))
+    tmdb.set_infoLabels(itemlist)
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
         infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                       'title': item.fulltitle}
@@ -563,187 +190,12 @@ def findvideos(item):
                              action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
                              text_color="0xFFff6666",
                              thumbnail='http://imgur.com/0gyYvuC.png'))
     return itemlist
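The rewritten `findvideos` above leaves a `%s` placeholder in each title and hands the list to `servertools.get_servers_itemlist` with a callback, which formats the detected server name into the title once it is known. The two lines from this hunk again, with an illustrative result:

```python
# Build entries with a placeholder for the not-yet-known server name.
title = "Ver en: %s [" + idioma + "][" + calidad + "]"
itemlist.append(item.clone(title=title, url=video_url, action="play",
                           thumbnail=item.category, language=idioma, quality=calidad))

# One pass detects each item's server, and the callback fills the placeholder;
# e.g. a powvideo link would end up titled "Ver en: Powvideo [Español][HD-720]".
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
```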
 def play(item):
-    logger.info()
-
-    itemlist = servertools.find_video_items(data=item.url)
-
-    data = scrapertools.cache_page(item.url)
-    listavideos = servertools.findvideos(data)
-
-    for video in listavideos:
-        videotitle = scrapertools.unescape(video[0])
-        url = item.url
-        server = video[2]
-        # xbmctools.addnewvideo( item.channel , "play" , category , server , , url , thumbnail , plot )
-        itemlist.append(
-            Item(channel=item.channel, action="play", server=server, title="Trailer - " + videotitle, url=url,
-                 thumbnail=item.thumbnail, plot=item.plot, fulltitle=item.title,
-                 fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False))
-
-    return itemlist
+    item.thumbnail = item.contentThumbnail
+    return [item]
def info(item):
logger.info()
itemlist = []
url = item.url
id = item.extra
if "serie" in item.url:
try:
rating_tmdba_tvdb = item.extra.split("|")[6]
if item.extra.split("|")[6] == "":
rating_tmdba_tvdb = "Sin puntuación"
except:
rating_tmdba_tvdb = "Sin puntuación"
else:
rating_tmdba_tvdb = item.extra.split("|")[3]
rating_filma = item.extra.split("|")[4]
print "eztoquee"
print rating_filma
print rating_tmdba_tvdb
filma = "http://s6.postimg.org/6yhe5fgy9/filma.png"
try:
if "serie" in item.url:
title = item.extra.split("|")[8]
else:
title = item.extra.split("|")[6]
title = title.replace("%20", " ")
title = "[COLOR yellow][B]" + title + "[/B][/COLOR]"
except:
title = item.title
try:
if "." in rating_tmdba_tvdb:
check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).')
else:
check_rat_tmdba = rating_tmdba_tvdb
if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8:
rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10:
rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
else:
rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
print "lolaymaue"
except:
rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
try:
check_rat_filma = scrapertools.get_match(rating_filma, '(\d)')
print "paco"
print check_rat_filma
if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8:
print "dios"
print check_rat_filma
rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]"
elif int(check_rat_filma) >= 8:
print check_rat_filma
rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]"
else:
rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
print "rojo??"
print check_rat_filma
except:
rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
if not "serie" in item.url:
url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
1] + "?api_key=2e2160006592024ba87ccdf78c28f49f&append_to_response=credits&language=es"
data_plot = scrapertools.cache_page(url_plot)
plot = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",')
tagline = scrapertools.find_single_match(data_plot, '"tagline":(".*?")')
if plot == "":
plot = item.show.split("|")[2]
plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
plot = re.sub(r"\\", "", plot)
else:
plot = item.show.split("|")[2]
plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
plot = re.sub(r"\\", "", plot)
if item.extra.split("|")[7] != "":
tagline = item.extra.split("|")[7]
# tagline= re.sub(r',','.',tagline)
else:
tagline = ""
if "serie" in item.url:
check2 = "serie"
icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
foto = item.show.split("|")[1]
if item.extra.split("|")[5] != "":
critica = item.extra.split("|")[5]
else:
critica = "Esta serie no tiene críticas..."
if not ".png" in item.extra.split("|")[0]:
photo = "http://imgur.com/6uXGkrz.png"
else:
photo = item.extra.split("|")[0].replace(" ", "%20")
try:
tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
except:
tagline = ""
else:
critica = item.extra.split("|")[5]
if "%20" in critica:
critica = "No hay críticas"
icon = "http://imgur.com/SenkyxF.png"
photo = item.extra.split("|")[0].replace(" ", "%20")
foto = item.show.split("|")[1]
try:
if tagline == "\"\"":
tagline = " "
except:
tagline = " "
tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
check2 = "pelicula"
# Tambien te puede interesar
peliculas = []
if "serie" in item.url:
url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[
5] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es"
data_tpi = scrapertools.cachePage(url_tpi)
tpi = scrapertools.find_multiple_matches(data_tpi,
'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),')
else:
url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
1] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es"
data_tpi = scrapertools.cachePage(url_tpi)
tpi = scrapertools.find_multiple_matches(data_tpi,
'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),')
for idp, peli, thumb in tpi:
thumb = re.sub(r'"|}', '', thumb)
if "null" in thumb:
thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png"
else:
thumb = "https://image.tmdb.org/t/p/original" + thumb
peliculas.append([idp, peli, thumb])
check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow")
infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline,
'rating': rating}
item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma,
critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/kdfWEJ6.png")
from channels import infoplus
infoplus.start(item_info, peliculas)
 def newest(categoria):
@@ -754,49 +206,13 @@ def newest(categoria):
         if categoria == 'castellano':
             item.url = host + "idioma/Espanol/"
             item.action = "peliculas"
             itemlist = peliculas(item)
             if itemlist[-1].action == "peliculas":
                 itemlist.pop()
     # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
     except:
         import sys
         for line in sys.exc_info():
             logger.error("{0}".format(line))
         return []
     return itemlist
def browser(url):
import mechanize
# Utilizamos Browser mechanize para saltar problemas con la busqueda en bing
br = mechanize.Browser()
# Browser options
br.set_handle_equiv(False)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(False)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
# br.set_debug_http(True)
# br.set_debug_redirects(True)
# br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
br.addheaders = [('User-agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
# br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
# Open some site, let's pick a random one, the first that pops in mind
r = br.open(url)
response = r.read()
print response
if "img,divreturn" in response:
r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
print "prooooxy"
response = r.read()
return response

View File

@@ -74,16 +74,15 @@ def lista(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     if item.seccion != 'actor':
-        patron = '<li class=item-serie.*?><a href=(.*?) title=(.*?)><img src=(.*?) alt=><span '
-        patron += 'class=s-title><strong>.*?<\/strong><p>(.*?)<\/p><\/span><\/a><\/li>'
+        patron = '(?s)<li class="item-serie.*?href="([^"]+).*?title="([^"]+).*?data-src="([^"]+).*?<span '
+        patron += 'class="s-title">.*?<p>([^<]+)'
     else:
-        patron = '<li><a href=(\/pelicula\/.*?)><figure><img src=(.*?) alt=><\/figure><p class=title>(.*?)<\/p><p '
-        patron += 'class=year>(.*?)<\/p>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+        patron = '(?s)<li>.*?<a href="(/pelicula/[^"]+)".*?<figure>.*?data-src="([^"]+)".*?p class="title">([^<]+).*?'
+        patron += 'year">([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear in matches:
         url = host + scrapedurl
         if item.seccion != 'actor':
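The new `(?s)` patterns above quote the attribute values and read thumbnails from `data-src`, the attribute lazy-loading pages use while `src` holds a placeholder. A small self-contained check of the idea (the HTML snippet is hypothetical):

```python
import re

html = ('<li class="item-serie"><a href="/serie/demo" title="Demo">'
        '<img data-src="http://example.com/thumb.jpg" alt=""></a>'
        '<span class="s-title"><strong>Demo</strong><p>2017</p></span></li>')
# (?s) lets .*? cross line breaks, as in the channel's new pattern.
patron = '(?s)<li class="item-serie.*?href="([^"]+).*?title="([^"]+).*?data-src="([^"]+)'
print(re.findall(patron, html))
# [('/serie/demo', 'Demo', 'http://example.com/thumb.jpg')]
```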
@@ -109,11 +108,11 @@ def lista(item):
     # Paginacion
     if itemlist != []:
-        actual_page = scrapertools.find_single_match(data, '<a class=active item href=.*?>(.*?)<\/a>')
+        actual_page = scrapertools.find_single_match(data, '<a class="active item" href=".*?">(.*?)<\/a>')
         if actual_page:
             next_page_num = int(actual_page) + 1
             next_page = scrapertools.find_single_match(data,
-                                                       '<li><a class= item href=(.*?)\?page=.*?&limit=.*?>Siguiente')
+                                                       '<li><a class=" item" href="(.*?)\?page=.*?&limit=.*?">Siguiente')
             next_page_url = host + next_page + '?page=%s' % next_page_num
             if next_page != '':
                 itemlist.append(Item(channel=item.channel,
@@ -129,15 +128,14 @@ def seccion(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     if item.seccion == 'generos':
-        patron = '<a href=(\/peliculas\/[\D].*?\/) title=Películas de .*?>(.*?)<\/a>'
+        patron = '<a href="(\/peliculas\/[\D].*?\/)" title="Películas de .*?>(.*?)<\/a>'
     elif item.seccion == 'anios':
-        patron = '<li class=.*?><a href=(.*?)>(\d{4})<\/a> <\/li>'
+        patron = '<li class=.*?><a href="(.*?)">(\d{4})<\/a> <\/li>'
     elif item.seccion == 'actor':
-        patron = '<li><a href=(.*?)><div.*?<div class=photopurple title=(.*?)><\/div><img src=(.*?)><\/figure>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+        patron = '<li><a href="(.*?)".*?div.*?<div class="photopurple" title="(.*?)">.*?data-src="([^"]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
     if item.seccion != 'actor':
         for scrapedurl, scrapedtitle in matches:
             title = scrapedtitle.decode('utf-8')
@@ -158,7 +156,6 @@ def seccion(item):
                                  ))
     else:
         for scrapedurl, scrapedname, scrapedthumbnail in matches:
-            thumbnail = scrapedthumbnail
             fanart = ''
             title = scrapedname
             url = host + scrapedurl
@@ -168,14 +165,14 @@ def seccion(item):
                                  title=title,
                                  fulltitle=item.title,
                                  url=url,
-                                 thumbnail=thumbnail,
+                                 thumbnail=scrapedthumbnail,
                                  fanart=fanart,
                                  seccion=item.seccion
                                  ))
     # Paginacion
     if itemlist != []:
-        next_page = scrapertools.find_single_match(data, '<li><a class= item href=(.*?)&limit=.*?>Siguiente <')
+        next_page = scrapertools.find_single_match(data, '<li><a class=" item" href="(.*?)&limit=.*?>Siguiente <')
         next_page_url = host + next_page
         if next_page != '':
             itemlist.append(item.clone(action="seccion",
@@ -240,7 +237,6 @@ def findvideos(item):
                                  ))
     for videoitem in templist:
         data = httptools.downloadpage(videoitem.url).data
         urls_list = scrapertools.find_multiple_matches(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
         for element in urls_list:
             json_data = jsontools.load(element)
@@ -260,19 +256,19 @@ def findvideos(item):
             for urls in video_list:
                 if urls.language == '':
                     urls.language = videoitem.language
-                urls.title = item.title + '(%s) (%s)' % (urls.language, urls.server)
+                urls.title = item.title + urls.language + '(%s)'
             for video_url in video_list:
                 video_url.channel = item.channel
                 video_url.action = 'play'
                 video_url.quality = quality
+                video_url.server = ""
+                video_url.infoLabels = item.infoLabels
         else:
-            server = servertools.get_server_from_url(url)
-            video_list.append(item.clone(title=item.title, url=url, action='play', quality=quality,
-                                         server=server))
+            video_list.append(item.clone(title=item.title, url=url, action='play', quality=quality
+                                         ))
+    video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % i.server.capitalize())
+    tmdb.set_infoLabels(video_list)
     if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
         video_list.append(
             Item(channel=item.channel,
@@ -308,3 +304,8 @@ def newest(categoria):
         return []
     return itemlist


+def play(item):
+    item.thumbnail = item.contentThumbnail
+    return [item]

View File

@@ -24,6 +24,13 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel, action="search",
                          title="Buscar por titulo", context=context,
                          thumbnail=get_thumb("search.png")))
+    thumbnail = get_thumb("search_star.png")
+    itemlist.append(Item(channel='tvmoviedb', title="Buscar actor/actriz", action="search_",
+                         search={'url': 'search/person', 'language': 'es', 'page': 1}, star=True,
+                         thumbnail=thumbnail))
     itemlist.append(Item(channel=item.channel, action="search",
                          title="Buscar por categorias (búsqueda avanzada)", extra="categorias",
                          context=context,

View File

@@ -74,6 +74,15 @@ def configuracion(item):
     platformtools.itemlist_refresh()
     return ret


+def search_star(item):
+    logger.info()
+    itemlist = []
+    item.type = 'movie'
+    itemlist.extend(search_(item))
+    item.type = 'tvshow'
+    itemlist.extend(search_(item))
+    return itemlist


 def search_(item):
     texto = platformtools.dialog_input(heading=item.title)
@@ -95,7 +104,17 @@ def search_(item):
     item.search['query'] = texto
     item.action = "listado_tmdb"
-    return listado_tmdb(item)
+    if item.star == True:
+        types = ['movie', 'tv']
+        itemlist = []
+        for type in types:
+            item.contentType = type
+            item.search['type'] = type
+            itemlist.extend(listado_tmdb(item))
+        return itemlist
+    else:
+        return listado_tmdb(item)
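Together with the `search.py` hunk earlier in this merge, the `star` flag turns one query from the person-search dialog into two TMDB requests, one per media type, with the results merged. A condensed sketch of that flow under the names this diff introduces (`listado_tmdb` performs the actual TMDB call; the query text is illustrative):

```python
from core.item import Item

# The menu entry added in search.py carries star=True, so a single
# dialog query is answered with movie and TV results combined.
item = Item(channel='tvmoviedb', action='search_', star=True,
            search={'url': 'search/person', 'language': 'es', 'page': 1})
item.search['query'] = 'keanu reeves'   # illustrative query text
results = []
for media_type in ['movie', 'tv']:
    item.contentType = media_type
    item.search['type'] = media_type
    results.extend(listado_tmdb(item))  # one TMDB request per media type
```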
def busqueda(item): def busqueda(item):
@@ -338,6 +357,7 @@ def listado_tmdb(item):
     # Listado de actores
     if 'nm' in item.infoLabels['imdb_id']:
         try:
             ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt)
             id_cast = ob_tmdb.result["person_results"][0]["id"]
             if item.contentType == "movie":
@@ -429,12 +449,13 @@ def listado_tmdb(item):
     else:
         # Si es una búsqueda de personas se incluye en el título y fanart una película por la que es conocido
         known_for = ob_tmdb.results[i].get("known_for")
+        type = item.search['type']
         if known_for:
             from random import randint
             random = randint(0, len(known_for) - 1)
-            new_item.title = "%s [COLOR %s](%s)[/COLOR]" \
+            new_item.title = "%s [COLOR %s](%s)[/COLOR] (%s)" \
                              % (new_item.contentTitle, color6,
-                                known_for[random].get("title", known_for[random].get("name")))
+                                known_for[random].get("title", known_for[random].get("name")), type)
             if known_for[random]["backdrop_path"]:
                 new_item.fanart = 'http://image.tmdb.org/t/p/original' + known_for[random]["backdrop_path"]
             else:
@@ -536,12 +557,12 @@ def detalles(item):
     itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                text_color=color5))
-    try:
-        images['tmdb'] = ob_tmdb.result["images"]
-        itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images,
-                                   extra="menu"))
-    except:
-        pass
+    # try:
+    #     images['tmdb'] = ob_tmdb.result["images"]
+    #     itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images,
+    #                                extra="menu"))
+    # except:
+    #     pass
     try:
         if item.contentType == "movie" and item.infoLabels["year"] < 2014:
@@ -591,6 +612,7 @@ def detalles(item):
     # Películas/Series similares y recomendaciones
     if item.infoLabels['tmdb_id']:
+        item.extra = item.contentType.replace('tvshow', 'tv')
         title = title.replace("película", "Películas").replace("serie", "Series")
         itemlist.append(item.clone(title="%s similares" % title, action="listado_tmdb",
                                    search={'url': '%s/%s/similar' % (item.extra, item.infoLabels['tmdb_id']),
@@ -608,6 +630,7 @@ def reparto(item):
     # Actores y equipo de rodaje de una película/serie
     itemlist = []
     item.text_color = color1
+    item.extra=item.contentType.replace('tvshow','tv')
     item.search = {'url': '%s/%s/credits' % (item.extra, item.infoLabels['tmdb_id'])}
     ob_tmdb = Tmdb(discover=item.search, tipo=item.extra, idioma_busqueda=langt)
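
Note: this hunk and the one in detalles above apply the same mapping: Alfa's internal "tvshow" content type becomes the "tv" segment that TMDB's REST routes expect, so item.extra can be dropped straight into URL templates such as '%s/%s/credits'. A quick illustration (603 is just a placeholder id):

    # Alfa items say "movie"/"tvshow"; TMDB routes say "movie"/"tv".
    for content_type in ("movie", "tvshow"):
        extra = content_type.replace('tvshow', 'tv')
        print('%s/%s/credits' % (extra, 603))  # movie/603/credits, tv/603/credits
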
@@ -1899,6 +1922,8 @@ def newlist(item):
 ##-------------------- LISTADOS DE IMAGENES ------------------------##
 def imagenes(item):
     itemlist = []
     if item.extra == "menu":
         item.folder = not config.is_xbmc()
     if "tmdb" in item.images:


@@ -4,7 +4,6 @@ import re
 from core import channeltools
 from core import httptools
-from core import scrapertoolsV2
 from core import scrapertools
 from core import servertools
 from core import tmdb
@@ -123,7 +122,7 @@ def peliculas(item):
             idiomas_disponibles.append(idiomas1[lang])
         if idiomas_disponibles:
             idiomas_disponibles = "[" + "/".join(idiomas_disponibles) + "]"
-        contentTitle = scrapertoolsV2.htmlclean(scrapedtitle.strip())
+        contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
         title = "%s %s" % (contentTitle, idiomas_disponibles)
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                              thumbnail=scrapedthumbnail, contentTitle=contentTitle,
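
Note: dropping scrapertoolsV2 leaves the channel with a single scraping helper module; htmlclean strips leftover markup from the scraped title before it becomes contentTitle. A rough sketch of what such a cleaner does, illustrative only and not Alfa's exact implementation:

    import re

    def htmlclean_sketch(text):
        text = re.sub(r'<[^>]+>', '', text)       # drop HTML tags
        return re.sub(r'\s+', ' ', text).strip()  # collapse leftover whitespace

    print(htmlclean_sketch('<strong>El titulo</strong>\n(2017)'))  # El titulo (2017)
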
@@ -182,6 +181,8 @@ def findvideos(item):
     patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
     matches = scrapertools.find_multiple_matches(data, patron)
     for server, url, idioma, calidad in matches:
+        if "drive" in server:
+            server = "gvideo"
         sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
                                   language=idioma.strip(),
                                   title="Ver en %s %s" %(server, calidad)
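
Note: scraped labels such as "Google Drive" have no connector of their own, so anything containing "drive" is renamed to the gvideo connector before the play items are built; otherwise the server lookup further down the pipeline would fail. The same idea written as a small alias table (SERVER_ALIASES and normalize_server are illustrative names, not the addon's API):

    SERVER_ALIASES = {'drive': 'gvideo'}  # hypothetical table; the hunk hardcodes one case

    def normalize_server(label):
        label = label.lower()
        for needle, canonical in SERVER_ALIASES.items():
            if needle in label:
                return canonical
        return label

    print(normalize_server('Google Drive'))  # gvideo
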
@@ -189,7 +190,7 @@ def findvideos(item):
     for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
         lista_idioma = filter(lambda i: i.language == k, sublist)
         if lista_idioma:
-            itemlist.append(item.clone(title=k, folder=False,
+            itemlist.append(item.clone(title=k, folder=False, infoLabels = "",
                                        text_color=color2, text_bold=True, thumbnail=thumbnail_host))
             itemlist.extend(lista_idioma)
@@ -204,7 +205,6 @@ def findvideos(item):
         itemlist.append(Item(channel=item.channel, title="Añadir película a la videoteca",
                              action="add_pelicula_to_library", url=item.url, text_color="green",
                              contentTitle=item.contentTitle, extra="library", thumbnail=thumbnail_host))
     return itemlist
 
 
 def play(item):
@@ -213,5 +213,6 @@ def play(item):
     ddd = httptools.downloadpage(item.url).data
     url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '<iframe src="([^"]+)')
     item.url = httptools.downloadpage(url + "&ge=1", follow_redirects=False, only_headers=True).headers.get("location", "")
-    item.server = servertools.get_server_from_url(item.url)
-    return [item]
+    itemlist.append(item.clone())
+    itemlist = servertools.get_servers_itemlist(itemlist)
+    return itemlist
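
Note: play() no longer resolves a single URL with get_server_from_url; it clones the item into a list and lets servertools.get_servers_itemlist tag each entry with the server whose URL pattern matches. A minimal sketch of what that batched matching likely does, under the assumption of pattern tables like the servers' JSON files below; SERVER_PATTERNS and FakeItem are illustrative stand-ins:

    import re

    SERVER_PATTERNS = [
        ('gvideo', r'googleusercontent\.com'),
        ('powvideo', r'powvideo\.net'),
    ]

    class FakeItem(object):  # stand-in for core.item.Item
        def __init__(self, url):
            self.url = url
            self.server = ""

    def get_servers_itemlist_sketch(itemlist):
        for item in itemlist:
            for server, pattern in SERVER_PATTERNS:
                if re.search(pattern, item.url):
                    item.server = server
                    break
        return itemlist

    items = get_servers_itemlist_sketch([FakeItem("https://lh3.googleusercontent.com/abc")])
    print(items[0].server)  # gvideo
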

Binary file not shown (image, 32 KiB).

@@ -16,7 +16,7 @@
             "url": "http://docs.google.com/get_video_info?docid=\\1"
         },
         {
-            "pattern": "(?s)\"https://(lh.*?).googleusercontent.com/([^\"]+)",
+            "pattern": "(?s)https://(lh.).googleusercontent.com/([0-9a-zA-Z-_=]+)",
             "url": "https://\\1.googleusercontent.com/\\2"
         }
     ]
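
Note: the old gvideo pattern anchored on a leading escaped quote and captured [^\"]+, which together with (?s) happily drags spaces and trailing junk into the asset token; the new pattern drops the quote anchor, pins the host to "lh" plus one character and restricts the capture to token characters. Compared directly:

    import re

    old = r'(?s)"https://(lh.*?).googleusercontent.com/([^"]+)'
    new = r'(?s)https://(lh.).googleusercontent.com/([0-9a-zA-Z-_=]+)'

    data = '"https://lh3.googleusercontent.com/AbC-12_3=s0 trailing junk"'
    print(re.search(old, data).groups())  # ('lh3', 'AbC-12_3=s0 trailing junk')
    print(re.search(new, data).groups())  # ('lh3', 'AbC-12_3=s0')
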


@@ -1,41 +0,0 @@
-{
-    "active": true,
-    "find_videos": {
-        "ignore_urls": [],
-        "patterns": [
-            {
-                "pattern": "letwatch.(?:us|to)/(?:embed-|)([a-z0-9A-Z]+)(?:.html|)",
-                "url": "http://letwatch.to/embed-\\1.html"
-            }
-        ]
-    },
-    "free": true,
-    "id": "letwatch",
-    "name": "letwatch",
-    "settings": [
-        {
-            "default": false,
-            "enabled": true,
-            "id": "black_list",
-            "label": "Incluir en lista negra",
-            "type": "bool",
-            "visible": true
-        },
-        {
-            "default": 0,
-            "enabled": true,
-            "id": "favorites_servers_list",
-            "label": "Incluir en lista de favoritos",
-            "lvalues": [
-                "No",
-                "1",
-                "2",
-                "3",
-                "4",
-                "5"
-            ],
-            "type": "list",
-            "visible": false
-        }
-    ]
-}


@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-from core import scrapertools
-from lib import jsunpack
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-    data = scrapertools.cache_page(page_url)
-    if ("File was deleted" or "Not Found") in data:
-        return False, "[Letwatch] El archivo no existe o ha sido borrado"
-    if "Video is processing now" in data:
-        return False, "El vídeo está siendo procesado todavía"
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("url=" + page_url)
-    data = scrapertools.cache_page(page_url)
-    video_urls = []
-    media_urls = scrapertools.find_multiple_matches(data, '\{file\:"([^"]+)",label\:"([^"]+)"\}')
-    if len(media_urls) > 0:
-        for media_url, label in media_urls:
-            video_urls.append(
-                [scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [letwatch]", media_url])
-    else:
-        matches = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)"
-                                                       "</script>")
-        matchjs = jsunpack.unpack(matches).replace("\\", "")
-        media_urls = scrapertools.find_multiple_matches(matchjs, '\{file\:"([^"]+)",label\:"([^"]+)"\}')
-        for media_url, label in media_urls:
-            video_urls.append(
-                [scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [letwatch]", media_url])
-    for video_url in video_urls:
-        logger.info("%s - %s" % (video_url[0], video_url[1]))
-    return video_urls


@@ -13,12 +13,6 @@ host = "http://powvideo.net/"
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
-    referer = page_url.replace('iframe', 'preview')
-    data = httptools.downloadpage(page_url, headers={'referer': referer}).data
-    if "el archivo ha sido borrado por no respetar" in data.lower():
-        return False, "[powvideo] El archivo no existe o ha sido borrado por no respetar los Terminos de uso"
     return True, ""


@@ -9,7 +9,7 @@ def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
     data = httptools.downloadpage(page_url).data
-    if ("File was deleted" or "File not found") in data:
+    if "File was deleted" in data or "File not found" in data:
        return False, "[Yourupload] El archivo no existe o ha sido borrado"
     return True, ""
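
Note: the old condition is a classic Python pitfall. ("File was deleted" or "File not found") is evaluated first, and because a non-empty string is truthy it collapses to just "File was deleted", so the second marker was never checked; the letwatch connector removed above carried the same collapsed check. Demonstration:

    data = "Error: File not found"

    # Buggy: the or-expression collapses to its first (truthy) operand.
    print(("File was deleted" or "File not found") in data)        # False
    # Fixed: each marker is tested against the page separately.
    print("File was deleted" in data or "File not found" in data)  # True
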


@@ -1,42 +0,0 @@
-{
-    "active": true,
-    "find_videos": {
-        "ignore_urls": [],
-        "patterns": [
-            {
-                "pattern": "zstream.to/(?:embed-|)([A-z0-9]+)",
-                "url": "http://zstream.to/embed-\\1.html"
-            }
-        ]
-    },
-    "free": true,
-    "id": "zstream",
-    "name": "zstream",
-    "settings": [
-        {
-            "default": false,
-            "enabled": true,
-            "id": "black_list",
-            "label": "Incluir en lista negra",
-            "type": "bool",
-            "visible": true
-        },
-        {
-            "default": 0,
-            "enabled": true,
-            "id": "favorites_servers_list",
-            "label": "Incluir en lista de favoritos",
-            "lvalues": [
-                "No",
-                "1",
-                "2",
-                "3",
-                "4",
-                "5"
-            ],
-            "type": "list",
-            "visible": false
-        }
-    ],
-    "thumbnail": "http://i.imgur.com/UJMfMZJ.png?1"
-}


@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-from core import httptools
-from core import scrapertools
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-    data = httptools.downloadpage(page_url).data
-    if "File was deleted" in data:
-        return False, "[Zstream] El archivo no existe o ha sido borrado"
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("url=%s" % page_url)
-    video_urls = []
-    data = httptools.downloadpage(page_url).data
-    matches = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",label:"([^"]+)"')
-    for media_url, calidad in matches:
-        calidad = "." + media_url.rsplit('.', 1)[1] + " " + calidad
-        video_urls.append([calidad + ' [zstream]', media_url])
-    return video_urls