Merge pull request #250 from Intel11/actualizados

Actualizados
Alfa
2018-04-25 14:16:14 -05:00
committed by GitHub
10 changed files with 1150 additions and 424 deletions

plugin.video.alfa/channels/seriecanal.json Executable file → Normal file (0 lines changed)

plugin.video.alfa/channels/seriecanal.py Executable file → Normal file (0 lines changed)


@@ -1,22 +1,23 @@
{
"id": "vertelenovelas",
"name": "Ver Telenovelas",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "vertelenovelas.png",
"banner": "vertelenovelas.png",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}
{
"id": "tvvip",
"name": "TV-VIP",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "http://i.imgur.com/gNHVlI4.png",
"banner": "http://i.imgur.com/wyRk5AG.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": "!eq(-1,'') + !eq(-2,'')",
"visible": true
}
]
}


@@ -0,0 +1,666 @@
# -*- coding: utf-8 -*-
import os
import re
import sys
import unicodedata
import urllib
import time
from core import channeltools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
host = "http://tv-vip.com"
def mainlist(item):
logger.info()
item.viewmode = "movie"
itemlist = []
data = httptools.downloadpage(host + "/json/playlist/home/index.json")
itemlist.append(Item(channel=item.channel, title="Películas", action="submenu",
thumbnail=host+"/json/playlist/peliculas/thumbnail.jpg",
fanart=host+"/json/playlist/peliculas/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Series", action="submenu",
thumbnail=host+"/json/playlist/series/poster.jpg",
fanart=host+"/json/playlist/series/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Versión Original", action="entradasconlistas",
url=host+"/json/playlist/version-original/index.json",
thumbnail=host+"/json/playlist/version-original/thumbnail.jpg",
fanart=host+"/json/playlist/version-original/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Documentales", action="entradasconlistas",
url=host+"/json/playlist/documentales/index.json",
thumbnail=host+"/json/playlist/documentales/thumbnail.jpg",
fanart=host+"/json/playlist/documentales/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Películas Infantiles", action="entradasconlistas",
url=host+"/json/playlist/peliculas-infantiles/index.json",
thumbnail=host+"/json/playlist/peliculas-infantiles/thumbnail.jpg",
fanart=host+"/json/playlist/peliculas-infantiles/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Series Infantiles", action="entradasconlistas",
url=host+"/json/playlist/series-infantiles/index.json",
thumbnail=host+"/json/playlist/series-infantiles/thumbnail.jpg",
fanart=host+"/json/playlist/series-infantiles/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search",
thumbnail="http://i.imgur.com/gNHVlI4.png", fanart="http://i.imgur.com/9loVksV.png"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
if item.title == "Buscar...": item.extra = "local"
item.url = host + "/video-prod/s/search?q=%s&n=100" % texto
try:
return busqueda(item, texto)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
for line in sys.exc_info():
logger.error("%s" % line)
return []
def busqueda(item, texto):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
for child in data["objectList"]:
infolabels = {}
infolabels['year'] = child['year']
if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
infolabels['rating'] = child['rate'].replace(',', '.')
infolabels['votes'] = child['rateCount']
if child['cast']: infolabels['cast'] = child['cast'].split(",")
infolabels['director'] = child['director']
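        # Entries without 'playListChilds' are individual videos (type "repo");
        # the rest are lists of videos (type "playlist").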
if 'playListChilds' not in child:
infolabels['plot'] = child['description']
type = "repo"
fulltitle = child['name']
title = child['name']
infolabels['duration'] = child['duration']
if child['height'] < 720:
quality = "[B] [SD][/B]"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
elif child['height'] < 2160:
quality = "[B] [1080p][/B]"
elif child['height'] >= 2160:
quality = "[B] [4k][/B]"
if child['name'] == "":
title = child['id'].rsplit(".", 1)[0]
else:
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
else:
type = "playlist"
infolabels['plot'] = "Contiene:\n" + "\n".join(child['playListChilds']) + "\n".join(child['repoChilds'])
fulltitle = child['id']
title = "[COLOR red][LISTA][/COLOR] " + child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + \
str(child['number']) + "[/COLOR])"
# En caso de búsqueda global se filtran los resultados
if item.extra != "local":
if "+" in texto: texto = "|".join(texto.split("+"))
if not re.search(r'(?i)' + texto, title, flags=re.DOTALL): continue
url = host + "/json/%s/%s/index.json" % (type, child["id"])
# Fanart
if child['hashBackground']:
fanart = host + "/json/%s/%s/background.jpg" % (type, child["id"])
else:
fanart = host + "/json/%s/%s/thumbnail.jpg" % (type, child["id"])
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/%s/%s/poster.jpg" % (type, child["id"])
else:
thumbnail = fanart
if type == 'playlist':
itemlist.insert(0, Item(channel=item.channel, action="entradasconlistas", title=title,
url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle,
infoLabels=infolabels, viewmode="movie_with_plot", folder=True))
else:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, contentTitle=fulltitle,
context="05", infoLabels=infolabels, viewmode="movie_with_plot", folder=True))
return itemlist
def submenu(item):
logger.info()
itemlist = []
if item.title == "Series":
itemlist.append(Item(channel=item.channel, title="Nuevos Capítulos", action="episodios",
url=host+"/json/playlist/nuevos-capitulos/index.json",
thumbnail=host+"/json/playlist/nuevos-capitulos/background.jpg",
fanart=host+"/json/playlist/nuevos-capitulos/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Más Vistas", action="series",
url=host+"/json/playlist/top-series/index.json",
thumbnail=host+"/playlist/top-series/thumbnail.jpg",
fanart=host+"/json/playlist/top-series/background.jpg",
extra1="Series"))
itemlist.append(Item(channel=item.channel, title="Últimas Series", action="series",
url=host+"/json/playlist/series/index.json",
thumbnail=item.thumbnail, fanart=item.fanart, extra1="Series"))
itemlist.append(Item(channel=item.channel, title="Lista de Series A-Z", action="series",
url=host+"/json/playlist/series/index.json", thumbnail=item.thumbnail,
fanart=item.fanart, extra1="Series"))
else:
itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas",
url=host+"/json/playlist/000-novedades/index.json",
thumbnail=host+"/json/playlist/ultimas-peliculas/thumbnail.jpg",
fanart=host+"/json/playlist/ultimas-peliculas/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Más vistas", action="entradas",
url=host+"/json/playlist/peliculas-mas-vistas/index.json",
thumbnail=host+"/json/playlist/peliculas-mas-vistas/thumbnail.jpg",
fanart=host+"/json/playlist/peliculas-mas-vistas/background.jpg"))
itemlist.append(Item(channel=item.channel, title="Categorías", action="cat",
url=host+"/json/playlist/peliculas/index.json",
thumbnail=item.thumbnail, fanart=item.fanart))
itemlist.append(Item(channel=item.channel, title="Películas 3D", action="entradasconlistas",
url=host+"/json/playlist/3D/index.json",
thumbnail=host+"/json/playlist/3D/thumbnail.jpg",
fanart=host+"/json/playlist/3D/background.jpg"))
return itemlist
def cat(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
exception = ["peliculas-mas-vistas", "ultimas-peliculas"]
for child in data["sortedPlaylistChilds"]:
if child["id"] not in exception:
url = host + "/json/playlist/%s/index.json" % child["id"]
# Fanart
if child['hashBackground']:
fanart = host + "/json/playlist/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
# Thumbnail
thumbnail = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
title = child['id'].replace('-', ' ').capitalize().replace("Manga", "Animación/Cine Oriental")
title += " ([COLOR gold]" + str(child['number']) + "[/COLOR])"
itemlist.append(
Item(channel=item.channel, action="entradasconlistas", title=title, url=url,
thumbnail=thumbnail, fanart=fanart, folder=True))
return itemlist
def entradas(item):
logger.info()
itemlist = []
infolabels = {}
if item.title == "Nuevos Capítulos":
context = "5"
else:
context = "05"
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
for child in data["sortedRepoChilds"]:
infolabels['year'] = child['year']
url = host + "/json/repo/%s/index.json" % child["id"]
thumbnail = ""
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
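        # Map the reported video height to the quality tag appended to the title.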
if child['height'] < 720:
quality = "[B] [SD][/B]"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
elif child['height'] < 2160:
quality = "[B] [1080p][/B]"
elif child['height'] >= 2160:
quality = "[B] [4k][/B]"
fulltitle = child['name']
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
itemlist.append(Item(channel=item.channel, action="findvideos", server="", title=title, url=url,
thumbnail=thumbnail, fulltitle=fulltitle, infoLabels=infolabels,
contentTitle=fulltitle, context=context))
tmdb.set_infoLabels(itemlist)
return itemlist
def entradasconlistas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
# Si hay alguna lista
contentSerie = False
contentList = False
if data['playListChilds']:
itemlist.append(Item(channel=item.channel, title="**LISTAS**", action="", text_color="red", text_blod=True,
folder=False))
for child in data['sortedPlaylistChilds']:
infolabels = {}
infolabels['plot'] = "Contiene:\n" + "\n".join(child['playListChilds']) + "\n".join(child['repoChilds'])
if child['seasonNumber'] and not contentList and re.search(r'(?i)temporada', child['id']):
infolabels['season'] = child['seasonNumber']
contentSerie = True
else:
contentSerie = False
contentList = True
title = child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + str(child['number']) + "[/COLOR])"
url = host + "/json/playlist/%s/index.json" % child["id"]
thumbnail = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
if child['hashBackground']:
fanart = host + "/json/playlist/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
itemlist.append(Item(channel=item.channel, action="entradasconlistas", title=title,
url=url, thumbnail=thumbnail, fanart=fanart, fulltitle=child['id'],
infoLabels=infolabels, viewmode="movie_with_plot"))
else:
contentList = True
if data["sortedRepoChilds"] and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="**VÍDEOS**", action="", text_color="blue", text_blod=True,
folder=False))
for child in data["sortedRepoChilds"]:
infolabels = {}
infolabels['plot'] = child['description']
infolabels['year'] = data['year']
if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
infolabels['rating'] = child['rate'].replace(',', '.')
infolabels['votes'] = child['rateCount']
infolabels['duration'] = child['duration']
if child['cast']: infolabels['cast'] = child['cast'].split(",")
infolabels['director'] = child['director']
url = host + "/json/repo/%s/index.json" % child["id"]
# Fanart
if child['hashBackground']:
fanart = host + "/json/repo/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
else:
thumbnail = fanart
if child['height'] < 720:
quality = "[B] [SD][/B]"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
elif child['height'] < 2160:
quality = "[B] [1080p][/B]"
elif child['height'] >= 2160:
quality = "[B] [4k][/B]"
fulltitle = child['name']
if child['name'] == "":
title = child['id'].rsplit(".", 1)[0]
else:
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentTitle=fulltitle, context="05", viewmode="movie_with_plot", folder=True))
# Se añade item para añadir la lista de vídeos a la videoteca
if data['sortedRepoChilds'] and len(itemlist) > 0 and contentList:
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, text_color="green", title="Añadir esta lista a la videoteca",
url=item.url, action="listas"))
elif contentSerie:
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="series_library", fulltitle=data['name'], show=data['name'],
text_color="green"))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
exception = ["top-series", "nuevos-capitulos"]
for child in data["sortedPlaylistChilds"]:
if child["id"] not in exception:
infolabels = {}
infolabels['plot'] = child['description']
infolabels['year'] = child['year']
if child['tags']: infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
infolabels['rating'] = child['rate'].replace(',', '.')
infolabels['votes'] = child['rateCount']
if child['cast']: infolabels['cast'] = child['cast'].split(",")
infolabels['director'] = child['director']
infolabels['mediatype'] = "episode"
if child['seasonNumber']: infolabels['season'] = child['seasonNumber']
url = host + "/json/playlist/%s/index.json" % child["id"]
# Fanart
if child['hashBackground']:
fanart = host + "/json/playlist/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/playlist/%s/thumbnail.jpg" % child["id"]
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/playlist/%s/poster.jpg" % child["id"]
else:
thumbnail = fanart
if item.extra1 == "Series":
if child['name'] != "":
fulltitle = child['name']
fulltitle = fulltitle.replace('-', '')
title = child['name'] + " (" + child['year'] + ")"
else:
title = fulltitle = child['id'].capitalize()
if "Temporada" not in title:
title += " [Temporadas: [COLOR gold]" + str(child['numberOfSeasons']) + "[/COLOR]]"
elif item.title == "Más Vistas":
title = title.replace("- Temporada", "--- Temporada")
else:
if data['name'] != "":
fulltitle = data['name']
if child['seasonNumber']:
title = data['name'] + " --- Temporada " + child['seasonNumber'] + \
" [COLOR gold](" + str(child['number']) + ")[/COLOR]"
else:
title = child['name'] + " [COLOR gold](" + str(child['number']) + ")[/COLOR]"
else:
fulltitle = data['id']
if child['seasonNumber']:
title = data['id'].capitalize() + " --- Temporada " + child['seasonNumber'] + \
" [COLOR gold](" + str(child['number']) + ")[/COLOR]"
else:
title = data['id'].capitalize() + " [COLOR gold](" + str(child['number']) + ")[/COLOR]"
if not child['playListChilds']:
action = "episodios"
else:
action = "series"
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, server="",
thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentSerieName=fulltitle, context="25", viewmode="movie_with_plot", folder=True))
if len(itemlist) == len(data["sortedPlaylistChilds"]) and item.extra1 != "Series":
itemlist.sort(key=lambda item: item.title, reverse=True)
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", show=data['name'],
text_color="green", extra="series_library"))
if item.title == "Últimas Series": return itemlist
if item.title == "Lista de Series A-Z": itemlist.sort(key=lambda item: item.fulltitle)
if data["sortedRepoChilds"] and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="**VÍDEOS RELACIONADOS/MISMA TEMÁTICA**", text_color="blue",
text_blod=True, action="", folder=False))
for child in data["sortedRepoChilds"]:
infolabels = {}
if child['description']:
infolabels['plot'] = data['description']
else:
infolabels['plot'] = child['description']
infolabels['year'] = data['year']
if not child['tags']:
infolabels['genre'] = ', '.join([x.strip() for x in data['tags']])
else:
infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
infolabels['rating'] = child['rate'].replace(',', '.')
infolabels['duration'] = child['duration']
if child['cast']: infolabels['cast'] = child['cast'].split(",")
infolabels['director'] = child['director']
url = host + "/json/repo/%s/index.json" % child["id"]
if child['hashBackground']:
fanart = host + "/json/repo/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
else:
thumbnail = fanart
if child['height'] < 720:
quality = "[B] [SD][/B]"
elif child['height'] < 1080:
quality = "[B] [720p][/B]"
elif child['height'] < 2160:
quality = "[B] [1080p][/B]"
elif child['height'] >= 2160:
quality = "[B] [1080p][/B]"
fulltitle = child['name']
if child['name'] == "":
title = child['id'].rsplit(".", 1)[0]
else:
title = child['name']
if child['year']:
title += " (" + child['year'] + ")"
title += quality
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
server="", thumbnail=thumbnail, fanart=fanart, fulltitle=fulltitle, infoLabels=infolabels,
contentSerieName=fulltitle, context="25", viewmode="movie_with_plot", folder=True))
if item.extra == "new":
itemlist.sort(key=lambda item: item.title, reverse=True)
return itemlist
def episodios(item):
logger.info()
itemlist = []
# Redirección para actualización de videoteca
if item.extra == "series_library":
itemlist = series_library(item)
return itemlist
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
# Se prueba un método u otro porque algunas series no están bien listadas
if data["sortedRepoChilds"]:
for child in data["sortedRepoChilds"]:
if item.infoLabels:
item.infoLabels['duration'] = str(child['duration'])
item.infoLabels['season'] = str(data['seasonNumber'])
item.infoLabels['episode'] = str(child['episode'])
item.infoLabels['mediatype'] = "episode"
#contentTitle = item.fulltitle + "|" + str(data['seasonNumber']) + "|" + str(child['episode'])
# En caso de venir del apartado nuevos capítulos se redirige a la función series para mostrar los demás
if item.title == "Nuevos Capítulos":
url = host + "/json/playlist/%s/index.json" % child["season"]
action = "series"
extra = "new"
else:
url = host + "/json/repo/%s/index.json" % child["id"]
action = "findvideos"
extra = ""
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
else:
thumbnail = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
try:
title = fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
except:
title = fulltitle = child['id']
itemlist.append(item.clone(action=action, server="", title=title, url=url, thumbnail=thumbnail,
fanart=item.fanart, fulltitle=fulltitle, contentSerieName=fulltitle, context="35",
viewmode="movie", extra=extra, show=item.fulltitle, folder=True))
else:
for child in data["repoChilds"]:
url = host + "/json/repo/%s/index.json" % child
if data['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child
else:
thumbnail = host + "/json/repo/%s/thumbnail.jpg" % child
title = fulltitle = child.capitalize().replace('_', ' ')
itemlist.append(item.clone(action="findvideos", server="", title=title, url=url, thumbnail=thumbnail,
fanart=item.fanart, fulltitle=fulltitle, contentSerieName=item.fulltitle,
context="25", show=item.fulltitle, folder=True))
# Opción de añadir a la videoteca en casos de series de una única temporada
if len(itemlist) > 0 and not "---" in item.title and item.title != "Nuevos Capítulos":
if config.get_videolibrary_support() and item.show == "":
if "-" in item.title:
show = item.title.split('-')[0]
else:
show = item.title.split('(')[0]
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="green",
url=item.url, action="add_serie_to_library", show=show, extra="series_library"))
return itemlist
def series_library(item):
logger.info()
# Funcion unicamente para añadir/actualizar series a la libreria
lista_episodios = []
show = item.show.strip()
data_serie = anti_cloudflare(item.url, host=host, headers=headers)
data_serie = jsontools.load(data_serie)
# Para series que en la web se listan divididas por temporadas
if data_serie["sortedPlaylistChilds"]:
for season_name in data_serie["sortedPlaylistChilds"]:
url_season = host + "/json/playlist/%s/index.json" % season_name['id']
data = anti_cloudflare(url_season, host=host, headers=headers)
data = jsontools.load(data)
if data["sortedRepoChilds"]:
for child in data["sortedRepoChilds"]:
url = host + "/json/repo/%s/index.json" % child["id"]
fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
try:
check_filename = scrapertools.get_season_and_episode(fulltitle)
except:
fulltitle += " " + str(data['seasonNumber']) + "x00"
lista_episodios.append(Item(channel=item.channel, action="findvideos", server="",
title=fulltitle, extra=url, url=item.url, fulltitle=fulltitle,
contentTitle=fulltitle, show=show))
else:
for child in data["repoChilds"]:
url = host + "/json/repo/%s/index.json" % child
fulltitle = child.capitalize().replace('_', ' ')
try:
check_filename = scrapertools.get_season_and_episode(fulltitle)
except:
fulltitle += " " + str(data['seasonNumber']) + "x00"
lista_episodios.append(Item(channel=item.channel, action="findvideos", server="",
title=fulltitle, extra=url, url=item.url, contentTitle=fulltitle,
fulltitle=fulltitle, show=show))
# Para series directas de una sola temporada
else:
data = data_serie
if data["sortedRepoChilds"]:
for child in data["sortedRepoChilds"]:
url = host + "/json/repo/%s/index.json" % child["id"]
fulltitle = child['name'].rsplit(" ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
try:
check_filename = scrapertools.get_season_and_episode(fulltitle)
except:
fulltitle += " 1x00"
lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", title=fulltitle,
contentTitle=fulltitle, url=item.url, extra=url, fulltitle=fulltitle,
show=show))
else:
for child in data["repoChilds"]:
url = host + "/json/repo/%s/index.json" % child
fulltitle = child.capitalize().replace('_', ' ')
try:
check_filename = scrapertools.get_season_and_episode(fulltitle)
except:
fulltitle += " 1x00"
lista_episodios.append(Item(channel=item.channel, action="findvideos", server="", title=fulltitle,
contentTitle=fulltitle, url=item.url, extra=url, fulltitle=fulltitle,
show=show))
return lista_episodios
def findvideos(item):
logger.info()
itemlist = []
# En caso de llamarse a la función desde una serie de la videoteca
if item.extra.startswith("http"): item.url = item.extra
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
id = urllib.quote(data['id'])
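    # Each quality profile lists one or more mirror servers; build a playable
    # link per mirror, keeping the "default" (higher quality) profile on top.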
for child in data["profiles"].keys():
videopath = urllib.quote(data["profiles"][child]['videoUri'])
for i in range(0, len(data["profiles"][child]['servers'])):
url = data["profiles"][child]['servers'][i]['url'] + videopath
size = " " + data["profiles"][child]["sizeHuman"]
resolution = " [" + (data["profiles"][child]['videoResolution']) + "]"
title = "Ver vídeo en " + resolution.replace('1920x1080', 'HD-1080p')
if i == 0:
title += size + " [COLOR purple]Mirror " + str(i + 1) + "[/COLOR]"
else:
title += size + " [COLOR green]Mirror " + str(i + 1) + "[/COLOR]"
# Para poner enlaces de mayor calidad al comienzo de la lista
if data["profiles"][child]["profileId"] == "default":
itemlist.insert(i, item.clone(action="play", server="directo", title=title, url=url,
viewmode="list", extra=id, folder=False))
else:
itemlist.append(item.clone(action="play", server="directo", title=title, url=url,
viewmode="list", extra=id, folder=False))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta"))
if len(itemlist) > 0 and item.extra == "":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color="green",
url=item.url, action="add_pelicula_to_library",
infoLabels={'title':item.fulltitle}, extra="findvideos", fulltitle=item.fulltitle))
return itemlist
def play(item):
logger.info()
itemlist = []
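    # Fetch the tt/mm/bb tokens from /video-prod/s/uri and append them to the
    # transcoder URL to build the final playable stream link.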
uri = scrapertools.find_single_match(item.url, '(/transcoder[\w\W]+)')
uri_request = host + "/video-prod/s/uri?uri=%s&_=%s" % (uri, int(time.time()))
data = httptools.downloadpage(uri_request).data
data = jsontools.load(data)
url = item.url.replace(".tv-vip.com/transcoder/", ".tv-vip.info/c/transcoder/") + "?tt=" + str(data['tt']) + \
"&mm=" + data['mm'] + "&bb=" + data['bb']
itemlist.append(item.clone(action="play", server="directo", url=url, folder=False))
return itemlist
def listas(item):
logger.info()
# Para añadir listas a la videoteca en carpeta CINE
itemlist = []
data = anti_cloudflare(item.url, host=host, headers=headers)
data = jsontools.load(data)
head = header_string + get_cookie_value()
for child in data["sortedRepoChilds"]:
infolabels = {}
# Fanart
if child['hashBackground']:
fanart = host + "/json/repo/%s/background.jpg" % child["id"]
else:
fanart = host + "/json/repo/%s/thumbnail.jpg" % child["id"]
# Thumbnail
if child['hasPoster']:
thumbnail = host + "/json/repo/%s/poster.jpg" % child["id"]
else:
thumbnail = fanart
thumbnail += head
fanart += head
url = host + "/json/repo/%s/index.json" % child["id"]
if child['name'] == "":
title = scrapertools.slugify(child['id'].rsplit(".", 1)[0])
else:
title = scrapertools.slugify(child['name'])
title = title.replace('-', ' ').replace('_', ' ').capitalize()
infolabels['title'] = title
try:
from core import videolibrarytools
new_item = item.clone(title=title, url=url, fulltitle=title, fanart=fanart, extra="findvideos",
thumbnail=thumbnail, infoLabels=infolabels, category="Cine")
videolibrarytools.library.add_movie(new_item)
error = False
except:
error = True
import traceback
logger.error(traceback.format_exc())
if not error:
itemlist.append(Item(channel=item.channel, title='Lista añadida correctamente a la videoteca',
action="", folder=False))
else:
itemlist.append(Item(channel=item.channel, title='ERROR. Han ocurrido uno o varios errores en el proceso',
action="", folder=False))
return itemlist


@@ -1,117 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimos capítulos", action="ultimos", url="http://www.vertelenovelas.cc/",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.vertelenovelas.cc/ajax/autocompletex.php?q=" + texto
try:
return series(item)
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def ultimos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<article.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
title = scrapertools.find_single_match(match, '<span>([^<]+)</span>')
if title == "":
title = scrapertools.find_single_match(match, '<a href="[^"]+" class="title link">([^<]+)</a>')
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, '<a href="([^"]+)"'))
thumbnail = scrapertools.find_single_match(match, '<div data-src="([^"]+)"')
if thumbnail == "":
thumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
logger.debug("title=[" + title + "], url=[" + url + "]")
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail))
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="series", title=">> Pagina siguiente",
url=urlparse.urljoin(item.url, next_page_url), viewmode="movie", thumbnail="", plot="",
folder=True))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<article.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
title = scrapertools.find_single_match(match, '<span>([^<]+)</span>')
if title == "":
title = scrapertools.find_single_match(match, '<a href="[^"]+" class="title link">([^<]+)</a>')
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, '<a href="([^"]+)"'))
thumbnail = scrapertools.find_single_match(match, '<div data-src="([^"]+)"')
if thumbnail == "":
thumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
logger.debug("title=[" + title + "], url=[" + url + "]")
itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail))
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="series", title=">> Pagina siguiente",
url=urlparse.urljoin(item.url, next_page_url), viewmode="movie", thumbnail="", plot="",
folder=True))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<h2>Cap(.*?)</ul>')
patron = '<li><a href="([^"]+)"><span>([^<]+)</span></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.htmlclean(scrapedtitle)
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url,
folder=True, fulltitle=title))
return itemlist
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
pattern = 'data-id="([^"]+)"'
list_servers = re.compile(pattern, re.DOTALL).findall(data)
list_urls = []
for _id in list_servers:
post = "id=%s" % _id
data = httptools.downloadpage("http://www.vertelenovelas.cc/goto/", post=post).data
list_urls.append(scrapertools.find_single_match(data, 'document\.location = "([^"]+)";'))
from core import servertools
itemlist = servertools.find_video_items(data=", ".join(list_urls))
for videoitem in itemlist:
# videoitem.title = item.title
videoitem.channel = item.channel
return itemlist


@@ -1,227 +1,453 @@
# -*- coding: utf-8 -*-
try:
from selenium.webdriver import PhantomJS
from contextlib import closing
linkbucks_support = True
except:
linkbucks_support = False
try:
from urllib.request import urlsplit, urlparse
except:
from urlparse import urlsplit, urlparse
import json
import os
import re
import time
from base64 import b64decode
import requests
class UnshortenIt(object):
_headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.69 Safari/537.36'}
_adfly_regex = r'adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
_shst_regex = r'sh\.st'
_this_dir, _this_filename = os.path.split(__file__)
_timeout = 10
def unshorten(self, uri, type=None, timeout=10):
domain = urlsplit(uri).netloc
self._timeout = timeout
if re.search(self._adfly_regex, domain, re.IGNORECASE) or type == 'adfly':
return self._unshorten_adfly(uri)
if re.search(self._adfocus_regex, domain, re.IGNORECASE) or type == 'adfocus':
return self._unshorten_adfocus(uri)
if re.search(self._linkbucks_regex, domain, re.IGNORECASE) or type == 'linkbucks':
if linkbucks_support:
return self._unshorten_linkbucks(uri)
else:
return uri, 'linkbucks.com not supported. Install selenium package to add support.'
if re.search(self._lnxlu_regex, domain, re.IGNORECASE) or type == 'lnxlu':
return self._unshorten_lnxlu(uri)
if re.search(self._shst_regex, domain, re.IGNORECASE):
return self._unshorten_shst(uri)
try:
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = requests.get(uri, timeout=self._timeout)
return r.url, r.status_code
# p.ost.im uses meta http refresh to redirect.
if domain == 'p.ost.im':
r = requests.get(uri, headers=self._headers, timeout=self._timeout)
uri = re.findall(r'.*url\=(.*?)\"\.*', r.text)[0]
return uri, 200
r = requests.head(uri, headers=self._headers, timeout=self._timeout)
while True:
if 'location' in r.headers:
r = requests.head(r.headers['location'])
uri = r.url
else:
return r.url, r.status_code
except Exception as e:
return uri, str(e)
def _unshorten_adfly(self, uri):
try:
r = requests.get(uri, headers=self._headers, timeout=self._timeout)
html = r.text
ysmm = re.findall(r"var ysmm =.*\;?", html)
if len(ysmm) > 0:
ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])
left = ''
right = ''
for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]:
left += c[0]
right = c[1] + right
decoded_uri = b64decode(left.encode() + right.encode())[2:].decode()
if re.search(r'go\.php\?u\=', decoded_uri):
decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode()
return decoded_uri, r.status_code
else:
return uri, 'No ysmm variable found'
except Exception as e:
return uri, str(e)
def _unshorten_linkbucks(self, uri):
try:
with closing(PhantomJS(
service_log_path=os.path.dirname(os.path.realpath(__file__)) + '/ghostdriver.log')) as browser:
browser.get(uri)
# wait 5 seconds
time.sleep(5)
page_source = browser.page_source
link = re.findall(r'skiplink(.*?)\>', page_source)
if link is not None:
link = re.sub(r'\shref\=|\"', '', link[0])
if link == '':
return uri, 'Failed to extract link.'
return link, 200
else:
return uri, 'Failed to extract link.'
except Exception as e:
return uri, str(e)
def _unshorten_adfocus(self, uri):
orig_uri = uri
try:
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "nl-NL,nl;q=0.8,en-US;q=0.6,en;q=0.4",
"Cache-Control": "no-cache",
"Pragma": "no-cache"
}
r = requests.get(uri, headers=http_header, timeout=self._timeout)
html = r.text
adlink = re.findall("click_url =.*;", html)
if len(adlink) > 0:
uri = re.sub('^click_url = "|"\;$', '', adlink[0])
if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri):
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11",
"Accept-Encoding": "gzip,deflate,sdch",
"Accept-Language": "en-US,en;,q=0.8",
"Connection": "keep-alive",
"Host": "adfoc.us",
"Cache-Control": "no-cache",
"Pragma": "no-cache",
"Referer": orig_uri,
}
r = requests.get(uri, headers=http_header, timeout=self._timeout)
uri = r.url
return uri, r.status_code
else:
return uri, 'No click_url variable found'
except Exception as e:
return uri, str(e)
def _unshorten_lnxlu(self, uri):
try:
r = requests.get(uri, headers=self._headers, timeout=self._timeout)
html = r.text
code = re.findall('/\?click\=(.*)\."', html)
if len(code) > 0:
payload = {'click': code[0]}
r = requests.get('http://lnx.lu/', params=payload, headers=self._headers, timeout=self._timeout)
return r.url, r.status_code
else:
return uri, 'No click variable found'
except Exception as e:
return uri, str(e)
def _unshorten_shst(self, uri):
try:
r = requests.get(uri, headers=self._headers, timeout=self._timeout)
html = r.text
session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.46 Safari/535.11",
"Accept-Encoding": "gzip,deflate,sdch",
"Accept-Language": "en-US,en;,q=0.8",
"Connection": "keep-alive",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "sh.st",
"Referer": uri,
"Origin": "http://sh.st",
"X-Requested-With": "XMLHttpRequest"
}
time.sleep(5)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = requests.get('http://sh.st/shortest-url/end-adsession', params=payload, headers=http_header,
timeout=self._timeout)
response = r.content[6:-2].decode('utf-8')
if r.status_code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
uri = resp_uri
else:
return uri, 'Error extracting url'
else:
return uri, 'Error extracting url'
return uri, r.status_code
except Exception as e:
return uri, str(e)
def unshorten(uri, type=None, timeout=10):
unshortener = UnshortenIt()
return unshortener.unshorten(uri, type, timeout)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urllib.parse import urlsplit, urlparse, parse_qs, urljoin
except:
from urlparse import urlsplit, urlparse, parse_qs, urljoin
import json
import os
import re
import time
import urllib
from base64 import b64decode
from platformcode import logger
import xbmc
from core import httptools
def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
rec = re.compile(regex, flags=flags)
match = rec.search(text)
if not match:
return False
return match.group(1)
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|q\.gs|j\.gs|u\.bb|ay\.gy|threadsphere\.bid|restorecosm\.bid'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
_shst_regex = r'sh\.st'
_hrefli_regex = r'href\.li'
_anonymz_regex = r'anonymz\.com'
_shrink_service_regex = r'shrink-service\.it'
_rapidcrypt_regex = r'rapidcrypt\.net'
_maxretries = 5
_this_dir, _this_filename = os.path.split(__file__)
_timeout = 10
def unshorten(self, uri, type=None):
domain = urlsplit(uri).netloc
if not domain:
return uri, "No domain found in URI!"
had_google_outbound, uri = self._clear_google_outbound_proxy(uri)
if re.search(self._adfly_regex, domain,
re.IGNORECASE) or type == 'adfly':
return self._unshorten_adfly(uri)
if re.search(self._adfocus_regex, domain,
re.IGNORECASE) or type == 'adfocus':
return self._unshorten_adfocus(uri)
if re.search(self._linkbucks_regex, domain,
re.IGNORECASE) or type == 'linkbucks':
return self._unshorten_linkbucks(uri)
if re.search(self._lnxlu_regex, domain,
re.IGNORECASE) or type == 'lnxlu':
return self._unshorten_lnxlu(uri)
if re.search(self._shrink_service_regex, domain, re.IGNORECASE):
return self._unshorten_shrink_service(uri)
if re.search(self._shst_regex, domain, re.IGNORECASE):
return self._unshorten_shst(uri)
if re.search(self._hrefli_regex, domain, re.IGNORECASE):
return self._unshorten_hrefli(uri)
if re.search(self._anonymz_regex, domain, re.IGNORECASE):
return self._unshorten_anonymz(uri)
if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE):
return self._unshorten_rapidcrypt(uri)
return uri, 200
def unwrap_30x(self, uri, timeout=10):
domain = urlsplit(uri).netloc
self._timeout = timeout
        loop_counter = 0
        retries = 0
        try:
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = httptools.downloadpage(uri, timeout=self._timeout)
return r.url, r.code
# p.ost.im uses meta http refresh to redirect.
if domain == 'p.ost.im':
r = httptools.downloadpage(uri, timeout=self._timeout)
uri = re.findall(r'.*url\=(.*?)\"\.*', r.data)[0]
return uri, r.code
else:
            while True:
                if loop_counter > 5:
                    raise ValueError("Infinitely looping redirect from URL: '%s'" %
                                     (uri,))
r = httptools.downloadpage(
uri,
timeout=self._timeout,
follow_redirects=False,
only_headers=True)
if not r.success:
return uri, -1
if 'location' in r.headers and retries < self._maxretries:
r = httptools.downloadpage(
r.headers['location'],
follow_redirects=False,
only_headers=True)
uri = r.url
loop_counter += 1
retries = retries + 1
else:
return r.url, r.code
except Exception as e:
return uri, str(e)
def _clear_google_outbound_proxy(self, url):
'''
So google proxies all their outbound links through a redirect so they can detect outbound links.
This call strips them out if they are present.
This is useful for doing things like parsing google search results, or if you're scraping google
docs, where google inserts hit-counters on all outbound links.
'''
# This is kind of hacky, because we need to check both the netloc AND
# part of the path. We could use urllib.parse.urlsplit, but it's
# easier and just as effective to use string checks.
if url.startswith("http://www.google.com/url?") or \
url.startswith("https://www.google.com/url?"):
qs = urlparse(url).query
query = parse_qs(qs)
if "q" in query: # Google doc outbound links (maybe blogspot, too)
return True, query["q"].pop()
elif "url" in query: # Outbound links from google searches
return True, query["url"].pop()
else:
raise ValueError(
"Google outbound proxy URL without a target url ('%s')?" %
url)
return False, url
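    # Hypothetical example: _clear_google_outbound_proxy(
    #     "https://www.google.com/url?q=http://example.com/page")
    # returns (True, "http://example.com/page"); any non-Google URL is returned
    # unchanged as (False, url).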
def _unshorten_adfly(self, uri):
logger.info()
try:
r = httptools.downloadpage(
uri, timeout=self._timeout, cookies=False)
html = r.data
ysmm = re.findall(r"var ysmm =.*\;?", html)
if len(ysmm) > 0:
ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])
left = ''
right = ''
for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]:
left += c[0]
right = c[1] + right
# Additional digit arithmetic
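                # (re-join the alternating halves, XOR consecutive digit pairs,
                #  then base64-decode and trim 16 bytes from each end)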
encoded_uri = list(left + right)
numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n))
for first, second in zip(numbers, numbers):
xor = int(first[1]) ^ int(second[1])
if xor < 10:
encoded_uri[first[0]] = str(xor)
decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode()
if re.search(r'go\.php\?u\=', decoded_uri):
decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode()
return decoded_uri, r.code
else:
return uri, 'No ysmm variable found'
except Exception as e:
return uri, str(e)
def _unshorten_linkbucks(self, uri):
'''
(Attempt) to decode linkbucks content. HEAVILY based on the OSS jDownloader codebase.
This has necessidated a license change.
'''
r = httptools.downloadpage(uri, timeout=self._timeout)
firstGet = time.time()
baseloc = r.url
if "/notfound/" in r.url or \
"(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)" in r.data:
return uri, 'Error: Link not found or requires a survey!'
link = None
content = r.data
regexes = [
r"<div id=\"lb_header\">.*?/a>.*?<a.*?href=\"(.*?)\".*?class=\"lb",
r"AdBriteInit\(\"(.*?)\"\)",
r"Linkbucks\.TargetUrl = '(.*?)';",
r"Lbjs\.TargetUrl = '(http://[^<>\"]*?)'",
r"src=\"http://static\.linkbucks\.com/tmpl/mint/img/lb\.gif\" /></a>.*?<a href=\"(.*?)\"",
r"id=\"content\" src=\"([^\"]*)",
]
for regex in regexes:
if self.inValidate(link):
link = find_in_text(regex, content)
if self.inValidate(link):
match = find_in_text(r"noresize=\"[0-9+]\" src=\"(http.*?)\"", content)
if match:
link = find_in_text(r"\"frame2\" frameborder.*?src=\"(.*?)\"", content)
if self.inValidate(link):
scripts = re.findall("(<script type=\"text/javascript\">[^<]+</script>)", content)
if not scripts:
return uri, "No script bodies found?"
js = False
for script in scripts:
# cleanup
script = re.sub(r"[\r\n\s]+\/\/\s*[^\r\n]+", "", script)
if re.search(r"\s*var\s*f\s*=\s*window\['init'\s*\+\s*'Lb'\s*\+\s*'js'\s*\+\s*''\];[\r\n\s]+", script):
js = script
if not js:
return uri, "Could not find correct script?"
token = find_in_text(r"Token\s*:\s*'([a-f0-9]{40})'", js)
if not token:
token = find_in_text(r"\?t=([a-f0-9]{40})", js)
assert token
authKeyMatchStr = r"A(?:'\s*\+\s*')?u(?:'\s*\+\s*')?t(?:'\s*\+\s*')?h(?:'\s*\+\s*')?K(?:'\s*\+\s*')?e(?:'\s*\+\s*')?y"
l1 = find_in_text(r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s*(\d+?);", js)
l2 = find_in_text(
r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s?params\['" + authKeyMatchStr + r"'\]\s*\+\s*(\d+?);",
js)
if any([not l1, not l2, not token]):
return uri, "Missing required tokens?"
authkey = int(l1) + int(l2)
p1_url = urljoin(baseloc, "/director/?t={tok}".format(tok=token))
r2 = httptools.downloadpage(p1_url, timeout=self._timeout)
p1_url = urljoin(baseloc, "/scripts/jquery.js?r={tok}&{key}".format(tok=token, key=l1))
r2_1 = httptools.downloadpage(p1_url, timeout=self._timeout)
time_left = 5.033 - (time.time() - firstGet)
        xbmc.sleep(int(max(time_left, 0) * 1000))
p3_url = urljoin(baseloc, "/intermission/loadTargetUrl?t={tok}&aK={key}&a_b=false".format(tok=token,
key=str(authkey)))
r3 = httptools.downloadpage(p3_url, timeout=self._timeout)
resp_json = json.loads(r3.data)
if "Url" in resp_json:
return resp_json['Url'], r3.code
return "Wat", "wat"
def inValidate(self, s):
# Original conditional:
# (s == null || s != null && (s.matches("[\r\n\t ]+") || s.equals("") || s.equalsIgnoreCase("about:blank")))
if not s:
return True
if re.search("[\r\n\t ]+", s) or s.lower() == "about:blank":
return True
else:
return False
def _unshorten_adfocus(self, uri):
orig_uri = uri
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
adlink = re.findall("click_url =.*;", html)
if len(adlink) > 0:
uri = re.sub('^click_url = "|"\;$', '', adlink[0])
if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri):
http_header = dict()
http_header["Host"] = "adfoc.us"
http_header["Referer"] = orig_uri
r = httptools.downloadpage(uri, headers=http_header, timeout=self._timeout)
uri = r.url
return uri, r.code
else:
return uri, 'No click_url variable found'
except Exception as e:
return uri, str(e)
def _unshorten_lnxlu(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
code = re.findall('/\?click\=(.*)\."', html)
if len(code) > 0:
payload = {'click': code[0]}
r = httptools.downloadpage(
'http://lnx.lu?' + urllib.urlencode(payload),
timeout=self._timeout)
return r.url, r.code
else:
return uri, 'No click variable found'
except Exception as e:
return uri, str(e)
def _unshorten_shst(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = dict()
http_header["Content-Type"] = "application/x-www-form-urlencoded"
http_header["Host"] = "sh.st"
http_header["Referer"] = uri
http_header["Origin"] = "http://sh.st"
http_header["X-Requested-With"] = "XMLHttpRequest"
xbmc.sleep(5 * 1000)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = httptools.downloadpage(
'http://sh.st/shortest-url/end-adsession?' +
urllib.urlencode(payload),
headers=http_header,
timeout=self._timeout)
response = r.data[6:-2].decode('utf-8')
if r.code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
uri = resp_uri
else:
return uri, 'Error extracting url'
else:
return uri, 'Error extracting url'
return uri, r.code
except Exception as e:
return uri, str(e)
def _unshorten_hrefli(self, uri):
try:
# Extract url from query
parsed_uri = urlparse(uri)
extracted_uri = parsed_uri.query
if not extracted_uri:
return uri, 200
# Get url status code
r = httptools.downloadpage(
extracted_uri,
timeout=self._timeout,
follow_redirects=False,
only_headers=True)
return r.url, r.code
except Exception as e:
return uri, str(e)
def _unshorten_anonymz(self, uri):
# For the moment they use the same system as hrefli
return self._unshorten_hrefli(uri)
def _unshorten_shrink_service(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r"<input type='hidden' name='\d+' id='\d+' value='([^']+)'>", html)[0]
from core import scrapertools
uri = scrapertools.decodeHtmlentities(uri)
uri = uri.replace("&sol;", "/") \
.replace("&colon;", ":") \
.replace("&period;", ".") \
.replace("&excl;", "!") \
.replace("&num;", "#") \
.replace("&quest;", "?") \
.replace("&lowbar;", "_")
return uri, r.code
except Exception as e:
return uri, str(e)
def _unshorten_rapidcrypt(self, uri):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r'<a class="button" href="([^"]+)">Click to continue</a>', html)[0]
return uri, r.code
except Exception as e:
return uri, str(e)
def unwrap_30x_only(uri, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
return uri, status
def unshorten_only(uri, type=None, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unshorten(uri, type=type)
return uri, status
def unshorten(uri, type=None, timeout=10):
unshortener = UnshortenIt()
uri, status = unshortener.unshorten(uri, type=type)
if status == 200:
uri, status = unshortener.unwrap_30x(uri, timeout=timeout)
return uri, status


@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "clipwatching.com/(.*?).html",
"pattern": "clipwatching.com/(\\w+)",
"url": "http://clipwatching.com/\\1.html"
}
]
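The tightened pattern captures only the video id, so clipwatching links with or without the trailing .html are recognized and rewritten to the canonical embed URL via the \1 back-reference in the url template. A minimal, hypothetical sketch of how such a pattern entry resolves links; the actual matching is done by alfa's servertools, which this diff does not show:

import re

# Hypothetical stand-in for the single entry of the "patterns" list above.
pattern = {"pattern": "clipwatching.com/(\\w+)", "url": "http://clipwatching.com/\\1.html"}

def resolve(text):
    links = []
    for video_id in re.findall(pattern["pattern"], text):
        # The captured id replaces the \1 placeholder in the url template.
        links.append(pattern["url"].replace("\\1", video_id))
    return links

print(resolve("clipwatching.com/abc123 and http://clipwatching.com/xyz789.html"))
# ['http://clipwatching.com/abc123.html', 'http://clipwatching.com/xyz789.html']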


@@ -12,6 +12,8 @@ def test_video_exists(page_url):
return False, "[Downace] El video ha sido borrado"
if "please+try+again+later." in data:
return False, "[Downace] Error de downace, no se puede generar el enlace al video"
if "File has been removed due to inactivity" in data:
return False, "[Downace] El archivo ha sido removido por inactividad"
return True, ""


@@ -18,66 +18,14 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
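    # Hit the on-page counter and wait a few seconds, as the site does, before
    # posting the download form below.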
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.bz/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
httptools.downloadpage(cgi_counter, cookies=False)
time.sleep(6)
url_playitnow = "https://www.flashx.bz/dl?playitnow"
fid = scrapertools.find_single_match(data, 'input type="hidden" name="id" value="([^"]*)"')
fname = scrapertools.find_single_match(data, 'input type="hidden" name="fname" value="([^"]*)"')
fhash = scrapertools.find_single_match(data, 'input type="hidden" name="hash" value="([^"]*)"')
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
post_parameters = {
"op": "download1",
"usr_login": "",
"id": fid,
"fname": fname,
"referer": "https://www.flashx.bz/",
"hash": fhash,
"imhuman": "Continue To Video"
}
data = httptools.downloadpage(url_playitnow, urllib.urlencode(post_parameters), headers=headers).data
video_urls = []
media_urls = scrapertools.find_multiple_matches(data, "{src: '([^']+)'.*?,label: '([^']+)'")
subtitle = ""
for media_url, label in media_urls:
if media_url.endswith(".srt") and label == "Spanish":
try:
from core import filetools
data = httptools.downloadpage(media_url)
subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
filetools.write(subtitle, data)
except:
import traceback
logger.info("Error al descargar el subtítulo: " + traceback.format_exc())
for media_url, label in media_urls:
if not media_url.endswith("png") and not media_url.endswith(".srt"):
video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
def get_video_url_anterior(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.ws/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.bz/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.ws/dl[^"]+')
playnow = scrapertools.find_single_match(data, 'https://www.flashx.bz/dl[^"]+')
# Para obtener el f y el fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.ws/js\w+/c\w+.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.bz/js\w+/c\w+.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -87,7 +35,7 @@ def get_video_url_anterior(page_url, premium=False, user="", password="", video_
logger.info("mfxfxfx2= %s" %pfxfx)
if pfxfx == "":
pfxfx = "ss=yes&f=fail&fxfx=6"
coding_url = 'https://www.flashx.ws/flashx.php?%s' %pfxfx
coding_url = 'https://www.flashx.bz/flashx.php?%s' %pfxfx
# {f: 'y', fxfx: '6'}
bloque = scrapertools.find_single_match(data, '(?s)Form method="POST" action(.*?)span')
flashx_id = scrapertools.find_single_match(bloque, 'name="id" value="([^"]+)"')


@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://vidlox.(?:tv|me)/embed-.*?.html)",
"pattern": "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)",
"url": "\\1"
}
]
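The added (?i) inline flag makes the embed pattern case-insensitive, so links with a mixed-case host are still detected. A small, hypothetical check of that behaviour:

import re

pattern = "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)"
link = "https://VIDLOX.me/embed-a1b2c3d4.html"  # illustrative link, not a real id
# Without (?i) the upper-case host would not match; with the flag the full link is captured.
print(re.findall(pattern, link))
# ['https://VIDLOX.me/embed-a1b2c3d4.html']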