Merge remote-tracking branch 'upstream/master'

Kingbox
2018-05-23 21:15:39 +02:00
23 changed files with 1649 additions and 345 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.5.13" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.5.14" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,11 +19,17 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» plusdede » cinefox
» kbagi/diskokosmiko » ultrapeliculashd
» mejortorrent » allcalidad
» seriespapaya » seriesdanko
» speedvideo » yourupload
» miradetodo » solocastellano
» descargacineclasico » poseidonhd
» estadepelis » pelismedia
» doramasmp4 » descargas2020
» mejortorrent » mispelisyseries
» torrentlocura » torrentrapid
» tumejortorrent » tvsinpagar
¤ arreglos internos
¤ Agradecimientos al equipo SOD, @angedam, @alaquepasa por colaborar con esta versión.
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -1,15 +1,15 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channelselector import get_thumb
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import Tmdb
from platformcode import logger
from servers.decrypters import expurl
from core import scrapertools, httptools
from core import servertools
from core import tmdb
from core.item import Item
from lib import unshortenit
host = "http://www.descargacineclasico.net"
def agrupa_datos(data):
@@ -22,54 +22,36 @@ def agrupa_datos(data):
def mainlist(item):
logger.info()
thumb_buscar = get_thumb("search.png")
itemlist = []
itemlist.append(Item(channel=item.channel, title="Últimas agregadas", action="agregadas",
url="http://www.descargacineclasico.net/", viewmode="movie_with_plot",
url=host, viewmode="movie_with_plot",
thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title="Listado por género", action="porGenero",
url="http://www.descargacineclasico.net/",
url=host,
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(
Item(channel=item.channel, title="Buscar", action="search", url="http://www.descargacineclasico.net/",
Item(channel=item.channel, title="Buscar", action="search", url=host,
thumbnail=get_thumb('search', auto=True)))
return itemlist
def porGenero(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
logger.info("data=" + data)
data = httptools.downloadpage(item.url).data
patron = '<ul class="columnas">(.*?)</ul>'
data = re.compile(patron, re.DOTALL).findall(data)
patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data[0])
for url, genero in matches:
itemlist.append(
Item(channel=item.channel, action="agregadas", title=genero, url=url, viewmode="movie_with_plot"))
data = re.compile(patron,re.DOTALL).findall(data)
patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)'
matches = re.compile(patron,re.DOTALL).findall(data[0])
for url,genero in matches:
itemlist.append( Item(channel=item.channel , action="agregadas" , title=genero,url=url, viewmode="movie_with_plot"))
return itemlist
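porGenero() shows the two-pass scraping style used throughout the channel: first isolate the <ul class="columnas"> block, then run a simpler link pattern inside it. A minimal standalone sketch of that pattern (the sample HTML is invented):
import re
html = ('<ul class="columnas"><li><a href="/genero/drama">Drama</a></li>'
        '<li><a href="/genero/cine-negro">Cine negro</a></li></ul>')
# pass 1: narrow to the genre list; pass 2: extract (url, genre) pairs
block = re.findall(r'<ul class="columnas">(.*?)</ul>', html, re.DOTALL)[0]
links = re.findall(r'<li.*?>.*?href="([^"]+).*?>([^<]+)', block, re.DOTALL)
print(links)  # [('/genero/drama', 'Drama'), ('/genero/cine-negro', 'Cine negro')]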
def search(item, texto):
def search(item,texto):
logger.info()
'''
texto_get = texto.replace(" ","%20")
texto_post = texto.replace(" ","+")
item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post)
'''
texto = texto.replace(" ", "+")
item.url = "http://www.descargacineclasico.net/?s=" + texto
item.url = host + "?s=" + texto
try:
return agregadas(item)
# Catch the exception so the global search is not interrupted if one channel fails
@@ -83,98 +65,62 @@ def search(item, texto):
def agregadas(item):
logger.info()
itemlist = []
'''
# Download the page
if "?search=" in item.url:
url_search = item.url.split("?search=")
data = scrapertools.cache_page(url_search[0], url_search[1])
else:
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
'''
data = scrapertools.cache_page(item.url)
logger.info("data=" + data)
# Extract the entries
fichas = re.sub(r"\n|\s{2}", "", scrapertools.get_match(data, '<div class="review-box-container">(.*?)wp-pagenavi'))
# <a href="http://www.descargacineclasico.net/ciencia-ficcion/quatermass-2-1957/"
# title="Quatermass II (Quatermass 2) (1957) Descargar y ver Online">
# <img style="border-radius:6px;"
# src="//www.descargacineclasico.net/wp-content/uploads/2015/12/Quatermass-II-2-1957.jpg"
# alt="Quatermass II (Quatermass 2) (1957) Descargar y ver Online Gratis" height="240" width="160">
patron = '<div class="post-thumbnail"><a href="([^"]+)".*?' # url
patron += 'title="([^"]+)".*?' # title
patron += 'src="([^"]+).*?' # thumbnail
patron += '<p>([^<]+)' # plot
matches = re.compile(patron, re.DOTALL).findall(fichas)
data = httptools.downloadpage(item.url).data
fichas = re.sub(r"\n|\s{2}","",scrapertools.get_match(data,'<div class="review-box-container">(.*?)wp-pagenavi'))
patron = '<div class="post-thumbnail"><a href="([^"]+)".*?' # url
patron+= 'title="([^"]+)".*?' # title
patron+= 'src="([^"]+).*?' # thumbnail
patron+= '<p>([^<]+)' # plot
matches = re.compile(patron,re.DOTALL).findall(fichas)
for url, title, thumbnail, plot in matches:
title = title[0:title.find("Descargar y ver Online")]
url = urlparse.urljoin(item.url, url)
thumbnail = urlparse.urljoin(url, thumbnail)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, show=title))
title = title.replace("Descargar y ver Online","").strip()
year = scrapertools.find_single_match(title, '\(([0-9]{4})')
fulltitle = title.replace("(%s)" %year,"").strip()
itemlist.append( Item(action="findvideos",
channel=item.channel,
contentSerieName="",
title=title+" ",
fulltitle=fulltitle ,
infoLabels={'year':year},
url=url ,
thumbnail=thumbnail,
plot=plot,
show=title) )
scrapertools.printMatches(itemlist)
tmdb.set_infoLabels(itemlist)
# Pagination
try:
# <ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li><li><span><a href="3">3</a></span></li><li><span><a href="4">4</a></span></li><li><span><a href="5">5</a></span></li><li><span><a href="6">6</a></span></li></ul>
patron_nextpage = r'<a class="nextpostslink" rel="next" href="([^"]+)'
next_page = re.compile(patron_nextpage, re.DOTALL).findall(data)
itemlist.append(Item(channel=item.channel, action="agregadas", title="Página siguiente >>", url=next_page[0],
viewmode="movie_with_plot"))
except:
pass
next_page = re.compile(patron_nextpage,re.DOTALL).findall(data)
itemlist.append( Item(channel=item.channel, action="agregadas", title="Página siguiente >>" , url=next_page[0], viewmode="movie_with_plot") )
except: pass
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.unescape(data)
titulo = item.title
titulo_tmdb = re.sub("([0-9+])", "", titulo.strip())
oTmdb = Tmdb(texto_buscado=titulo_tmdb, idioma_busqueda="es")
item.fanart = oTmdb.get_backdrop()
# Download the page
# data = scrapertools.cache_page(item.url)
patron = '#div_\d_\D.+?<img id="([^"]+).*?<span>.*?</span>.*?<span>(.*?)</span>.*?imgdes.*?imgdes/([^\.]+).*?<a href=([^\s]+)' # Also capture quality
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '#div_\d_\D.+?<img id="([^"]+).*?<span>.*?</span>.*?<span>(.*?)</span>.*?imgdes.*?imgdes/([^\.]+).*?<a href=([^\s]+)' # Also capture quality
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
title = titulo + "_" + scrapedidioma + "_" + scrapedserver + "_" + scrapedcalidad
itemlist.append(Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.show, fanart=item.fanart))
while True:
loc = httptools.downloadpage(scrapedurl, follow_redirects=False).headers.get("location", "")
if not loc or "/ad/locked" in loc:
break
scrapedurl = loc
scrapedurl = scrapedurl.replace('"','')
scrapedurl, c = unshortenit.unshorten_only(scrapedurl)
title = item.title + "_" + scrapedidioma + "_"+ scrapedserver + "_" + scrapedcalidad
itemlist.append( item.clone(action="play",
title=title,
url=scrapedurl) )
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
def play(item):
logger.info()
video = expurl.expand_url(item.url)
itemlist = []
itemlist = servertools.find_video_items(data=video)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
item.thumbnail = item.contentThumbnail
return [item]
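The rewritten findvideos() drops the old expurl-based play() in favor of chasing redirects by hand: it requests each link with follow_redirects=False, hops along the Location headers until the chain ends or hits an /ad/locked interstitial, and only then passes the final URL to unshortenit. A minimal sketch of that loop, using requests in place of the addon's httptools (an assumption made to keep the example self-contained):
import requests

def resolve_redirects(url, max_hops=10):
    # follow Location headers manually, stopping at ad-lock interstitials
    for _ in range(max_hops):
        resp = requests.get(url, allow_redirects=False, timeout=10)
        loc = resp.headers.get("location", "")
        if not loc or "/ad/locked" in loc:
            break
        url = loc
    return url.replace('"', '')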

View File

@@ -27,8 +27,9 @@
"visible": true,
"lvalues": [
"No filtrar",
"VOSE"
"VOSE",
"VO"
]
}
]
}
}

View File

@@ -18,7 +18,7 @@ from channelselector import get_thumb
host = 'https://www.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE'}
IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
@@ -38,7 +38,7 @@ def mainlist(item):
itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
url=host + 'catalogue?type[]=pelicula', thumbnail=get_thumb('movies', auto=True),
url=host + 'catalogue?format=pelicula', thumbnail=get_thumb('movies', auto=True),
type='movie'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?q=',
thumbnail=get_thumb('search', auto=True)))
@@ -63,9 +63,8 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
patron = '<a class=item_episode href=(.*?) title=.*?<img src=(.*?) title=.*?title>(.*?)'
patron += '</div> <div class=options> <span>(.*?)</span>'
patron = '<div class=col-lg-2 col-md-3 col-6><a href=(.*?) title=.*?'
patron += '<img src=(.*?) alt=(.*?) class=img-fluid>.*?bg-primary text-capitalize>(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -107,8 +106,8 @@ def latest_episodes(item):
itemlist = []
infoLabels = dict()
data = get_source(item.url)
patron = '<a class=episode href=(.*?) title=.*?<img src=(.*?) title=.*?title>(.*?)</div>.*?episode>(.*?)</div>'
patron = '<div class=col-lg-3 col-md-6 mb-2><a href=(.*?) title=.*?'
patron +='<img src=(.*?) alt.*?truncate-width>(.*?)<.*?mb-1>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
@@ -116,6 +115,7 @@ def latest_episodes(item):
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
title=title, contentSerieName=contentSerieName, type='episode'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -125,8 +125,9 @@ def episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<li class=link_episode><a itemprop=url href=(.*?) title=.*?itemprop=name>(.*?)'
patron += '</span></a><meta itemprop=episodeNumber content=(.*?) /></li>'
logger.debug(data)
patron = '<a itemprop=url href=(.*?) title=.*? class=media.*?truncate-width>(.*?)<.*?'
patron +='text-muted mb-1>Capítulo (.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -139,7 +140,7 @@ def episodes(item):
infoLabels['episode'] = contentEpisodeNumber
if scrapedtitle != '':
title = scrapedtitle
title = '%sx%s - %s' % ('1',scrapedep, scrapedtitle)
else:
title = 'episodio %s' % scrapedep
@@ -148,7 +149,12 @@ def episodes(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
contentEpisodeNumber=contentEpisodeNumber, type='episode', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodes", text_color='yellow'))
return itemlist
def findvideos(item):
@@ -156,51 +162,69 @@ def findvideos(item):
itemlist = []
duplicated = []
headers={'referer':item.url}
data = get_source(item.url)
logger.debug(data)
patron = 'animated pulse data-url=(.*?)>'
matches = re.compile(patron, re.DOTALL).findall(data)
if '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data:
language = IDIOMAS['VO']
else:
language = IDIOMAS['sub']
if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
item.type = 'dorama'
item.contentSerieName = item.contentTitle
item.contentTitle = ''
return episodes(item)
else:
itemlist.extend(servertools.find_video_items(data=data))
for video_item in itemlist:
if 'sgl.php' in video_item.url:
headers = {'referer': item.url}
patron_gvideo = "'file':'(.*?)','type'"
data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data
video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
duplicated.append(video_item.url)
video_item.channel = item.channel
video_item.infoLabels = item.infoLabels
video_item.language=IDIOMAS['sub']
patron = 'var item = {id: (\d+), episode: (\d+),'
matches = re.compile(patron, re.DOTALL).findall(data)
for id, episode in matches:
data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data)
sources = data_json['options']
for src in sources:
url = sources[src]
if 'sgl.php' in url:
headers = {'referer':item.url}
patron_gvideo = "'file':'(.*?)','type'"
data_gvideo = httptools.downloadpage(url, headers = headers).data
url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
infoLabels=item.infoLabels)
if url != '' and url not in duplicated:
itemlist.append(new_item)
duplicated.append(url)
try:
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
except:
pass
for video_url in matches:
video_data = httptools.downloadpage(video_url, headers=headers).data
server = ''
if 'Media player DMP4' in video_data:
url = scrapertools.find_single_match(video_data, "sources: \[\{'file':'(.*?)'")
server = 'Directo'
else:
url = scrapertools.find_single_match(video_data, '<iframe src="(.*?)".*?scrolling="no"')
new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
if server !='':
new_item.server = server
itemlist.append(new_item)
# for video_item in itemlist:
# if 'sgl.php' in video_item.url:
# headers = {'referer': item.url}
# patron_gvideo = "'file':'(.*?)','type'"
# data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data
# video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
#
# duplicated.append(video_item.url)
# video_item.channel = item.channel
# video_item.infoLabels = item.infoLabels
# video_item.language=IDIOMAS['sub']
#
# patron = 'var item = {id: (\d+), episode: (\d+),'
# matches = re.compile(patron, re.DOTALL).findall(data)
#
# for id, episode in matches:
# data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data)
# sources = data_json['options']
# for src in sources:
# url = sources[src]
#
# if 'sgl.php' in url:
# headers = {'referer':item.url}
# patron_gvideo = "'file':'(.*?)','type'"
# data_gvideo = httptools.downloadpage(url, headers = headers).data
# url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
#
# new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
# infoLabels=item.infoLabels)
# if url != '' and url not in duplicated:
# itemlist.append(new_item)
# duplicated.append(url)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
# Required by FilterTools
@@ -215,8 +239,13 @@ def findvideos(item):
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.type = 'search'
if texto != '':
return list_all(item)
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist
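Note the deferred-title pattern used in this findvideos(): each Item is created with the placeholder title '[%s] [%s]', and servertools.get_servers_itemlist() fills it in once the server is known, via the lambda x.title % (x.server.capitalize(), x.language). A reduced sketch of that mechanism (the Item class and server detection here are simplified stand-ins, not the addon's real API):
class Item(object):
    def __init__(self, title, url, language):
        self.title, self.url, self.language = title, url, language
        self.server = ''

def fill_titles(itemlist, title_builder):
    # stand-in for get_servers_itemlist: detect the server, then render the title
    for it in itemlist:
        it.server = 'openload' if 'openload' in it.url else 'directo'
        it.title = title_builder(it)
    return itemlist

items = [Item('[%s] [%s]', 'https://openload.co/embed/x', 'VOSE')]
items = fill_titles(items, lambda x: x.title % (x.server.capitalize(), x.language))
print(items[0].title)  # [Openload] [VOSE]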

View File

@@ -28,7 +28,8 @@
"lvalues": [
"No filtrar",
"Latino",
"VOS"
"VOS",
"Castellano"
]
},
{

View File

@@ -22,7 +22,7 @@ list_quality = []
list_servers = ['yourupload', 'openload', 'sendvid']
vars = {
'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://www.estadepelis.com/',
'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://tawnestdplsnetps.pw/',
'b48699bb49d4550f27879deeb948d4f7d9c5949a8': 'embed',
'JzewJkLlrvcFnLelj2ikbA': 'php?url=',
'p889c6853a117aca83ef9d6523335dc065213ae86': 'player',
@@ -194,8 +194,8 @@ def generos(item):
itemlist = []
norep = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-.*?"><a href="([^"]+)">([^<]+)<\/a>'
logger.debug(data)
patron = '<li class="cat-item cat-item-.*?"><a href="([^"]+)".*?>([^<]+)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -342,7 +342,6 @@ def findvideos(item):
langs = dict()
data = httptools.downloadpage(item.url).data
logger.debug('data: %s' % data)
patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -365,26 +364,28 @@ def findvideos(item):
url = dec(encurl)
title = ''
server = ''
servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk'}
servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk',
'/jk':'streamcherry'}
server_id = re.sub(r'.*?embed|\.php.*', '', url)
if server_id and server_id in servers:
server = servers[server_id]
logger.debug('server_id: %s' % server_id)
if langs[scrapedlang] in list_language:
if (scrapedlang in langs) and langs[scrapedlang] in list_language:
language = IDIOMAS[langs[scrapedlang]]
else:
language = 'Latino'
if langs[scrapedlang] == 'Latino':
idioma = '[COLOR limegreen]LATINO[/COLOR]'
elif langs[scrapedlang] == 'Sub Español':
idioma = '[COLOR red]SUB[/COLOR]'
#
# if langs[scrapedlang] == 'Latino':
# idioma = '[COLOR limegreen]LATINO[/COLOR]'
# elif langs[scrapedlang] == 'Sub Español':
# idioma = '[COLOR red]SUB[/COLOR]'
if item.extra == 'peliculas':
title = item.contentTitle + ' (' + server + ') ' + idioma
title = item.contentTitle + ' (' + server + ') ' + language
plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
else:
title = item.contentSerieName + ' (' + server + ') ' + idioma
title = item.contentSerieName + ' (' + server + ') ' + language
plot = item.plot
thumbnail = servertools.guess_server_thumbnail(title)
@@ -399,7 +400,6 @@ def findvideos(item):
quality='',
language=language
))
logger.debug('url: %s' % url)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
@@ -423,23 +423,13 @@ def findvideos(item):
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, add_referer=True).data
data = httptools.downloadpage(item.url).data
if 'your' in item.url:
item.url = 'http://www.yourupload.com/embed/' + scrapertools.find_single_match(data, 'src=".*?code=(.*?)"')
itemlist.append(item)
else:
itemlist = servertools.find_video_items(data=data)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
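The server lookup in findvideos() works by stripping everything up to 'embed' and everything from '.php' onward, leaving a short key such as '/opl' that indexes the servers dict (now extended with '/jk' for streamcherry). A standalone sketch (the sample URL is invented):
import re

servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid',
           '/face': 'netutv', '/vk': 'vk', '/jk': 'streamcherry'}

def server_from_url(url):
    # drop the ".*?embed" prefix and the ".php..." suffix, keep the short key
    server_id = re.sub(r'.*?embed|\.php.*', '', url)
    return servers.get(server_id, '')

print(server_from_url('http://example.pw/embed/opl.php?url=abc'))  # openload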

View File

@@ -323,6 +323,7 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for option, videoitem in matches:
sub = ''
lang = scrapertools.find_single_match(src,
'<a href=#(?:div|player)%s.*?>.*?(.*?)<\/a>' % option)
if 'audio ' in lang.lower():
@@ -333,12 +334,21 @@ def findvideos(item):
video_urls = scrapertools.find_multiple_matches(data, '<li><a href=(.*?)><span')
for video in video_urls:
video_data = get_source(video)
if 'fastplay' not in video:
if sub == '' and 'sub' in lang:
sub_file = scrapertools.find_single_match(video, '&sub=([^+]+)')
sub = 'http://miradetodo.io/stream/subt/%s' % sub_file
if 'openload' in video or 'your' in video:
new_url= scrapertools.find_single_match(video_data,'<li><a href=(.*?srt)><span')
data_final = get_source(new_url)
else:
data_final=video_data
url = scrapertools.find_single_match(data_final,'iframe src=(.*?) scrolling')
if url == '':
url = scrapertools.find_single_match(data_final, "'file':'(.*?)'")
quality = item.quality
server = servertools.get_server_from_url(url)
title = item.contentTitle + ' [%s] [%s]' % (server, lang)
@@ -346,8 +356,8 @@ def findvideos(item):
title = item.contentTitle + ' [%s] [%s] [%s]' % (server, quality, lang)
if url!='':
itemlist.append(item.clone(title=title, url=url, action='play', server=server, language=lang))
itemlist.append(item.clone(title=title, url=url, action='play', language=lang, subtitle=sub))
itemlist = servertools.get_servers_itemlist(itemlist)
if item.infoLabels['mediatype'] == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,

View File

@@ -0,0 +1,88 @@
{
"id": "pelismedia",
"name": "PelisMedia",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s14.postimg.cc/eclmujsch/12715507_1112827435402340_7302361220060367711_n.jpg",
"categories": ["movie", "tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1",
"Ninguno"
]
},
{
"id": "episodios_x_pag",
"type": "list",
"label": "Episodios por página",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"10",
"15",
"20",
"25",
"30"
]
},
{
"id": "temporada_o_todos",
"type": "bool",
"label": "Mostrar temporadas",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,289 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
__perfil__ = int(config.get_setting('perfil', 'pelismedia'))
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
if __perfil__ < 3:
color1, color2, color3 = perfil[__perfil__]
else:
color1 = color2 = color3 = ""
host="http://www.pelismedia.com"
def mainlist(item):
logger.info()
itemlist = []
item.thumbnail = get_thumb('movies', auto=True)
itemlist.append(item.clone(title="Películas:", folder=False, text_color="0xFFD4AF37", text_bold=True))
itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host,
thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Estrenos", action = "peliculas", url = host + "/genero/estrenos/",
thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Por género", action = "genero", url = host + "/genero/",
thumbnail=get_thumb('genres', auto=True) ))
item.thumbnail = get_thumb('tvshows', auto=True)
itemlist.append(item.clone(title="Series:", folder=False, text_color="0xFFD4AF37", text_bold=True))
itemlist.append(Item(channel = item.channel, title = " Todas las series", action = "series", url = host + "/series/",
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel = item.channel, title = " Nuevos episodios", action = "nuevos_episodios", url = host + "/episodio/",
thumbnail=get_thumb('new episodes', auto=True)))
itemlist.append(Item(channel = item.channel, title = "Buscar...", action = "search", url = host, text_color="red", text_bold=True,
thumbnail=get_thumb('search', auto=True)))
itemlist.append(item.clone(title="Configurar canal...", text_color="green", action="configuracion", text_bold=True))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ["peliculas", "latino"]:
item.url = host
itemlist = peliculas(item)
elif categoria == 'terror':
item.url = host + '/genero/terror/'
itemlist = peliculas(item)
elif categoria == "series":
item.url = host + "/episodio/"
itemlist = nuevos_episodios(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
# Catch the exception so the 'novedades' section is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
#logger.info()
logger.info(item)
itemlist = []
data = httptools.downloadpage(item.url).data
data2 = scrapertools.find_single_match(data,'(?s)<div class="item_1.*?>(.*?)id="paginador">')
# Extract the info
#(?s)class="ml-item.*?a href="([^"]+).*?img src="([^"]+).*?alt="([^"]+).*?class="year">(\d{4})</span>(.*?)<div
patron = '(?s)class="ml-item.*?' # base
patron += 'a href="([^"]+).*?' # url
patron += 'img src="([^"]+).*?' # image
patron += 'alt="([^"]+).*?' # title
patron += 'class="year">(\d{4})' # year
patron += '</span>(.*?)<div' # quality
matches = scrapertools.find_multiple_matches(data2, patron)
scrapertools.printMatches(matches)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
if not "/series/" in scrapedurl:
scrapedquality = scrapertools.find_single_match(scrapedquality, '<span class="calidad2">(.*?)</span>')
itemlist.append(Item(action = "findvideos", channel = item.channel, title = scrapedtitle + " (" + scrapedyear + ") [" + scrapedquality + "]", contentTitle=scrapedtitle, thumbnail = scrapedthumbnail, url = scrapedurl, quality=scrapedquality, infoLabels={'year':scrapedyear}))
else:
if item.action == "search":
itemlist.append(Item(action = "temporadas", channel = item.channel, title = scrapedtitle + " (" + scrapedyear + ")", contentSerieName=scrapedtitle, contentType="tvshow", thumbnail = scrapedthumbnail, url = scrapedurl, infoLabels={'year':scrapedyear}))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next page
patron_siguiente='class="pag_b"><a href="([^"]+)'
url_pagina_siguiente=scrapertools.find_single_match(data, patron_siguiente)
if url_pagina_siguiente != "":
pagina = ">>> Pagina: " + scrapertools.find_single_match(url_pagina_siguiente, '\d+')
itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina_siguiente))
return itemlist
def genero(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# Narrow the search to the list that holds the genres
data = scrapertools.find_single_match(data,'(?s)<ul id="menu-generos" class="">(.*?)</ul>')
# Extract the url and the genre
patron = '<a href="(.*?)">(.*?)</a></li>'
matches = scrapertools.find_multiple_matches(data, patron)
# "Estrenos" is removed from the list because it has its own menu
matches.pop(0)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(action = "peliculas", channel = item.channel, title = scrapedtitle, url = scrapedurl))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# Extract the info
patron = '(?s)class="ml-item.*?' # base
patron += 'a href="([^"]+).*?' # url
patron += 'img src="([^"]+).*?' # image
patron += 'alt="([^"]+).*?' # title
patron += 'class="year">(\d{4})' # year
matches = scrapertools.find_multiple_matches(data, patron)
#if config.get_setting('temporada_o_todos', 'pelismedia') == 0:
if config.get_setting('temporada_o_todos', 'pelismedia'):
accion="temporadas"
else:
accion="episodios"
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
itemlist.append(Item(action = accion, channel = item.channel, title = scrapedtitle + " (" + scrapedyear + ")", contentSerieName=scrapedtitle, contentType="tvshow", thumbnail = scrapedthumbnail, url = scrapedurl, infoLabels={'year':scrapedyear}))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next page
patron_siguiente='class="pag_b"><a href="([^"]+)'
url_pagina_siguiente=scrapertools.find_single_match(data, patron_siguiente)
if url_pagina_siguiente != "":
pagina = ">>> Pagina: " + scrapertools.find_single_match(url_pagina_siguiente, '\d+')
itemlist.append(Item(channel = item.channel, action = "series", title = pagina, url = url_pagina_siguiente))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# Extract the seasons
patron = '<span class="se-t.*?>(.*?)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
# Exception for the series Sin Límites
if item.contentTitle == 'Amar sin límites':
item.contentSerieName = "limitless"
item.infoLabels['tmdb_id']=''
for scrapedseason in matches:
itemlist.append(item.clone(action = "episodios", title = "Temporada " + scrapedseason, contentSeason=scrapedseason))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)class="episodiotitle">.*?a href="(.*?)">(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
# Extract the episode number from the url
numero_capitulo=scrapertools.get_season_and_episode(scrapedurl)
if numero_capitulo != "":
temporada=numero_capitulo.split("x")[0]
capitulo=numero_capitulo.split("x")[1]
else:
temporada="_"
capitulo="_"
if item.contentSeason and str(item.contentSeason) != temporada:
continue
itemlist.append(item.clone(action = "findvideos", title = numero_capitulo + " - " + scrapedtitle.strip(), url = scrapedurl, contentSeason=temporada, contentEpisodeNumber=capitulo))
# if item.contentTitle.startswith('Temporada'):
# if str(item.contentSeason) == temporada:
# itemlist.append(item.clone(action = "findvideos", title = numero_capitulo + " - " + scrapedtitle.strip(), url = scrapedurl, contentSeason=temporada, contentEpisodeNumber=capitulo))
# else:
# itemlist.append(item.clone(action = "findvideos", title = numero_capitulo + " - " + scrapedtitle.strip(), url = scrapedurl, contentSeason=temporada, contentEpisodeNumber=capitulo))
#episodios_por_pagina=20
# if config.get_setting('episodios_x_pag', 'pelisultra').isdigit():
# episodios_por_pagina=int(config.get_setting('episodios_x_pag', 'pelisultra'))
# else:
# episodios_por_pagina=20
# config.set_setting('episodios_x_pag', '20', 'pelisultra')
episodios_por_pagina = int(config.get_setting('episodios_x_pag', 'pelismedia')) * 5 + 10
if not item.page:
item.page = 0
itemlist_page = itemlist[item.page: item.page + episodios_por_pagina]
if len(itemlist) > item.page + episodios_por_pagina:
itemlist_page.append(item.clone(title = ">>> Pagina siguiente", page = item.page + episodios_por_pagina))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist_page, seekTmdb=True)
return itemlist_page
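Two details of episodios() are easy to misread. Kodi list settings return the selected index rather than the label, so with lvalues ['10', '15', '20', '25', '30'] the page size is recovered arithmetically as index * 5 + 10; the paging itself is then a plain slice driven by item.page. A self-contained sketch of that logic (the Item plumbing is reduced to plain arguments):
def paginate(entries, page, setting_index):
    # list settings store an index; labels 10..30 map back as index * 5 + 10
    per_page = setting_index * 5 + 10
    chunk = entries[page: page + per_page]
    next_page = page + per_page if len(entries) > page + per_page else None
    return chunk, next_page

episodes = ['ep%d' % i for i in range(1, 38)]
chunk, next_page = paginate(episodes, 0, 2)  # index 2 selects 20 per page
print(len(chunk), next_page)                 # 20 20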
def nuevos_episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)<td class="bb">.*?' # base
patron += '<a href="(.*?)">' # url
patron += '(.*?)</a>.*?' # series name
patron += '<img src="(.*?)>.*?' # image
patron += '<h2>(.*?)</h2>' # title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedseriename, scrapedthumbnail, scrapedtitle in matches:
numero_capitulo=scrapertools.get_season_and_episode(scrapedurl)
if numero_capitulo != "":
temporada=numero_capitulo.split("x")[0]
capitulo=numero_capitulo.split("x")[1]
else:
temporada="_"
capitulo="_"
itemlist.append(Item(channel = item.channel, action = "findvideos", title = scrapedseriename +": " + numero_capitulo + " - " + scrapedtitle.strip(), url = scrapedurl, thumbnail = scrapedthumbnail, contentSerieName=scrapedseriename, contentSeason=temporada, contentEpisodeNumber=capitulo))
# InfoLabels:
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next page
patron_siguiente='class="pag_b"><a href="([^"]+)'
url_pagina_siguiente=scrapertools.find_single_match(data, patron_siguiente)
if url_pagina_siguiente != "":
pagina = ">>> Pagina: " + scrapertools.find_single_match(url_pagina_siguiente, '\d+')
itemlist.append(Item(channel = item.channel, action = "nuevos_episodios", title = pagina, url = url_pagina_siguiente))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
try:
item.url = host + "/?s=%s" % texto
itemlist.extend(peliculas(item))
return itemlist
# Catch the exception so the global search is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []

View File

@@ -0,0 +1,79 @@
{
"id": "poseidonhd",
"name": "PoseidonHD",
"active": true,
"adult": false,
"language": ["lat", "vose"],
"thumbnail": "https://poseidonhd.com/wp-content/uploads/2017/06/logo2.png",
"banner": "",
"categories": [
"movie",
"tvshow",
"direct"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,366 @@
# -*- coding: utf-8 -*-
# -*- Channel PoseidonHD -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'mx': 'Latino', 'dk':'Latino', 'es': 'Castellano', 'en': 'VOSE', 'gb':'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['HD', 'SD', 'CAM']
list_servers = [
'directo',
'gvideo',
'openload',
'streamango',
'rapidvideo'
]
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'poseidonhd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'poseidonhd')
host = 'https://poseidonhd.com/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'tvshows', action='list_all', type='tvshows',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
extra='movie'))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_movies(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
thumbnail=get_thumb('year', auto=True), type='movies'))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
for lang in lang_list:
if lang == 'en':
lang = 'vose'
if lang not in language:
language.append(lang)
return language
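get_language() pulls the flag codes out of the style URLs, normalizes 'en' to 'vose' and deduplicates while preserving order. A standalone equivalent using re.findall directly (the sample markup is invented; the dot before 'png' is escaped here, which the original pattern leaves loose):
import re

def get_language(lang_data):
    language = []
    for lang in re.findall(r'/flags/(.*?)\.png\)', lang_data):
        if lang == 'en':
            lang = 'vose'
        if lang not in language:
            language.append(lang)
    return language

print(get_language('url(/flags/mx.png) url(/flags/en.png) url(/flags/en.png)'))
# ['mx', 'vose']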
def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host)
if 'Genero' in item.title:
patron = '<li class=cat-item cat-item-\d+><a href=(.*?) >(.*?)/i>'
elif 'Año' in item.title:
patron = '<li><a href=(.*?release.*?)>(.*?)</a>'
elif 'Calidad' in item.title:
patron = 'menu-item-object-dtquality menu-item-\d+><a href=(.*?)>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
plot=''
if 'Genero' in item.title:
quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>(.*?)<')
title = scrapertools.find_single_match(scrapedtitle,'(.*?)</')
plot = '%s elementos' % quantity.replace('.','')
else:
title = scrapedtitle
if title not in duplicados:
itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, plot=plot, action='list_all',
type=item.type))
duplicados.append(title)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
patron = '<article id=post-\d+ class=item movies><div class=poster><img src=(.*?) alt=(.*?)>.*?quality>(.*?)'
patron += '</span><\/div><a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>.*?flags(.*?)metadata'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches:
title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
language = get_language(lang_data)
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=language,
quality=quality,
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<article id=post-\d+ class=item tvshows><div class=poster><img src=(.*?) alt=(.*?)>.*?'
patron += '<a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
infoLabels={'year':year}))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Pagination
url_next_page = scrapertools.find_single_match(data,"<a class='arrow_pag' href=([^>]+)><i id='nextpagination'")
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='Temporada\d+'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName))
return itemlist
def all_episodes(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron='class=numerando>%s - (\d+)</div><div class=episodiotitle><a href=(.*?)>(.*?)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedepisode, scrapedurl, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
selector_url = scrapertools.find_multiple_matches(data, 'class=metaframe rptss src=(.*?) frameborder=0 ')
for lang in selector_url:
data = get_source('https:'+lang)
urls = scrapertools.find_multiple_matches(data, 'data-playerid=(.*?)>')
subs = ''
lang = scrapertools.find_single_match(lang, 'lang=([^+]+)')
language = IDIOMAS[lang]
if item.contentType == 'episode':
quality = 'SD'
else:
quality = item.quality
for url in urls:
final_url = httptools.downloadpage('https:'+url).data
if 'vip' in url:
file_id = scrapertools.find_single_match(url, 'file=(.*?)&')
if language=='VOSE':
sub = scrapertools.find_single_match(url, 'sub=(.*?)&')
subs = 'https:%s' % sub
post = {'link':file_id}
post = urllib.urlencode(post)
hidden_url = 'https://streamango.poseidonhd.com/repro//plugins/gkpluginsphp.php'
data_url = httptools.downloadpage(hidden_url, post=post).data
dict_vip_url = jsontools.load(data_url)
url = dict_vip_url['link']
else:
url = 'https:%s' % url
new_url = url.replace('embed','stream')
url = httptools.downloadpage(new_url, follow_redirects=False).headers.get('location')
#title = '%s [%s]' % (item.title, language)
itemlist.append(item.clone(title='[%s] [%s]', url=url, action='play', subtitle=subs,
language=language, quality=quality, infoLabels = item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
# Required for link filtering
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
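For the 'vip' links, findvideos() extracts the id from the file= query parameter, POSTs it as link=<id> to the gkpluginsphp.php endpoint, and reads the direct stream URL from the 'link' field of the JSON reply. A minimal sketch of that exchange, with requests standing in for the addon's httptools (an assumption; the endpoint URL is the one hard-coded above):
import requests

GK_ENDPOINT = 'https://streamango.poseidonhd.com/repro//plugins/gkpluginsphp.php'

def resolve_vip(file_id):
    # POST the id from the "file=" parameter; the reply is JSON {"link": "<stream url>"}
    resp = requests.post(GK_ENDPOINT, data={'link': file_id}, timeout=10)
    return resp.json().get('link', '')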
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return search_results(item)
else:
return []
def search_results(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = '<article>.*?<a href=(.*?)><img src=(.*?) alt=(.*?) />.*?meta.*?year>(.*?)<(.*?)<p>(.*?)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:
title = scrapedtitle
url = scrapedurl
thumbnail = scrapedthumb
plot = scrapedplot
language = get_language(lang_data)
if language:
action = 'findvideos'
else:
action = 'seasons'
new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot,
action=action,
language=language, infoLabels={'year':year})
if new_item.action == 'findvideos':
new_item.contentTitle = new_item.title
else:
new_item.contentSerieName = new_item.title
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host + 'movies'
elif categoria == 'infantiles':
item.url = host + 'genre/animacion/'
elif categoria == 'terror':
item.url = host + 'genre/terror/'
item.type='movies'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "seriesblanco",
"name": "Seriesblanco",
"active": true,
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "seriesblanco.png",
@@ -59,4 +59,4 @@
]
}
]
}
}

View File

@@ -15,7 +15,15 @@
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Episodios de series",
"default": true,
"enabled": true,
"visible": true
},
@@ -33,6 +41,19 @@
"VO",
"VOS"
]
},
{
"id": "filterlinks",
"type": "list",
"label": "Mostrar enlaces de tipo...",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Solo Descarga",
"Solo Online",
"No filtrar"
]
}
]
}

View File

@@ -33,12 +33,20 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search",
url=urlparse.urljoin(HOST, "all.php")))
#itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
autoplay.show_option(item.channel, itemlist)
return itemlist
def newest(categoria):
logger.info("categoria: %s" % categoria)
itemlist = []
if categoria == 'series':
itemlist = novedades(Item(url = HOST))
return itemlist
def novedades(item):
logger.info()
@@ -220,7 +228,7 @@ def episodios(item):
infoLabels=infoLabels))
#itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Option "Add this series to the XBMC video library"
@@ -241,14 +249,24 @@ def findvideos(item):
online = re.findall('<table class=.+? cellpadding=.+? cellspacing=.+?>(.+?)</table>', data,
re.MULTILINE | re.DOTALL)
itemlist = parse_videos(item, "Ver", online[0])
itemlist.extend(parse_videos(item, "Descargar", online[1]))
itemlist = []
try:
filtro_enlaces = config.get_setting("filterlinks", item.channel)
except:
filtro_enlaces = 2
if filtro_enlaces != 0:
itemlist.extend(parse_videos(item, "Ver", online[0]))
if filtro_enlaces != 1:
itemlist.extend(parse_videos(item, "Descargar", online[1]))
itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_idiomas)
itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
# Required by AutoPlay
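The new filterlinks setting is an index into ['Solo Descarga', 'Solo Online', 'No filtrar']: 0 keeps only download links, 1 only streaming links, 2 keeps both, and the try/except falls back to 2 on installs whose saved settings predate the option. A reduced sketch of the branch above:
def filter_links(online_tables, filtro_enlaces=2):
    # online_tables = (streaming_html, download_html)
    kept = []
    if filtro_enlaces != 0:   # not "Solo Descarga": keep streaming links
        kept.append(('Ver', online_tables[0]))
    if filtro_enlaces != 1:   # not "Solo Online": keep download links
        kept.append(('Descargar', online_tables[1]))
    return kept

print([kind for kind, _ in filter_links(('<a>v</a>', '<a>d</a>'), 0)])  # ['Descargar']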

View File

@@ -11,6 +11,22 @@
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Episodios de series",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
@@ -28,20 +44,17 @@
]
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"id": "filterlinks",
"type": "list",
"label": "Mostrar enlaces de tipo...",
"default": 2,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Episodios de series",
"default": true,
"enabled": true,
"visible": true
"visible": true,
"lvalues": [
"Solo Descarga",
"Solo Online",
"No filtrar"
]
}
]
}

View File

@@ -132,17 +132,7 @@ def newest(categoria):
if categoria != 'series':
return []
try:
return novedades(Item())
# Catch the exception so the 'novedades' section is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return novedades(Item())
def episodios(item):
logger.info("url: %s" % item.url)
@@ -177,7 +167,10 @@ def search(item, texto):
logger.info("texto: %s" % texto)
data = httptools.downloadpage(urlparse.urljoin(HOST, "/buscar.php?term=%s" % texto)).data
data_dict = jsontools.load(data)
tvshows = data_dict["myData"]
try:
tvshows = data_dict["myData"]
except:
return []
return [item.clone(action="episodios",
title=show["titulo"],
@@ -203,23 +196,34 @@ def findvideos(item):
links = re.findall(expr, data, re.MULTILINE | re.DOTALL)
itemlist = [item.clone(
action="play",
title="{linkType} en {server} [{lang}] [{quality}] ({uploader}: {date})".format(
linkType="Ver" if linkType != "descargar" else "Descargar",
lang=IDIOMAS.get(lang, lang),
date=date,
server=server.rstrip(),
quality=quality,
uploader=uploader),
server=server.rstrip(),
url=urlparse.urljoin(HOST, url),
language=IDIOMAS.get(lang,lang),
quality=quality
) for lang, date, server, url, linkType, quality, uploader in links]
itemlist = []
try:
filtro_enlaces = config.get_setting("filterlinks", item.channel)
except:
filtro_enlaces = 2
typeListStr = ["Descargar", "Ver"]
for lang, date, server, url, linkType, quality, uploader in links:
linkTypeNum = 0 if linkType == "descargar" else 1
if filtro_enlaces != 2 and filtro_enlaces != linkTypeNum:
continue
itemlist.append(item.clone(
action="play",
title="{linkType} en {server} [{lang}] [{quality}] ({uploader}: {date})".format(
linkType=typeListStr[linkTypeNum],
lang=IDIOMAS.get(lang, lang),
date=date,
server=server.rstrip(),
quality=quality,
uploader=uploader),
server=server.rstrip(),
url=urlparse.urljoin(HOST, url),
language=IDIOMAS.get(lang,lang),
quality=quality
)
)
# Required by FilterTools

View File

@@ -0,0 +1,65 @@
{
"id": "solocastellano",
"name": "SoloCastellano",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "https://s31.postimg.cc/uotcf3owb/solocastellano.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar"
]
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,429 @@
# -*- coding: utf-8 -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://solocastellano.com/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
IDIOMAS = {'Castellano': 'Castellano'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['yourupload', 'openload', 'sendvid']
vars = {
'ef5ca18f089cf01316bbc967fa10f72950790c39ef5ca18f089cf01316bbc967fa10f72950790c39': 'http://tawnestdplsnetps.pw/',
'b48699bb49d4550f27879deeb948d4f7d9c5949a8': 'embed',
'JzewJkLlrvcFnLelj2ikbA': 'php?url=',
'p889c6853a117aca83ef9d6523335dc065213ae86': 'player',
'e20fb341325556c0fc0145ce10d08a970538987': 'http://yourupload.com/embed/'
}
tgenero = {"acción": "https://s3.postimg.cc/y6o9puflv/accion.png",
"animación": "https://s13.postimg.cc/5on877l87/animacion.png",
"aventura": "https://s10.postimg.cc/6su40czih/aventura.png",
"belico": "https://s23.postimg.cc/71itp9hcr/belica.png",
"ciencia ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
"comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
"comedia romántica": "https://s21.postimg.cc/xfsj7ua0n/romantica.png",
"cortometrajes": "https://s15.postimg.cc/kluxxwg23/cortometraje.png",
"crimen": "https://s4.postimg.cc/6z27zhirx/crimen.png",
"cristianas": "https://s7.postimg.cc/llo852fwr/religiosa.png",
"deportivas": "https://s13.postimg.cc/xuxf5h06v/deporte.png",
"drama": "https://s16.postimg.cc/94sia332d/drama.png",
"familiar": "https://s7.postimg.cc/6s7vdhqrf/familiar.png",
"fantasía": "https://s13.postimg.cc/65ylohgvb/fantasia.png",
"guerra": "https://s4.postimg.cc/n1h2jp2jh/guerra.png",
"historia": "https://s15.postimg.cc/fmc050h1n/historia.png",
"intriga": "https://s27.postimg.cc/v9og43u2b/intriga.png",
"misterios": "https://s1.postimg.cc/w7fdgf2vj/misterio.png",
"musical": "https://s29.postimg.cc/bbxmdh9c7/musical.png",
"romance": "https://s15.postimg.cc/fb5j8cl63/romance.png",
"suspenso": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
"terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
"thriller": "https://s22.postimg.cc/5y9g0jsu9/thriller.png"}
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
thumbnail=get_thumb('all', auto=True),
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
url=host + 'lista-de-peliculas/',
extra='peliculas'
))
itemlist.append(item.clone(title="Ultimas",
action="lista",
thumbnail=get_thumb('last', auto=True),
fanart='https://s22.postimg.cc/cb7nmhwv5/ultimas.png',
url=host,
extra='peliculas'
))
itemlist.append(item.clone(title="Generos",
action="generos",
thumbnail=get_thumb('genres', auto=True),
fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
url=host,
extra='peliculas'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + 'search?q=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'))
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
contentSerieName = ''
patron = '<div class=movie><div class=imagen><img src=(.*?) alt=(.*?) width=.*? height=.*?\/><a href=(.*?)><span '
patron += 'class=player>.*?class=year>(.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.extra == 'peliculas':
accion = 'findvideos'
else:
accion = 'temporadas'
for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
scrapedurl = scrapedurl.translate(None, '"')
scrapedurl = scrapedurl.translate(None, "'")
url = host + scrapedurl
thumbnail = scrapedthumbnail
title = scrapedtitle
year = scrapedyear
if item.extra == 'series':
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel,
action=accion,
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=scrapedtitle,
extra=item.extra,
contentSerieName=contentSerieName,
infoLabels={'year': year}
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<div class=siguiente><a href=(.*?)>')
url = host + next_page
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="lista",
title='Siguiente >>>',
url=url,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
extra=item.extra
))
return itemlist
def generos(item):
logger.info()
itemlist = []
norep = []
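    # norep tracks genre titles already added, so categories repeated on the page are listed only once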
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-.*?"><a href="([^"]+)".*?>([^<]+)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = host + scrapedurl
title = scrapedtitle.lower()
if title in tgenero:
            thumbnail = tgenero[title]
else:
thumbnail = ''
itemactual = Item(channel=item.channel,
action='lista',
title=title, url=url,
thumbnail=thumbnail,
extra=item.extra
)
if title not in norep:
itemlist.append(itemactual)
norep.append(itemactual.title)
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="has-sub"><a href="([^"]+)"><span><b class="icon-bars"><\/b> ([^<]+)<\/span><\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
temp = 1
infoLabels = item.infoLabels
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
        title = scrapedtitle.strip()
contentSeasonNumber = temp
infoLabels['season'] = contentSeasonNumber
thumbnail = item.thumbnail
plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
itemlist.append(Item(channel=item.channel,
action="episodiosxtemp",
title=title,
fulltitle=item.title,
url=url,
thumbnail=thumbnail,
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber,
plot=plot,
infoLabels=infoLabels
))
temp = temp + 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
extra1=item.extra1,
temp=str(temp)
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li>.\s*<a href="(.*?)">.\s*<span.*?datex">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedepisode in matches:
url = host + scrapedurl
title = item.contentSerieName + ' ' + scrapedepisode
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
fulltitle=item.fulltitle,
url=url,
thumbnail=item.thumbnail,
plot=item.plot,
extra=item.extra,
contentSerieName=item.contentSerieName
))
return itemlist
def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
temp = 'temporada-' + str(item.contentSeasonNumber)
patron = '<li>.\s*<a href="(.*?-' + temp + '.*?)">.\s*<span.*?datex">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedepisode in matches:
url = host + scrapedurl
title = item.contentSerieName + ' ' + scrapedepisode
scrapedepisode = re.sub(r'.*?x', '', scrapedepisode)
infoLabels['episode'] = scrapedepisode
thumbnail = item.thumbnail
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=title,
fulltitle=item.fulltitle,
url=url,
thumbnail=item.thumbnail,
plot=item.plot,
extra=item.extra,
contentSerieName=item.contentSerieName,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def dec(encurl):
logger.info()
url = ''
    encurl = encurl.translate(None, "'(),;")
encurl = encurl.split('+')
for cod in encurl:
if cod in vars:
url = url + vars[cod]
else:
url = url + cod
return url
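# Hypothetical example (var names made up; the real ones come from the page's JS,
# collected in the module-level `vars` dict):
#   with vars = {'c1': 'http://example', 'c2': '.com/embed'}
#   dec("c1+c2+'/opl.php'")  ->  "http://example.com/embed/opl.php"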
def findvideos(item):
logger.info()
itemlist = []
langs = dict()
data = httptools.downloadpage(item.url).data
patron = '<a onclick="return (play\d+).*?;"> (.*?) <\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for key, value in matches:
langs[key] = value.strip()
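    # Hypothetical result: langs = {'play1': 'Latino', 'play2': 'Sub Español'} (labels scraped from the page)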
patron = 'function (play\d).*?servidores.*?attr.*?src.*?\+([^;]+);'
matches = re.compile(patron, re.DOTALL).findall(data)
title = item.title
enlace = scrapertools.find_single_match(data,
'var e20fb341325556c0fc0145ce10d08a970538987 =.*?"\/your\.".*?"([^"]+)"')
for scrapedlang, encurl in matches:
if 'e20fb34' in encurl:
url = dec(encurl)
url = url + enlace
else:
url = dec(encurl)
title = ''
server = ''
        servers = {'/opl': 'openload', '/your': 'yourupload', '/sen': 'senvid', '/face': 'netutv', '/vk': 'vk',
                   '/jk': 'streamcherry'}
server_id = re.sub(r'.*?embed|\.php.*', '', url)
if server_id and server_id in servers:
server = servers[server_id]
if (scrapedlang in langs) and langs[scrapedlang] in list_language:
language = IDIOMAS[langs[scrapedlang]]
else:
language = 'Latino'
if item.extra == 'peliculas':
title = item.contentTitle + ' (' + server + ') ' + language
plot = scrapertools.find_single_match(data, '<p>([^<]+)<\/p>')
else:
title = item.contentSerieName + ' (' + server + ') ' + language
plot = item.plot
thumbnail = servertools.guess_server_thumbnail(title)
if 'player' not in url and 'php' in url:
itemlist.append(item.clone(title=title,
url=url,
action="play",
plot=plot,
thumbnail=thumbnail,
server=server,
quality='',
language=language
))
    # Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if 'your' in item.url:
item.url = 'http://www.yourupload.com/embed/' + scrapertools.find_single_match(data, 'src=".*?code=(.*?)"')
itemlist.append(item)
else:
itemlist = servertools.find_video_items(data=data)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
    if texto != '':
        return lista(item)
    return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
# categoria='peliculas'
try:
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host + 'search?q=animación'
elif categoria == 'terror':
item.url = host + 'search?q=terror'
item.extra = 'peliculas'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -37,21 +37,15 @@ class UnshortenIt(object):
_anonymz_regex = r'anonymz\.com'
_shrink_service_regex = r'shrink-service\.it'
_rapidcrypt_regex = r'rapidcrypt\.net'
_maxretries = 5
_this_dir, _this_filename = os.path.split(__file__)
_timeout = 10
def unshorten(self, uri, type=None):
domain = urlsplit(uri).netloc
if not domain:
return uri, "No domain found in URI!"
had_google_outbound, uri = self._clear_google_outbound_proxy(uri)
if re.search(self._adfly_regex, domain,
re.IGNORECASE) or type == 'adfly':
return self._unshorten_adfly(uri)
@@ -74,21 +68,15 @@ class UnshortenIt(object):
return self._unshorten_anonymz(uri)
if re.search(self._rapidcrypt_regex, domain, re.IGNORECASE):
return self._unshorten_rapidcrypt(uri)
return uri, 200
def unwrap_30x(self, uri, timeout=10):
domain = urlsplit(uri).netloc
self._timeout = timeout
loop_counter = 0
try:
if loop_counter > 5:
raise ValueError("Infinitely looping redirect from URL: '%s'" %
(uri,))
# headers stop t.co from working so omit headers if this is a t.co link
if domain == 't.co':
r = httptools.downloadpage(uri, timeout=self._timeout)
@@ -108,7 +96,6 @@ class UnshortenIt(object):
only_headers=True)
if not r.success:
return uri, -1
retries = 0
if 'location' in r.headers and retries < self._maxretries:
r = httptools.downloadpage(
@@ -120,10 +107,8 @@ class UnshortenIt(object):
retries = retries + 1
else:
return r.url, r.code
except Exception as e:
return uri, str(e)
def _clear_google_outbound_proxy(self, url):
'''
So google proxies all their outbound links through a redirect so they can detect outbound links.
@@ -132,16 +117,13 @@ class UnshortenIt(object):
This is useful for doing things like parsing google search results, or if you're scraping google
docs, where google inserts hit-counters on all outbound links.
'''
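        # Hypothetical examples (not from the source):
        #   "https://www.google.com/url?q=http://example.com" -> (True, "http://example.com")
        #   "http://example.com"                              -> (False, "http://example.com")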
# This is kind of hacky, because we need to check both the netloc AND
# part of the path. We could use urllib.parse.urlsplit, but it's
# easier and just as effective to use string checks.
if url.startswith("http://www.google.com/url?") or \
url.startswith("https://www.google.com/url?"):
qs = urlparse(url).query
query = parse_qs(qs)
if "q" in query: # Google doc outbound links (maybe blogspot, too)
return True, query["q"].pop()
elif "url" in query: # Outbound links from google searches
@@ -150,7 +132,6 @@ class UnshortenIt(object):
raise ValueError(
"Google outbound proxy URL without a target url ('%s')?" %
url)
return False, url
def _unshorten_adfly(self, uri):
@@ -163,14 +144,11 @@ class UnshortenIt(object):
if len(ysmm) > 0:
ysmm = re.sub(r'var ysmm \= \'|\'\;', '', ysmm[0])
left = ''
right = ''
for c in [ysmm[i:i + 2] for i in range(0, len(ysmm), 2)]:
left += c[0]
right = c[1] + right
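            # e.g. ysmm = "abcdef" -> pairs "ab", "cd", "ef" -> left = "ace", right = "fdb" -> encoded "acefdb"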
# Additional digit arithmetic
encoded_uri = list(left + right)
numbers = ((i, n) for i, n in enumerate(encoded_uri) if str.isdigit(n))
@@ -178,12 +156,9 @@ class UnshortenIt(object):
xor = int(first[1]) ^ int(second[1])
if xor < 10:
encoded_uri[first[0]] = str(xor)
decoded_uri = b64decode("".join(encoded_uri).encode())[16:-16].decode()
if re.search(r'go\.php\?u\=', decoded_uri):
decoded_uri = b64decode(re.sub(r'(.*?)u=', '', decoded_uri)).decode()
return decoded_uri, r.code
else:
return uri, 'No ysmm variable found'
@@ -195,23 +170,15 @@ class UnshortenIt(object):
'''
(Attempt) to decode linkbucks content. HEAVILY based on the OSS jDownloader codebase.
        This has necessitated a license change.
'''
r = httptools.downloadpage(uri, timeout=self._timeout)
firstGet = time.time()
baseloc = r.url
if "/notfound/" in r.url or \
"(>Link Not Found<|>The link may have been deleted by the owner|To access the content, you must complete a quick survey\.)" in r.data:
return uri, 'Error: Link not found or requires a survey!'
link = None
content = r.data
regexes = [
r"<div id=\"lb_header\">.*?/a>.*?<a.*?href=\"(.*?)\".*?class=\"lb",
r"AdBriteInit\(\"(.*?)\"\)",
@@ -220,66 +187,49 @@ class UnshortenIt(object):
r"src=\"http://static\.linkbucks\.com/tmpl/mint/img/lb\.gif\" /></a>.*?<a href=\"(.*?)\"",
r"id=\"content\" src=\"([^\"]*)",
]
for regex in regexes:
if self.inValidate(link):
link = find_in_text(regex, content)
if self.inValidate(link):
match = find_in_text(r"noresize=\"[0-9+]\" src=\"(http.*?)\"", content)
if match:
link = find_in_text(r"\"frame2\" frameborder.*?src=\"(.*?)\"", content)
if self.inValidate(link):
scripts = re.findall("(<script type=\"text/javascript\">[^<]+</script>)", content)
if not scripts:
return uri, "No script bodies found?"
js = False
for script in scripts:
# cleanup
script = re.sub(r"[\r\n\s]+\/\/\s*[^\r\n]+", "", script)
if re.search(r"\s*var\s*f\s*=\s*window\['init'\s*\+\s*'Lb'\s*\+\s*'js'\s*\+\s*''\];[\r\n\s]+", script):
js = script
if not js:
return uri, "Could not find correct script?"
token = find_in_text(r"Token\s*:\s*'([a-f0-9]{40})'", js)
if not token:
token = find_in_text(r"\?t=([a-f0-9]{40})", js)
assert token
authKeyMatchStr = r"A(?:'\s*\+\s*')?u(?:'\s*\+\s*')?t(?:'\s*\+\s*')?h(?:'\s*\+\s*')?K(?:'\s*\+\s*')?e(?:'\s*\+\s*')?y"
l1 = find_in_text(r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s*(\d+?);", js)
l2 = find_in_text(
r"\s*params\['" + authKeyMatchStr + r"'\]\s*=\s?params\['" + authKeyMatchStr + r"'\]\s*\+\s*(\d+?);",
js)
if any([not l1, not l2, not token]):
return uri, "Missing required tokens?"
authkey = int(l1) + int(l2)
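        # Replays the browser's countdown flow: register the token, fetch the decoy script,
        # wait out the timer, then request the target URL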
p1_url = urljoin(baseloc, "/director/?t={tok}".format(tok=token))
r2 = httptools.downloadpage(p1_url, timeout=self._timeout)
p1_url = urljoin(baseloc, "/scripts/jquery.js?r={tok}&{key}".format(tok=token, key=l1))
r2_1 = httptools.downloadpage(p1_url, timeout=self._timeout)
time_left = 5.033 - (time.time() - firstGet)
        xbmc.sleep(int(max(time_left, 0) * 1000))
p3_url = urljoin(baseloc, "/intermission/loadTargetUrl?t={tok}&aK={key}&a_b=false".format(tok=token,
key=str(authkey)))
r3 = httptools.downloadpage(p3_url, timeout=self._timeout)
resp_json = json.loads(r3.data)
if "Url" in resp_json:
return resp_json['Url'], r3.code
return "Wat", "wat"
def inValidate(self, s):
@@ -287,30 +237,23 @@ class UnshortenIt(object):
# (s == null || s != null && (s.matches("[\r\n\t ]+") || s.equals("") || s.equalsIgnoreCase("about:blank")))
if not s:
return True
if re.search("[\r\n\t ]+", s) or s.lower() == "about:blank":
return True
else:
return False
def _unshorten_adfocus(self, uri):
orig_uri = uri
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
adlink = re.findall("click_url =.*;", html)
if len(adlink) > 0:
uri = re.sub('^click_url = "|"\;$', '', adlink[0])
if re.search(r'http(s|)\://adfoc\.us/serve/skip/\?id\=', uri):
http_header = dict()
http_header["Host"] = "adfoc.us"
http_header["Referer"] = orig_uri
r = httptools.downloadpage(uri, headers=http_header, timeout=self._timeout)
uri = r.url
return uri, r.code
else:
@@ -340,20 +283,16 @@ class UnshortenIt(object):
try:
r = httptools.downloadpage(uri, timeout=self._timeout)
html = r.data
session_id = re.findall(r'sessionId\:(.*?)\"\,', html)
if len(session_id) > 0:
session_id = re.sub(r'\s\"', '', session_id[0])
http_header = dict()
http_header["Content-Type"] = "application/x-www-form-urlencoded"
http_header["Host"] = "sh.st"
http_header["Referer"] = uri
http_header["Origin"] = "http://sh.st"
http_header["X-Requested-With"] = "XMLHttpRequest"
xbmc.sleep(5 * 1000)
payload = {'adSessionId': session_id, 'callback': 'c'}
r = httptools.downloadpage(
'http://sh.st/shortest-url/end-adsession?' +
@@ -361,7 +300,6 @@ class UnshortenIt(object):
headers=http_header,
timeout=self._timeout)
response = r.data[6:-2].decode('utf-8')
if r.code == 200:
resp_uri = json.loads(response)['destinationUrl']
if resp_uri is not None:
@@ -401,12 +339,9 @@ class UnshortenIt(object):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r"<input type='hidden' name='\d+' id='\d+' value='([^']+)'>", html)[0]
from core import scrapertools
uri = scrapertools.decodeHtmlentities(uri)
uri = uri.replace("&sol;", "/") \
.replace("&colon;", ":") \
.replace("&period;", ".") \
@@ -414,7 +349,6 @@ class UnshortenIt(object):
.replace("&num;", "#") \
.replace("&quest;", "?") \
.replace("&lowbar;", "_")
return uri, r.code
except Exception as e:
@@ -424,9 +358,7 @@ class UnshortenIt(object):
try:
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r'<a class="button" href="([^"]+)">Click to continue</a>', html)[0]
return uri, r.code
except Exception as e:

View File

@@ -130,7 +130,10 @@ def render_items(itemlist, parent_item):
    # Iterate over the itemlist
for item in itemlist:
channel_parameters = channeltools.get_channel_parameters(item.channel)
try:
channel_parameters = channeltools.get_channel_parameters(item.channel)
except:
pass
#logger.debug(item)
        # If the item has no category, inherit the parent item's
if item.category == "":
@@ -172,9 +175,11 @@ def render_items(itemlist, parent_item):
        # Add headers to the images if they are hosted on a server behind Cloudflare
from core import httptools
item.thumbnail = httptools.get_url_headers(item.thumbnail)
if item.action == 'play':
item.thumbnail = unify.thumbnail_type(item)
else:
item.thumbnail = httptools.get_url_headers(item.thumbnail)
item.fanart = httptools.get_url_headers(item.fanart)
item.thumbnail = unify.thumbnail_type(item)
        # IconImage for folder and video
if item.folder:
icon_image = "DefaultFolder.png"
@@ -192,12 +197,12 @@ def render_items(itemlist, parent_item):
fanart = os.path.join(config.get_runtime_path(), "fanart.jpg")
        # Create the listitem
listitem = xbmcgui.ListItem(item.title)
#listitem = xbmcgui.ListItem(item.title)
        # icon, thumb and poster usage is skin dependent, so we set all of them to avoid problems
        # if no thumb exists, the icon value is used
if config.get_platform(True)['num_version'] >= 16.0:
listitem.setArt({'icon': icon_image, 'thumb': item.contentThumbnail, 'poster': item.thumbnail,
listitem.setArt({'icon': icon_image, 'thumb': item.thumbnail, 'poster': item.thumbnail,
'fanart': fanart})
else:
listitem.setIconImage(icon_image)

View File

@@ -1,34 +1,36 @@
# -*- coding: utf-8 -*-
import base64
from core import scrapertools
from core import httptools, scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "File was deleted" in data:
return False, "[speedvideo] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
def get_video_url(page_url,
premium=False,
user="",
password="",
video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = scrapertools.cachePage(page_url)
data = httptools.downloadpage(page_url).data
codif = scrapertools.find_single_match(data, 'var [a-z]+ = ([0-9]+);')
link = scrapertools.find_single_match(data, 'linkfile ="([^"]+)"')
numero = int(codif)
media_urls = scrapertools.find_multiple_matches(data, r"file:[^']'([^']+)',\s*label:[^\"]\"([^\"]+)\"")
# Decrypt link base64 // python version of speedvideo's base64_decode() [javascript]
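    # e.g. numero = 4: 10 filler chars were inserted at offset 4, so link[:4] + link[14:] is the clean base64 payload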
for media_url, label in media_urls:
media_url = httptools.downloadpage(media_url, only_headers=True, follow_redirects=False).headers.get("location", "")
link1 = link[:numero]
link2 = link[numero + 10:]
link = link1 + link2
media_url = base64.b64decode(link)
video_urls.append(["." + media_url.rsplit('.', 1)[1] + ' [speedvideo]', media_url])
if media_url:
video_urls.append([label + " " + media_url.rsplit('.', 1)[1] + ' [speedvideo]', media_url])
return video_urls

View File

@@ -12,6 +12,10 @@
{
"pattern": "embed[./]yourupload.com(?:/|.php\\?url=)([A-z0-9]+)",
"url": "http://www.yourupload.com/embed/\\1"
},
{
"pattern": "(yourupload.com/download\\?file=[A-z0-9]+)",
"url": "https://www.\\1&sendFile=true"
}
]
},

View File

@@ -17,21 +17,27 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
url1 = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
referer = {'Referer': page_url}
url = scrapertools.find_single_match(data, '<meta property="og:video" content="([^"]+)"')
if not url:
url = scrapertools.find_single_match(data, "file:\s*'([^']+)'")
if "download" in page_url:
url = httptools.downloadpage("https:" + url1, headers=referer, follow_redirects=False, only_headers=True).headers.get("location", "")
else:
url = scrapertools.find_single_match(data, "file:\s*'([^']+)'")
if url:
url = "https://www.yourupload.com%s" % url
referer = {'Referer': page_url}
location = httptools.downloadpage(url, headers=referer, follow_redirects=False, only_headers=True)
media_url = location.headers["location"].replace("?start=0", "").replace("https", "http")
media_url += "|Referer=%s" % url
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [yourupload]", media_url])
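        # Kodi reads extra HTTP headers appended after '|' in the URL, e.g. "http://host/v.mp4|Referer=http://..."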
if "vidcache" not in url:
url = "https://www.yourupload.com%s" % url
location = httptools.downloadpage(url, headers=referer, follow_redirects=False, only_headers=True)
media_url = location.headers["location"].replace("?start=0", "").replace("https", "http")
ext = media_url[-4:]
media_url += "|Referer=%s" % url
else:
ext = url[-4:]
media_url = url +"|Referer=%s" % page_url
video_urls.append([ext + " [yourupload]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls