Merge remote-tracking branch 'alfa-addon/master'

unknown
2018-04-12 08:18:51 -03:00
36 changed files with 570 additions and 1054 deletions


@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.5.8" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.5.9" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -20,10 +20,16 @@
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» torrentrapid » torrentlocura
» mispelisyseries » descargas2020
» mejortorrent » tvsinpagar
» cinefox » newpct
» peliculasdk » netutv
» pepecine » seriespapaya
» doomtv » dostream
» pelisgratis » estream
» plusdede
¤ arreglos internos
¤ Gracias a la colaboración de @pipcat y @lopezvg en ésta versión
¤ Gracias a @pipcat,@Rhinox117,@lopezvg por colaborar en ésta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

plugin.video.alfa/channels/animeflv_me.py Executable file → Normal file

@@ -213,7 +213,7 @@ def series(item):
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, contentSerieName=title,
plot=plot, show=title, viewmode="movies_with_plot", context=context))
url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
@@ -237,21 +237,26 @@ def episodios(item):
es_pelicula = False
for url, title, date in episodes:
episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
new_item = Item(channel=item.channel, action="findvideos",
url=url, thumbnail=item.thumbnail, plot=plot, show=item.show)
# The link belongs to an episode
if episode:
season = 1
episode = int(episode)
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
item.channel, item.contentSerieName, season, episode)
new_item.infoLabels["episode"] = episode
new_item.infoLabels["season"] = season
new_item.contentSerieName = item.contentSerieName
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# The link belongs to a movie
else:
title = "%s (%s)" % (title, date)
item.url = url
es_pelicula = True
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title)))
new_item.title=title
new_item.fulltitle="%s %s" % (item.show, title)
itemlist.append(new_item)
# The system supports the video library and at least one episode
# or movie was found
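The net effect of this hunk: each episode Item is now created once, renumbered through renumbertools, and appended at the end of the loop. A minimal sketch of the new flow, with build_episode as a hypothetical helper (not in the commit) and the APIs assumed to behave as they are used above:

# Hypothetical helper, mirroring the hunk above.
def build_episode(item, url, plot, episode_number, date):
    new_item = Item(channel=item.channel, action="findvideos",
                    url=url, thumbnail=item.thumbnail, plot=plot, show=item.show)
    # renumbertools maps the site's flat numbering onto (season, episode)
    season, episode = renumbertools.numbered_for_tratk(
        item.channel, item.contentSerieName, 1, int(episode_number))
    new_item.infoLabels["episode"] = episode
    new_item.infoLabels["season"] = season
    new_item.contentSerieName = item.contentSerieName
    new_item.title = "%sx%s Episodio %s (%s)" % (season, str(episode).zfill(2), episode, date)
    return new_item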


@@ -67,6 +67,23 @@
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "perfil",
"type": "list",


@@ -25,6 +25,8 @@ list_servers = ['openload', 'powvideo', 'rapidvideo', 'streamango', 'streamcloud
__modo_grafico__ = config.get_setting('modo_grafico', 'cinefox')
__perfil__ = int(config.get_setting('perfil', "cinefox"))
__menu_info__ = config.get_setting('menu_info', 'cinefox')
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cinefox')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cinefox')
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
@@ -685,8 +687,23 @@ def findvideos(item):
autoplay.start(itemlist, item)
if __comprueba_enlaces__:
for it in itemlist:
if it.server != '' and it.url != '':
it.url = normalizar_url(it.url, it.server)
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
return itemlist
def normalizar_url(url, server):
# Run through findvideosbyserver to obtain the url from the pattern/url entries in the server json files
# Exceptions copied from the play function
url = url.replace("http://miracine.tv/n/?etu=", "http://hqq.tv/player/embed_player.php?vid=")
url = url.replace("streamcloud.eu/embed-", "streamcloud.eu/")
enlaces = servertools.findvideosbyserver(url, server)[0]
if enlaces[1] != '':
return enlaces[1]
return url
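normalizar_url is best-effort: it rewrites two known embed URLs, asks servertools.findvideosbyserver for a canonical link, and falls back to the input when nothing matches. A hypothetical call (the embed URL is invented):

# Illustrative only; the URL below is made up.
url = normalizar_url("http://streamcloud.eu/embed-abc123", "streamcloud")
# The streamcloud exception strips "embed-"; findvideosbyserver then returns
# entries whose second field is the resolved url, or '' if nothing matched,
# in which case the original url is returned unchanged.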
def get_enlaces(item, url, type):
itemlist = []


@@ -121,8 +121,8 @@ def episodios(item):
data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img alt=".+?" title=".+?" src="([^"]+)">'
patron_caps += '<\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
patron_caps = '<img alt=".+?" src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?'
patron_caps += '<a .+? href="([^"]+)">([^"]+)<\/a>'
#scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:


@@ -133,7 +133,8 @@ def listado(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
url = scrapedurl
title = scrapedtitle
title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
@@ -209,10 +210,11 @@ def listado_busqueda(item):
for url, thumb, title in matches:
real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
if real_title == "":
if not real_title:
real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
real_title = scrapertools.htmlclean(real_title)
real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
@@ -220,9 +222,8 @@ def listado_busqueda(item):
# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
title = re.sub(r'(Calidad.*?\])', '', title)
title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
if real_title == "":
real_title = title
if calidad == "":
@@ -236,18 +237,22 @@ def listado_busqueda(item):
# Code to salvage what it can from pelisyseries.com Series for the video library. The URL points to the episode, not the Series. The Series name is frequently blank; it is recovered from the thumb, as is the series id
if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
calidad_mps = "series/"
if "seriehd" in url:
calidad_mps = "series-hd/"
if "serievo" in url:
elif "serievo" in url:
calidad_mps = "series-vo/"
if "serie-vo" in url:
elif "serie-vo" in url:
calidad_mps = "series-vo/"
else:
calidad_mps = "series/"
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "no_image" in thumb:
real_title_mps = title
else:
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "/0_" not in thumb:
if "/0_" not in thumb and not "no_image" in thumb:
serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
if len(serieid) > 5:
serieid = ""
@@ -507,6 +512,11 @@ def episodios(item):
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if "Especial" in info: # Capitulos Especiales
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioNEW: " + pattern)
logger.debug(info)
info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
@@ -540,11 +550,11 @@ def episodios(item):
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
else:
logger.debug("patron episodio: " + pattern)
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioOLD: " + pattern)
logger.debug(info)
continue
info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]


@@ -23,36 +23,6 @@ list_quality = CALIDADES.values()
list_servers = ['directo', 'openload']
host = 'http://doomtv.net/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0 Chrome/58.0.3029.110',
'Referer': host}
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Suspenso": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
"Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Animación": "https://s13.postimg.org/5on877l87/animacion.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Bélico Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
"Biográfia": "https://s15.postimg.org/5lrpbx323/biografia.png",
"Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png",
"Familiar": "https://s7.postimg.org/6s7vdhqrf/familiar.png",
"Intriga": "https://s27.postimg.org/v9og43u2b/intriga.png",
"Thriller": "https://s22.postimg.org/5y9g0jsu9/thriller.png",
"Guerra": "https://s4.postimg.org/n1h2jp2jh/guerra.png",
"Estrenos": "https://s21.postimg.org/fy69wzm93/estrenos.png",
"Peleas": "https://s14.postimg.org/we1oyg05t/peleas.png",
"Policiales": "https://s21.postimg.org/n9e0ci31z/policial.png",
"Uncategorized": "https://s30.postimg.org/uj5tslenl/otros.png",
"LGBT": "https://s30.postimg.org/uj5tslenl/otros.png"}
def mainlist(item):
@@ -177,15 +147,13 @@ def seccion(item):
url = scrapedurl
title = scrapedtitle
thumbnail = ''
if title in tgenero:
thumbnail = tgenero[title]
if url not in duplicado:
itemlist.append(
Item(channel=item.channel,
action='lista',
title=title,
url=url,
thumbnail = thumbnail
thumbnail=thumbnail
))
return itemlist
@@ -221,64 +189,25 @@ def newest(categoria):
return itemlist
def get_vip(item, url):
logger.info()
itemlist = []
data = httptools.downloadpage(url+'/videocontent').data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
video_id = scrapertools.find_single_match(data, 'id=videoInfo ><span >(.*?)</span>')
new_url = 'https://v.d0stream.com/api/videoinfo/%s?src-url=https%3A%2F%2Fv.d0stream.com' % video_id
json_data = httptools.downloadpage(new_url).data
dict_data = jsontools.load(json_data)
sources = dict_data['sources']
for vip_item in sources['mp4_cdn']:
vip_url= vip_item['url']
vip_quality = vip_item['label']
title ='%s [%s]' % (item.title, vip_quality)
itemlist.append(item.clone(title = title, url=vip_url, action='play', quality=vip_quality, server='directo'))
return itemlist
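get_vip assumes the d0stream videoinfo endpoint returns JSON shaped roughly like the sketch below; the key names come from the code above, the values are illustrative:

# Assumed (illustrative) response for .../api/videoinfo/<id>:
dict_data = {
    "sources": {
        "mp4_cdn": [
            {"url": "https://cdn.example/video_720.mp4", "label": "720p"},
            {"url": "https://cdn.example/video_480.mp4", "label": "480p"},
        ]
    }
}
# get_vip() emits one playable Item per entry in sources["mp4_cdn"],
# tagging the title with the quality label.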
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
player_vip = scrapertools.find_single_match(data, 'class=movieplay><iframe src=(https://v.d0stream.com.*?) frameborder')
itemlist.extend(get_vip(item, player_vip))
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|frameborder|><\/script>)'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
if 'content' in urls:
urls = '%s%s'%('http:',urls)
hidden_data = httptools.downloadpage(urls).data
hidden_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", hidden_data)
patron = 'sources: \[{file: (.*?),'
matches = re.compile(patron, re.DOTALL).findall(hidden_data)
for videoitem in matches:
new_item = Item(
channel = item.channel,
url = videoitem,
title = item.title,
contentTitle = item.title,
action = 'play',
)
itemlist.append(new_item)
else:
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
contentTitle=item.title,
action='play',
)
itemlist.append(new_item)
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
contentTitle=item.title,
action='play',
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':


@@ -254,7 +254,7 @@ def episodios(item):
url = host + scrapertools.find_single_match(data,patron)
# "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
logger.debug("post=" + post)
#logger.debug("post=" + post)
if item.extra == "series":
epi = scrapedtitle.split("x")
@@ -311,7 +311,6 @@ def show_movie_info(item):
pass
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
patron = "<a href='(secciones.php\?sec\=descargas[^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -319,9 +318,11 @@ def show_movie_info(item):
for scrapedurl in matches:
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")
torrent_data = httptools.downloadpage(url).data
link = scrapertools.get_match(torrent_data, "<a href='(\/uploads\/torrents\/peliculas\/.*?\.torrent)'>")
if scrapertools.find_single_match(torrent_data, "<a href='(\/uploads\/torrents\/peliculas\/.*?\.torrent)'>"):
link = scrapertools.get_match(torrent_data, "<a href='(\/uploads\/torrents\/peliculas\/.*?\.torrent)'>")
else:
link = scrapertools.get_match(torrent_data, "<a href='(http:\/\/www.mejortorrent.com\/uploads\/torrents\/.*?peliculas\/.*?\.torrent)'>")
link = urlparse.urljoin(url, link)
logger.debug("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
@@ -363,7 +364,7 @@ def play(item):
else:
#data = httptools.downloadpage(item.url, post=item.extra).data
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
#logger.debug("data=" + data)
params = dict(urlparse.parse_qsl(item.extra))
patron = host + "/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id
@@ -373,7 +374,9 @@ def play(item):
data = httptools.downloadpage(patron).data
patron = "Pincha <a href='(.*?)'>"
link = host + scrapertools.find_single_match(data, patron)
link = scrapertools.find_single_match(data, patron)
if not host in link:
link = host + link
logger.info("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, folder=False))
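The play() fix handles the site returning either a relative or an absolute download link. A worked example of the normalization (the .torrent path is invented):

host = "http://www.mejortorrent.com"
link = "/uploads/torrents/peliculas/example.torrent"
if not host in link:
    link = host + link
# -> http://www.mejortorrent.com/uploads/torrents/peliculas/example.torrent
# A link that already contains the host is left untouched.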


@@ -10,7 +10,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
host = 'http://www.mispelisyseries.com/'
host = 'http://mispelisyseries.com/'
def mainlist(item):
logger.info()
@@ -133,7 +133,8 @@ def listado(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
url = scrapedurl
title = scrapedtitle
title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
@@ -209,10 +210,11 @@ def listado_busqueda(item):
for url, thumb, title in matches:
real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
if real_title == "":
if not real_title:
real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
real_title = scrapertools.htmlclean(real_title)
real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
@@ -220,9 +222,8 @@ def listado_busqueda(item):
# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
title = re.sub(r'(Calidad.*?\])', '', title)
title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
if real_title == "":
real_title = title
if calidad == "":
@@ -236,18 +237,22 @@ def listado_busqueda(item):
# Code to salvage what it can from pelisyseries.com Series for the video library. The URL points to the episode, not the Series. The Series name is frequently blank; it is recovered from the thumb, as is the series id
if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
calidad_mps = "series/"
if "seriehd" in url:
calidad_mps = "series-hd/"
if "serievo" in url:
elif "serievo" in url:
calidad_mps = "series-vo/"
if "serie-vo" in url:
elif "serie-vo" in url:
calidad_mps = "series-vo/"
else:
calidad_mps = "series/"
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "no_image" in thumb:
real_title_mps = title
else:
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "/0_" not in thumb:
if "/0_" not in thumb and not "no_image" in thumb:
serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
if len(serieid) > 5:
serieid = ""
@@ -507,6 +512,11 @@ def episodios(item):
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if "Especial" in info: # Capitulos Especiales
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioNEW: " + pattern)
logger.debug(info)
info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
@@ -540,11 +550,11 @@ def episodios(item):
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
else:
logger.debug("patron episodio: " + pattern)
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioOLD: " + pattern)
logger.debug(info)
continue
info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]


@@ -20,6 +20,23 @@
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}
}


@@ -10,6 +10,10 @@ from core.item import Item
from platformcode import logger
from core import httptools
from platformcode import config
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'newpct')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'newpct')
Host='http://www.tvsinpagar.com'
@@ -97,4 +101,6 @@ def findvideos(item):
itemlist.extend(new_item)
for it in itemlist:
it.channel = item.channel
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
return itemlist


@@ -26,6 +26,23 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}
}


@@ -11,6 +11,9 @@ from platformcode import logger
from platformcode import config
from core import tmdb
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculasdk')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculasdk')
host = "http://www.peliculasdk.com"
def mainlist(item):
@@ -183,6 +186,8 @@ def findvideos(item):
language=idioma, quality=calidad))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
'title': item.fulltitle}


@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import httptools
from core import scrapertools
@@ -215,37 +216,29 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = '<div class=TPlayer.*?\s+id=(.*?)><iframe width=560 height=315 src=(.*?) frameborder=0'
patron = '<div class=TPlayerTb.Current id=(.*?)>.*?src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
base_link = 'https://repros.live/player/ajaxdata'
for opt, urls_page in matches:
logger.debug ('option: %s' % opt)
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.'
'<\/strong><\/span>.*?<span>(.*?)<\/span'%opt)
video_data = httptools.downloadpage(urls_page).data
servers = scrapertools.find_multiple_matches(video_data,'<button id="(.*?)"')
for server in servers:
quality = item.quality
info_urls = urls_page.replace('embed','get')
video_info=httptools.downloadpage(info_urls+'/'+server).data
video_info = jsontools.load(video_info)
video_id = video_info['extid']
video_server = video_info['server']
video_status = video_info['status']
if video_status in ['finished', 'propio']:
if video_status == 'finished':
url = 'https://'+video_server+'/embed/'+video_id
else:
url = 'https://'+video_server+'/e/'+video_id
title = item.contentTitle + ' [%s] [%s]'%(quality, language)
itemlist.append(item.clone(title=title,
url=url,
action='play',
language=language,
quality=quality
))
itemlist = servertools.get_servers_itemlist(itemlist)
language = scrapertools.find_single_match (data,'TPlayerNv>.*?tplayernv=%s><span>Opción.*?<span>(.*?)</span>' % opt)
headers = {'referer':item.url}
if 'trembed' in urls_page:
urls_page = scrapertools.decodeHtmlentities(urls_page)
sub_data=httptools.downloadpage(urls_page).data
urls_page = scrapertools.find_single_match(sub_data, 'src="(.*?)" ')
video_data = httptools.downloadpage(urls_page, headers=headers).data
servers = scrapertools.find_multiple_matches(video_data,'data-player="(.*?)" data-embed="(.*?)">')
for server, code in servers:
post = {'codigo':code}
post = urllib.urlencode(post)
video_json=jsontools.load(httptools.downloadpage('https://repros.live/player/ajaxdata', post=post).data)
url = video_json['url']
itemlist.append(item.clone(title='[%s][%s]',
url=url,
action='play',
language=language,
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
return itemlist
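The rewritten findvideos() resolves each player button through the repros.live endpoint. A minimal sketch of that resolver step, assuming the endpoint behaves as the loop above uses it (the helper name and embed code are invented):

# Hypothetical helper isolating the resolver call used in the loop above.
import urllib
from core import httptools, jsontools

def resolve_embed(code):
    post = urllib.urlencode({'codigo': code})  # form-encoded POST body
    data = httptools.downloadpage('https://repros.live/player/ajaxdata', post=post).data
    return jsontools.load(data)['url']         # final hosted-video url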


@@ -14,7 +14,7 @@ from core import tmdb
from core.item import Item, InfoLabels
from platformcode import config, logger
host = "https://pepecine.info"
host = "https://pepecinehd.tv"
perpage = 20
def mainlist1(item):
@@ -29,7 +29,7 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Ultimas",
url=host+'/peliculas-tv-online',
url=host+'/tv-peliculas-online',
action='list_latest',
indexp=1,
type='movie'))
@@ -149,7 +149,7 @@ def list_latest(item):
logger.info()
itemlist = []
data = get_source(item.url)
data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) style')
data_url= scrapertools.find_single_match(data,'<iframe.*?src=(.*?) ')
data = get_source(data_url)
patron = "<div class='online'>.*?<img src=(.*?) class=.*?alt=(.*?) title=.*?"
patron += "<b><a href=(.*?) target=.*?align=right><div class=s7>(.*?) <"


@@ -4,7 +4,7 @@
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "plusdede.png",
"thumbnail": "https://s18.postimg.cc/e17e98eqh/6_-_4_Isbv_Q3.png",
"banner": "plusdede.png",
"categories": [
"movie",


@@ -61,17 +61,17 @@ def mainlist(item):
item.url = HOST
item.fanart = fanart_host
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail = 'https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
@@ -86,31 +86,31 @@ def menuseries(item):
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series:", folder=False, text_color=color3, text_blod=True, select=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/series"))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/series"))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/series", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(
item.clone(action="peliculas", title=" Siguiendo", url="https://www.plusdede.com/series/following"))
item.clone(action="peliculas", title=" Siguiendo", url="https://www.plusdede.com/series/following", thumbnail='https://s18.postimg.cc/68gqh7j15/7_-_tqw_AHa5.png'))
itemlist.append(item.clone(action="peliculas", title=" Capítulos Pendientes",
url="https://www.plusdede.com/series/mypending/0?popup=1", viewmode="movie"))
url="https://www.plusdede.com/series/mypending/0?popup=1", viewmode="movie", thumbnail='https://s18.postimg.cc/9s2o71w1l/2_-_3dbbx7_K.png'))
itemlist.append(
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/series/favorites"))
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/series/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/series/pending"))
itemlist.append(item.clone(action="peliculas", title=" Terminadas", url="https://www.plusdede.com/series/seen"))
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/series/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
itemlist.append(item.clone(action="peliculas", title=" Terminadas", url="https://www.plusdede.com/series/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/series/recommended"))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/series"))
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/series/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/series", thumbnaiil='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
@@ -122,29 +122,29 @@ def menupeliculas(item):
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas:", folder=False, text_color=color3, text_blod=True, select=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/pelis"))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/pelis"))
itemlist.append(item.clone(action="peliculas", title=" Solo HD", url="https://www.plusdede.com/pelis?quality=3"))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/in3ihji95/11_-_WPg_H5_Kx.png'))
itemlist.append(item.clone(action="generos", title=" Por géneros", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/p0slktaah/5_-_c_Nf_KRvm.png'))
itemlist.append(item.clone(action="peliculas", title=" Solo HD", url="https://www.plusdede.com/pelis?quality=3", thumbnail='https://s18.postimg.cc/e17e95mfd/16_-_qmqn4_Si.png'))
itemlist.append(
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/pelis/pending"))
item.clone(action="peliculas", title=" Pendientes", url="https://www.plusdede.com/pelis/pending", thumbnail='https://s18.postimg.cc/4gnrmacix/13_-_cwl_TDog.png'))
itemlist.append(
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/pelis/recommended"))
item.clone(action="peliculas", title=" Recomendadas", url="https://www.plusdede.com/pelis/recommended", thumbnail='https://s18.postimg.cc/bwn182sih/14_-_fin32_Kp.png'))
itemlist.append(
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/pelis/favorites"))
itemlist.append(item.clone(action="peliculas", title=" Vistas", url="https://www.plusdede.com/pelis/seen"))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/pelis"))
item.clone(action="peliculas", title=" Favoritas", url="https://www.plusdede.com/pelis/favorites", thumbnail='https://s18.postimg.cc/n8zmpwynd/4_-_JGrig_Ep.png'))
itemlist.append(item.clone(action="peliculas", title=" Vistas", url="https://www.plusdede.com/pelis/seen", thumbnail='https://s18.postimg.cc/5vpcay0qh/17_-_M2in_Fp_O.png'))
itemlist.append(item.clone(action="search", title=" Buscar...", url="https://www.plusdede.com/pelis", thumbnail='https://s18.postimg.cc/s7n54ghvt/1_-_01_ZDYii.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png'))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas", action="menulistas", text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist
@@ -156,23 +156,23 @@ def menulistas(item):
item.fanart = fanart_host
item.text_color = None
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
item.thumbnail = "https://s18.postimg.cc/r5cylu6rd/12_-_oi_RDsdv.png"
itemlist.append(item.clone(title="Películas", action="menupeliculas", text_color=color3, text_blod=True))
item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
item.thumbnail = "https://s18.postimg.cc/ruvqy6zl5/15_-_9m9_Dp1m.png"
itemlist.append(item.clone(title="Series", action="menuseries", text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas:", folder=False, text_color=color3, text_blod=True))
itemlist.append(item.clone(title="Listas:", folder=False, text_color=color3, text_blod=True, thumbnail='https://s18.postimg.cc/xj21p46ih/10_-_Uf7e_XHE.png'))
itemlist.append(
item.clone(action="listas", tipo="populares", title=" Populares", url="https://www.plusdede.com/listas"))
item.clone(action="listas", tipo="populares", title=" Populares", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/7aqwzrha1/8_-_3rn14_Tq.png'))
itemlist.append(
item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url="https://www.plusdede.com/listas"))
item.clone(action="listas", tipo="siguiendo", title=" Siguiendo", url="https://www.plusdede.com/listas", thumbnail='https://s18.postimg.cc/4tf5sha89/9_-_z_F8c_UBT.png'))
itemlist.append(
item.clone(action="listas", tipo="tuslistas", title=" Tus Listas", url="https://www.plusdede.com/listas"))
itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
item.thumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
itemlist.append(item.clone(channel=item.channel, action="settingCanal", title="Configuración...", url="", thumbnail='https://s18.postimg.cc/c9efeassp/3_-_QAHK2_Tc.png'))
return itemlist


@@ -24,7 +24,7 @@
"Inglés",
"Latino",
"Catalán",
"VOS"
"VOSE"
]
},
{
@@ -44,4 +44,4 @@
"visible": true
}
]
}
}


@@ -133,7 +133,8 @@ def listado(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
url = scrapedurl
title = scrapedtitle
title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
@@ -209,10 +210,11 @@ def listado_busqueda(item):
for url, thumb, title in matches:
real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
if real_title == "":
if not real_title:
real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
real_title = scrapertools.htmlclean(real_title)
real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
@@ -220,9 +222,8 @@ def listado_busqueda(item):
# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
title = re.sub(r'(Calidad.*?\])', '', title)
title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
if real_title == "":
real_title = title
if calidad == "":
@@ -236,18 +237,22 @@ def listado_busqueda(item):
# Code to salvage what it can from pelisyseries.com Series for the video library. The URL points to the episode, not the Series. The Series name is frequently blank; it is recovered from the thumb, as is the series id
if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
calidad_mps = "series/"
if "seriehd" in url:
calidad_mps = "series-hd/"
if "serievo" in url:
elif "serievo" in url:
calidad_mps = "series-vo/"
if "serie-vo" in url:
elif "serie-vo" in url:
calidad_mps = "series-vo/"
else:
calidad_mps = "series/"
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "no_image" in thumb:
real_title_mps = title
else:
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "/0_" not in thumb:
if "/0_" not in thumb and not "no_image" in thumb:
serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
if len(serieid) > 5:
serieid = ""
@@ -507,6 +512,11 @@ def episodios(item):
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if "Especial" in info: # Capitulos Especiales
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioNEW: " + pattern)
logger.debug(info)
info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
@@ -540,11 +550,11 @@ def episodios(item):
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
else:
logger.debug("patron episodio: " + pattern)
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioOLD: " + pattern)
logger.debug(info)
continue
info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]


@@ -133,7 +133,8 @@ def listado(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
url = scrapedurl
title = scrapedtitle
title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
@@ -209,10 +210,11 @@ def listado_busqueda(item):
for url, thumb, title in matches:
real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
if real_title == "":
if not real_title:
real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
real_title = scrapertools.htmlclean(real_title)
real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
@@ -220,9 +222,8 @@ def listado_busqueda(item):
# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
title = re.sub(r'(Calidad.*?\])', '', title)
title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
if real_title == "":
real_title = title
if calidad == "":
@@ -236,18 +237,22 @@ def listado_busqueda(item):
# Code to salvage what it can from pelisyseries.com Series for the video library. The URL points to the episode, not the Series. The Series name is frequently blank; it is recovered from the thumb, as is the series id
if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
calidad_mps = "series/"
if "seriehd" in url:
calidad_mps = "series-hd/"
if "serievo" in url:
elif "serievo" in url:
calidad_mps = "series-vo/"
if "serie-vo" in url:
elif "serie-vo" in url:
calidad_mps = "series-vo/"
else:
calidad_mps = "series/"
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "no_image" in thumb:
real_title_mps = title
else:
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "/0_" not in thumb:
if "/0_" not in thumb and not "no_image" in thumb:
serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
if len(serieid) > 5:
serieid = ""
@@ -507,6 +512,11 @@ def episodios(item):
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if "Especial" in info: # Capitulos Especiales
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioNEW: " + pattern)
logger.debug(info)
info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
@@ -540,11 +550,11 @@ def episodios(item):
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
else:
logger.debug("patron episodio: " + pattern)
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioOLD: " + pattern)
logger.debug(info)
continue
info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]


@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -133,7 +133,8 @@ def listado(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
url = scrapedurl
title = scrapedtitle
title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
@@ -209,10 +210,11 @@ def listado_busqueda(item):
for url, thumb, title in matches:
real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
if real_title == "":
if not real_title:
real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
real_title = scrapertools.htmlclean(real_title)
real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
@@ -220,9 +222,8 @@ def listado_busqueda(item):
# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
title = re.sub(r'(Calidad.*?\])', '', title)
title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
if real_title == "":
real_title = title
if calidad == "":
@@ -236,18 +237,22 @@ def listado_busqueda(item):
# Code to salvage what it can from pelisyseries.com Series for the video library. The URL points to the episode, not the Series. The Series name is frequently blank; it is recovered from the thumb, as is the series id
if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
calidad_mps = "series/"
if "seriehd" in url:
calidad_mps = "series-hd/"
if "serievo" in url:
elif "serievo" in url:
calidad_mps = "series-vo/"
if "serie-vo" in url:
elif "serie-vo" in url:
calidad_mps = "series-vo/"
else:
calidad_mps = "series/"
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "no_image" in thumb:
real_title_mps = title
else:
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "/0_" not in thumb:
if "/0_" not in thumb and not "no_image" in thumb:
serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
if len(serieid) > 5:
serieid = ""
@@ -507,6 +512,11 @@ def episodios(item):
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if "Especial" in info: # Capitulos Especiales
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioNEW: " + pattern)
logger.debug(info)
info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
@@ -540,11 +550,11 @@ def episodios(item):
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
else:
logger.debug("patron episodio: " + pattern)
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioOLD: " + pattern)
logger.debug(info)
continue
info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]


@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -133,7 +133,8 @@ def listado(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, title_alt, calidad in matches:
url = scrapedurl
title = scrapedtitle
title = scrapedtitle.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
title_alt = title_alt.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "")
thumbnail = scrapedthumbnail
action = "findvideos"
extra = ""
@@ -209,10 +210,11 @@ def listado_busqueda(item):
for url, thumb, title in matches:
real_title = scrapertools.find_single_match(title, r'<strong.*?>(.*?)Temporada.*?<\/strong>') #series
if real_title == "":
if not real_title:
real_title = scrapertools.find_single_match(title, r'(.*?)\[.*?]') #movies
real_title = scrapertools.remove_htmltags(real_title).decode('iso-8859-1').encode('utf-8')
real_title = scrapertools.htmlclean(real_title)
real_title = real_title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
calidad = scrapertools.find_single_match(title, r'.*?\s*Calidad.*?<span[^>]+>[\[]\s*(?P<quality>.*?)\s*[\]]<\/span>') #series
if calidad == "":
calidad = scrapertools.find_single_match(title, r'..*?(\[.*?.*\])') #movies
@@ -220,9 +222,8 @@ def listado_busqueda(item):
# fix encoding for title
title = scrapertools.htmlclean(title)
title = title.replace("�", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng")
title = re.sub(r'(Calidad.*?\])', '', title)
title = title.replace("�", "ñ").replace("Ã±", "ñ").replace("Temp", " Temp").replace("Esp", " Esp").replace("Ing", " Ing").replace("Eng", " Eng").replace("Calidad", "").replace("de la Serie", "")
if real_title == "":
real_title = title
if calidad == "":
@@ -236,18 +237,22 @@ def listado_busqueda(item):
# Code to salvage what it can from pelisyseries.com Series for the video library. The URL points to the episode, not the Series. The Series name is frequently blank; it is recovered from the thumb, as is the series id
if ("/serie" in url or "-serie" in url) and "pelisyseries.com" in host:
calidad_mps = "series/"
if "seriehd" in url:
calidad_mps = "series-hd/"
if "serievo" in url:
elif "serievo" in url:
calidad_mps = "series-vo/"
if "serie-vo" in url:
elif "serie-vo" in url:
calidad_mps = "series-vo/"
else:
calidad_mps = "series/"
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "no_image" in thumb:
real_title_mps = title
else:
real_title_mps = re.sub(r'.*?\/\d+_', '', thumb)
real_title_mps = re.sub(r'\.\w+.*?', '', real_title_mps)
if "/0_" not in thumb:
if "/0_" not in thumb and not "no_image" in thumb:
serieid = scrapertools.find_single_match(thumb, r'.*?\/\w\/(?P<serieid>\d+).*?.*')
if len(serieid) > 5:
serieid = ""
@@ -507,6 +512,11 @@ def episodios(item):
"[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if "Especial" in info: # Capitulos Especiales
pattern = ".*?[^>]+>.*?Temporada.*?\[.*?(?P<season>\d+).*?\].*?Capitulo.*?\[\s*(?P<episode>\d+).*?\]?(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)?<\/span>\s*Calidad\s*<span[^>]+>[\[]\s*(?P<quality>.*?)?\s*[\]]<\/span>"
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioNEW: " + pattern)
logger.debug(info)
info = '><strong>%sTemporada %s Capitulo 0</strong> - <span >Español Castellano</span> Calidad <span >[%s]</span>' % (item.contentTitle, season, item.infoLabels['quality'])
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]
@@ -540,11 +550,11 @@ def episodios(item):
"(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
elif scrapertools.find_single_match(info, 'Cap.\d{2,3}'):
pattern = ".*?Temp.*?\s(?P<quality>.*?)\s.*?Cap.(?P<season>\d).*?(?P<episode>\d{2})(?:_(?P<season2>\d+)(?P<episode2>\d{2}))?.*?\s(?P<lang>.*)?"
else:
logger.debug("patron episodio: " + pattern)
if not scrapertools.find_single_match(info, pattern): # on a format error, build a basic one
logger.debug("patron episodioOLD: " + pattern)
logger.debug(info)
continue
info = '%s [%s][Cap.%s00][Español]' % (item.contentTitle, item.infoLabels['quality'], season)
r = re.compile(pattern)
match = [m.groupdict() for m in r.finditer(info)][0]


@@ -9,7 +9,7 @@ import urllib
import urlparse
from platformcode import logger
from decimal import Decimal, ROUND_UP
from decimal import Decimal
class Cloudflare:
def __init__(self, response):
@@ -62,18 +62,17 @@ class Cloudflare:
def get_url(self):
# Metodo #1 (javascript)
if self.js_data.get("wait", 0):
jschl_answer = self.decode2(self.js_data["value"])
jschl_answer = self.decode(self.js_data["value"])
for op, v in self.js_data["op"]:
#jschl_answer = eval(str(jschl_answer) + op + str(self.decode2(v)))
if op == '+':
jschl_answer = jschl_answer + self.decode2(v)
jschl_answer = jschl_answer + self.decode(v)
elif op == '-':
jschl_answer = jschl_answer - self.decode2(v)
jschl_answer = jschl_answer - self.decode(v)
elif op == '*':
jschl_answer = jschl_answer * self.decode2(v)
jschl_answer = jschl_answer * self.decode(v)
elif op == '/':
jschl_answer = jschl_answer / self.decode2(v)
jschl_answer = jschl_answer / self.decode(v)
self.js_data["params"]["jschl_answer"] = round(jschl_answer, 10) + len(self.domain)
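For reference, a self-contained sketch (not part of the addon) of the operator fold above; the numbers are dummies standing in for decode() results, not real Cloudflare values:

jschl_answer = 7.5                       # stand-in for decode(js_data["value"])
for op, v in [("+", 2.25), ("*", 3.0)]:  # stand-ins for the decoded (op, value) pairs
    if op == "+":
        jschl_answer += v
    elif op == "-":
        jschl_answer -= v
    elif op == "*":
        jschl_answer *= v
    elif op == "/":
        jschl_answer /= v
# The answer is rounded and offset by the domain length before being submitted:
print(round(jschl_answer, 10) + len("example.com"))  # -> 40.25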
@@ -93,7 +92,7 @@ class Cloudflare:
return response
def decode2(self, data):
def decode(self, data):
data = re.sub("\!\+\[\]", "1", data)
data = re.sub("\!\!\[\]", "1", data)
data = re.sub("\[\]", "0", data)
@@ -112,59 +111,4 @@ class Cloudflare:
for n in aux:
num2 += str(eval(n))
#return float(num1) / float(num2)
#return Decimal(Decimal(num1) / Decimal(num2)).quantize(Decimal('.0000000000000001'), rounding=ROUND_UP)
return Decimal(Decimal(num1) / Decimal(num2)).quantize(Decimal('.0000000000000001'))
def decode(self, data):
t = time.time()
timeout = False
while not timeout:
data = re.sub("\[\]", "''", data)
data = re.sub("!\+''", "+1", data)
data = re.sub("!''", "0", data)
data = re.sub("!0", "1", data)
if "(" in data:
x, y = data.rfind("("), data.find(")", data.rfind("(")) + 1
part = data[x + 1:y - 1]
else:
x = 0
y = len(data)
part = data
val = ""
if not part.startswith("+"): part = "+" + part
for i, ch in enumerate(part):
if ch == "+":
if not part[i + 1] == "'":
if val == "": val = 0
if type(val) == str:
val = val + self.get_number(part, i + 1)
else:
val = val + int(self.get_number(part, i + 1))
else:
val = str(val)
val = val + self.get_number(part, i + 1) or "0"
if type(val) == str: val = "'%s'" % val
data = data[0:x] + str(val) + data[y:]
timeout = time.time() - t > self.timeout
if not "+" in data and not "(" in data and not ")" in data:
return int(self.get_number(data))
def get_number(self, str, start=0):
ret = ""
for chr in str[start:]:
try:
int(chr)
except:
if ret: break
else:
ret += chr
return ret

View File

@@ -705,3 +705,46 @@ def filter_servers(servers_list):
servers_list = servers_list_filter
return servers_list
def check_list_links(itemlist, numero):
"""
Checks a list of video links and returns it with each title prefixed by the verification result.
The second parameter (numero) selects how many links to verify (0:5, 1:10, 2:15, 3:20)
"""
numero = ((int(numero) + 1) * 5) if numero != '' else 10
for it in itemlist:
if numero > 0 and it.server != '' and it.url != '':
verificacion = check_video_link(it.url, it.server)
it.title = verificacion + ', ' + it.title.strip()
it.alive = verificacion
numero -= 1
return itemlist
def check_video_link(url, server):
"""
Checks whether a video link is valid and returns a 2-character string with the result.
:param url, server: link and server
:return: str(2) '??': could not be checked. 'Ok': the link seems to work. 'NO': it seems to be down.
"""
try:
server_module = __import__('servers.%s' % server, None, None, ["servers.%s" % server])
except:
server_module = None
logger.info("[check_video_link] Could not import server module! %s" % server)
return "??"
if hasattr(server_module, 'test_video_exists'):
try:
video_exists, message = server_module.test_video_exists(page_url=url)
if not video_exists:
logger.info("[check_video_link] Video does not exist! %s %s %s" % (message, server, url))
return "NO"
else:
logger.info("[check_video_link] check OK %s %s" % (server, url))
return "Ok"
except:
logger.info("[check_video_link] Cannot check right now! %s %s" % (server, url))
return "??"
logger.info("[check_video_link] No test_video_exists for server: %s" % server)
return "??"
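As a usage sketch, a channel's findvideos() could hand its results to check_list_links() before returning them. The items and URLs below are made up, and in practice numero would come from a user setting; note the call performs real network checks through the server modules:

from core.item import Item

itemlist = [Item(title='Opcion 1', server='estream',
                 url='https://estream.to/abc123.html'),
            Item(title='Opcion 2', server='dostream',
                 url='https://v.d0stream.com/embed/xyz789')]
itemlist = check_list_links(itemlist, 0)  # numero=0 -> verify up to 5 links
for it in itemlist:
    print(it.title)  # e.g. "Ok, Opcion 1" / "NO, Opcion 2" / "??, Opcion 2"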

View File

@@ -324,7 +324,9 @@ def title_format(item):
# If the title has no contentSerieName, it is formatted as a movie
item.title = '%s' % set_color(item.contentTitle, 'movie')
if item.contentType=='movie':
item.context='Buscar esta pelicula en otros canales'
if item.context:
if isinstance(item.context, list):
item.context.append('Buscar esta pelicula en otros canales')
if 'Novedades' in item.category and item.from_channel=='news':
#logger.debug('novedades')
@@ -406,6 +408,7 @@ def title_format(item):
if 'Activar' in item.context[1]['title']:
item.title= '%s' % (set_color(item.title, 'no_update'))
#logger.debug('Despues del formato: %s' % item)
# Format the server name if present
if item.server:
server = '%s' % set_color(item.server.strip().capitalize(), 'server')
@@ -417,6 +420,7 @@ def title_format(item):
if item.action != 'play' and item.server:
item.title ='%s %s'%(item.title, server.strip())
elif item.action == 'play' and item.server:
if item.quality == 'default':
quality = ''
#logger.debug('language_color: %s'%language_color)
@@ -424,6 +428,12 @@ def title_format(item):
if lang:
item.title = add_languages(item.title, simple_language)
#logger.debug('item.title: %s' % item.title)
# if link verification was run
if item.alive != '':
if item.alive.lower() == 'no':
item.title = '[[COLOR red][B]X[/B][/COLOR]] %s' % item.title
elif item.alive == '??':
item.title = '[[COLOR yellow][B]?[/B][/COLOR]] %s' % item.title
else:
item.title = '%s' % item.title
#logger.debug('item.title despues de server: %s' % item.title)

Binary file not shown (new image, 42 KiB)

Binary file not shown (new image, 32 KiB)

Binary file not shown (new image, 15 KiB)

Binary file not shown (new image, 28 KiB)

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://.+.d0stream.com/embed/([a-z0-9]+)",
"url": "https://v.d0stream.com/embed/\\1"
}
]
},
"free": true,
"id": "dostream",
"name": "dostream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s14.postimg.org/lczc08bsx/dostream.png"
}
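For illustration, a standalone sketch of how a "find_videos" pattern like the one above gets applied: each regex match in a page is rewritten to the canonical embed URL. The sample HTML is made up, and this is not Alfa's actual loader:

import re

pattern = r"https://.+.d0stream.com/embed/([a-z0-9]+)"
template = r"https://v.d0stream.com/embed/\1"
page = '<iframe src="https://cdn1.d0stream.com/embed/ab12cd34"></iframe>'
for m in re.finditer(pattern, page):
    print(re.sub(pattern, template, m.group(0)))
    # -> https://v.d0stream.com/embed/ab12cd34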

View File

@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector DoStream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Dostream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
patron = "(?:'src'|'url'):'(http.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
video_urls.append(['dostream', url])
return video_urls

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://estream.to/embed-([a-z0-9]+).html",
"url": "https://estream.to/\\1.html"
}
]
},
"free": true,
"id": "estream",
"name": "estream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s14.postimg.org/ibd54ayf5/estream.png"
}

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Estream By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Estream] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = "<source src=([^ ]+) type='video/mp4' label='.*?x(.*?)'"
matches = re.compile(patron, re.DOTALL).findall(data)
for url, quality in matches:
video_urls.append(["%sp [estream]" % quality, url])
return video_urls
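A quick check of the pattern above on a made-up <source> tag, as it would look after the quote-stripping re.sub in get_video_url has run:

import re

data = "<source src=https://s1.estream.to/v.mp4 type='video/mp4' label='1280x720'"
patron = "<source src=([^ ]+) type='video/mp4' label='.*?x(.*?)'"
print(re.compile(patron, re.DOTALL).findall(data))
# -> [('https://s1.estream.to/v.mp4', '720')]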

View File

@@ -8,6 +8,18 @@ from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
# http://netu.tv/watch_video.php?v=XX only contains a redirect; go straight to http://hqq.tv/player/embed_player.php?vid=XX
page_url = page_url.replace("http://netu.tv/watch_video.php?v=", "http://hqq.tv/player/embed_player.php?vid=")
data = httptools.downloadpage(page_url).data
if "var userid = '';" in data.lower():
return False, "[netutv] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)

View File

@@ -33,377 +33,16 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if 'Video is processing now' in data:
return "El vídeo está siendo procesado, inténtalo de nuevo más tarde"
var = scrapertools.find_single_match(data, 'var _0x[0-f]{4}=(\[[^;]+\]);')
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
url = scrapertools.find_single_match(unpacked, "(?:src):\\\\'([^\\\\]+.mp4)\\\\'")
itemlist.append([".mp4" + " [powvideo]", S(var).decode(url)])
itemlist.append([".mp4" + " [powvideo]", decode_powvideo_url(url)])
itemlist.sort(key=lambda x: x[0], reverse=True)
return itemlist
class S:
def __init__(self, var):
self.r = None
self.s = None
self.k = None
self.n = None
self.c = None
self.b = None
self.d = None
var = eval(var)
for x in range(0xd3, 0, -1):
var.append(var.pop(0))
self.var = var
self.t(
self.decode_index('0xc') +
self.decode_index('0d') +
self.decode_index('0xe') +
self.decode_index('0xf'),
self.decode_index('0x10')
)
def decode_index(self, index, key=None):
b64_data = self.var[int(index, 16)]
result = ''
_0xb99338 = 0x0
_0x25e3f4 = 0x0
data = base64.b64decode(b64_data)
data = urllib.unquote(data).decode('utf8')
if key:
_0x5da081 = [x for x in range(0x100)]
for x in range(0x100):
_0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
_0x139847 = _0x5da081[x]
_0x5da081[x] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
_0xb99338 = 0x0
for _0x11ebc5 in range(len(data)):
_0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
_0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
_0x139847 = _0x5da081[_0x25e3f4]
_0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])
return result
else:
return data
def decode(self, url):
_hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]
return url.replace(_hash, self.p(_hash))
def t(self, t, i):
self.r = 20
self.s = [1634760805, 857760878, 2036477234, 1797285236]
self.k = []
self.n = [0, 0]
self.c = [0, 0]
self.b = [None] * 64
self.d = 64
self.sk(self.sa(t))
self.sn(self.sa(i))
def e(self, t):
s = self.gb(len(t))
i = [s[h] ^ t[h] for h in range(len(t))]
return i
def p(self, t):
import base64
t += "=" * (4 - len(t) % 4)
t = base64.b64decode(t.replace('-', '+').replace('_', '/'))
return self._as(self.e(self.sa(t)))
@staticmethod
def sa(t):
s = [ord(t[i]) for i in range(len(t))]
return s
@staticmethod
def _as(t):
s = [chr(t[i]) for i in range(len(t))]
return ''.join(s)
def sk(self, t):
s = 0
for i in range(8):
self.k.append(
255 & t[s] | self.lshift((255 & t[s + 1]), 8) | self.lshift((255 & t[s + 2]), 16) | self.lshift(
(255 & t[s + 3]), 24))
s += 4
self._r()
def sn(self, t):
self.n[0] = 255 & t[0] | self.lshift((255 & t[1]), 8) | self.lshift((255 & t[2]), 16) | self.lshift(
(255 & t[3]), 24)
self.n[1] = 255 & t[4] | self.lshift((255 & t[5]), 8) | self.lshift((255 & t[6]), 16) | self.lshift(
(255 & t[7]), 24)
self._r()
def gb(self, t):
i = [None] * t
for s in range(t):
if 64 == self.d:
self._g()
self._i()
self.d = 0
i[s] = self.b[self.d]
self.d += 1
return i
def gh(self, t):
i = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
h = self.gb(t)
s = [i[self.rshift(h[b], 4) & 15] for b in range(len(h))]
s.append(i[15 & h[len(h)]])
return ''.join(s)
def _r(self):
self.c[0] = 0
self.c[1] = 0
self.d = 64
def _i(self):
self.c[0] = self.c[0] + 1 & 4294967295
if 0 == self.c[0]:
self.c[1] = self.c[1] + 1 & 4294967295
def _g(self):
i = self.s[0]
s = self.k[0]
h = self.k[1]
b = self.k[2]
r = self.k[3]
n = self.s[1]
o = self.n[0]
e = self.n[1]
c = self.c[0]
p = self.c[1]
a = self.s[2]
f = self.k[4]
u = self.k[5]
g = self.k[6]
y = self.k[7]
k = self.s[3]
l = i
d = s
v = h
_ = b
A = r
w = n
C = o
S = e
j = c
m = p
q = a
x = f
z = u
B = g
D = y
E = k
for F in range(0, self.r, 2):
# 0
t = l + z
A ^= self.lshift(t, 7) | self.bshift(t, 25)
t = A + l
j ^= self.lshift(t, 9) | self.bshift(t, 23)
t = j + A
z ^= self.lshift(t, 13) | self.bshift(t, 19)
t = z + j
l ^= self.lshift(t, 18) | self.bshift(t, 14)
# 1
t = w + d
m ^= self.lshift(t, 7) | self.bshift(t, 25)
t = m + w
B ^= self.lshift(t, 9) | self.bshift(t, 23)
t = B + m
d ^= self.lshift(t, 13) | self.bshift(t, 19)
t = d + B
w ^= self.lshift(t, 18) | self.bshift(t, 14)
# 2
t = q + C
D ^= self.lshift(t, 7) | self.bshift(t, 25)
t = D + q
v ^= self.lshift(t, 9) | self.bshift(t, 23)
t = v + D
C ^= self.lshift(t, 13) | self.bshift(t, 19)
t = C + v
q ^= self.lshift(t, 18) | self.bshift(t, 14)
# 3
t = E + x
_ ^= self.lshift(t, 7) | self.bshift(t, 25)
t = _ + E
S ^= self.lshift(t, 9) | self.bshift(t, 23)
t = S + _
x ^= self.lshift(t, 13) | self.bshift(t, 19)
t = x + S
E ^= self.lshift(t, 18) | self.bshift(t, 14)
# 4
t = l + _
d ^= self.lshift(t, 7) | self.bshift(t, 25)
t = d + l
v ^= self.lshift(t, 9) | self.bshift(t, 23)
t = v + d
_ ^= self.lshift(t, 13) | self.bshift(t, 19)
t = _ + v
l ^= self.lshift(t, 18) | self.bshift(t, 14)
# 5
t = w + A
C ^= self.lshift(t, 7) | self.bshift(t, 25)
t = C + w
S ^= self.lshift(t, 9) | self.bshift(t, 23)
t = S + C
A ^= self.lshift(t, 13) | self.bshift(t, 19)
t = A + S
w ^= self.lshift(t, 18) | self.bshift(t, 14)
# 6
t = q + m
x ^= self.lshift(t, 7) | self.bshift(t, 25)
t = x + q
j ^= self.lshift(t, 9) | self.bshift(t, 23)
t = j + x
m ^= self.lshift(t, 13) | self.bshift(t, 19)
t = m + j
q ^= self.lshift(t, 18) | self.bshift(t, 14)
# 7
t = E + D
z ^= self.lshift(t, 7) | self.bshift(t, 25)
t = z + E
B ^= self.lshift(t, 9) | self.bshift(t, 23)
t = B + z
D ^= self.lshift(t, 13) | self.bshift(t, 19)
t = D + B
E ^= self.lshift(t, 18) | self.bshift(t, 14)
l += i
d += s
v += h
_ += b
A += r
w += n
C += o
S += e
j += c
m += p
q += a
x += f
z += u
B += g
D += y
E += k
self.b[0] = self.bshift(l, 0) & 255
self.b[1] = self.bshift(l, 8) & 255
self.b[2] = self.bshift(l, 16) & 255
self.b[3] = self.bshift(l, 24) & 255
self.b[4] = self.bshift(d, 0) & 255
self.b[5] = self.bshift(d, 8) & 255
self.b[6] = self.bshift(d, 16) & 255
self.b[7] = self.bshift(d, 24) & 255
self.b[8] = self.bshift(v, 0) & 255
self.b[9] = self.bshift(v, 8) & 255
self.b[10] = self.bshift(v, 16) & 255
self.b[11] = self.bshift(v, 24) & 255
self.b[12] = self.bshift(_, 0) & 255
self.b[13] = self.bshift(_, 8) & 255
self.b[14] = self.bshift(_, 16) & 255
self.b[15] = self.bshift(_, 24) & 255
self.b[16] = self.bshift(A, 0) & 255
self.b[17] = self.bshift(A, 8) & 255
self.b[18] = self.bshift(A, 16) & 255
self.b[19] = self.bshift(A, 24) & 255
self.b[20] = self.bshift(w, 0) & 255
self.b[21] = self.bshift(w, 8) & 255
self.b[22] = self.bshift(w, 16) & 255
self.b[23] = self.bshift(w, 24) & 255
self.b[24] = self.bshift(C, 0) & 255
self.b[25] = self.bshift(C, 8) & 255
self.b[26] = self.bshift(C, 16) & 255
self.b[27] = self.bshift(C, 24) & 255
self.b[28] = self.bshift(S, 0) & 255
self.b[29] = self.bshift(S, 8) & 255
self.b[30] = self.bshift(S, 16) & 255
self.b[31] = self.bshift(S, 24) & 255
self.b[32] = self.bshift(j, 0) & 255
self.b[33] = self.bshift(j, 8) & 255
self.b[34] = self.bshift(j, 16) & 255
self.b[35] = self.bshift(j, 24) & 255
self.b[36] = self.bshift(m, 0) & 255
self.b[37] = self.bshift(m, 8) & 255
self.b[38] = self.bshift(m, 16) & 255
self.b[39] = self.bshift(m, 24) & 255
self.b[40] = self.bshift(q, 0) & 255
self.b[41] = self.bshift(q, 8) & 255
self.b[42] = self.bshift(q, 16) & 255
self.b[43] = self.bshift(q, 24) & 255
self.b[44] = self.bshift(x, 0) & 255
self.b[45] = self.bshift(x, 8) & 255
self.b[46] = self.bshift(x, 16) & 255
self.b[47] = self.bshift(x, 24) & 255
self.b[48] = self.bshift(z, 0) & 255
self.b[49] = self.bshift(z, 8) & 255
self.b[50] = self.bshift(z, 16) & 255
self.b[51] = self.bshift(z, 24) & 255
self.b[52] = self.bshift(B, 0) & 255
self.b[53] = self.bshift(B, 8) & 255
self.b[54] = self.bshift(B, 16) & 255
self.b[55] = self.bshift(B, 24) & 255
self.b[56] = self.bshift(D, 0) & 255
self.b[57] = self.bshift(D, 8) & 255
self.b[58] = self.bshift(D, 16) & 255
self.b[59] = self.bshift(D, 24) & 255
self.b[60] = self.bshift(E, 0) & 255
self.b[61] = self.bshift(E, 8) & 255
self.b[62] = self.bshift(E, 16) & 255
self.b[63] = self.bshift(E, 24) & 255
def lshift(self, num, other):
lnum = self.ToInt32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToInt32(lnum << shift_count)
def rshift(self, num, other):
lnum = self.ToInt32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToInt32(lnum >> shift_count)
def bshift(self, num, other):
lnum = self.ToUint32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToUint32(lnum >> shift_count)
@staticmethod
def ToInt32(num):
int32 = num % 2 ** 32
return int32 - 2 ** 32 if int32 >= 2 ** 31 else int32
@staticmethod
def ToUint32(num):
return num % 2 ** 32
def decode_powvideo_url(url):
tria = re.compile('[0-9a-z]{40,}', re.IGNORECASE).findall(url)[0]
gira = tria[::-1]
x = gira[:2] + gira[3:]
return re.sub(tria, x, url)
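A toy check of the transform above, with a made-up 40-character token: the token is reversed and its third character dropped before being substituted back into the URL:

url = ("https://powvideo.example/"
       "abcdefghij0123456789abcdefghij0123456789/video.mp4")
print(decode_powvideo_url(url))
# -> https://powvideo.example/986543210jihgfedcba9876543210jihgfedcba/video.mp4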

View File

@@ -36,375 +36,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
var = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')
url = scrapertools.find_single_match(unpacked, '(http[^,]+\.mp4)')
itemlist.append([".mp4" + " [streamplay]", S(var).decode(url)])
itemlist.append([".mp4" + " [streamplay]", decode_video_url(url)])
itemlist.sort(key=lambda x: x[0], reverse=True)
return itemlist
class S:
def __init__(self, var):
self.r = None
self.s = None
self.k = None
self.n = None
self.c = None
self.b = None
self.d = None
var = eval(var)
for x in range(0xf8, 0, -1):
var.append(var.pop(0))
self.var = var
self.t(
self.decode_index('0xb') +
self.decode_index('0xc') +
self.decode_index('0xd') +
self.decode_index('0xe'),
self.decode_index('0xf')
)
def decode_index(self, index, key=None):
b64_data = self.var[int(index, 16)]
result = ''
_0xb99338 = 0x0
_0x25e3f4 = 0x0
data = base64.b64decode(b64_data)
data = urllib.unquote(data).decode('utf8')
if key:
_0x5da081 = [x for x in range(0x100)]
for x in range(0x100):
_0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
_0x139847 = _0x5da081[x]
_0x5da081[x] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
_0xb99338 = 0x0
for _0x11ebc5 in range(len(data)):
_0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
_0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
_0x139847 = _0x5da081[_0x25e3f4]
_0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])
return result
else:
return data
def decode(self, url):
_hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]
return url.replace(_hash, self.p(_hash))
def t(self, t, i):
self.r = 20
self.s = [1634760805, 857760878, 2036477234, 1797285236]
self.k = []
self.n = [0, 0]
self.c = [0, 0]
self.b = [None] * 64
self.d = 64
self.sk(self.sa(t))
self.sn(self.sa(i))
def e(self, t):
s = self.gb(len(t))
i = [s[h] ^ t[h] for h in range(len(t))]
return i
def p(self, t):
import base64
t += "=" * (4 - len(t) % 4)
t = base64.b64decode(t.replace('-', '+').replace('_', '/'))
return self._as(self.e(self.sa(t)))
@staticmethod
def sa(t):
s = [ord(t[i]) for i in range(len(t))]
return s
@staticmethod
def _as(t):
s = [chr(t[i]) for i in range(len(t))]
return ''.join(s)
def sk(self, t):
s = 0
for i in range(8):
self.k.append(
255 & t[s] | self.lshift((255 & t[s + 1]), 8) | self.lshift((255 & t[s + 2]), 16) | self.lshift(
(255 & t[s + 3]), 24))
s += 4
self._r()
def sn(self, t):
self.n[0] = 255 & t[0] | self.lshift((255 & t[1]), 8) | self.lshift((255 & t[2]), 16) | self.lshift(
(255 & t[3]), 24)
self.n[1] = 255 & t[4] | self.lshift((255 & t[5]), 8) | self.lshift((255 & t[6]), 16) | self.lshift(
(255 & t[7]), 24)
self._r()
def gb(self, t):
i = [None] * t
for s in range(t):
if 64 == self.d:
self._g()
self._i()
self.d = 0
i[s] = self.b[self.d]
self.d += 1
return i
def gh(self, t):
i = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
h = self.gb(t)
s = [i[self.rshift(h[b], 4) & 15] for b in range(len(h))]
s.append(i[15 & h[len(h)]])
return ''.join(s)
def _r(self):
self.c[0] = 0
self.c[1] = 0
self.d = 64
def _i(self):
self.c[0] = self.c[0] + 1 & 4294967295
if 0 == self.c[0]:
self.c[1] = self.c[1] + 1 & 4294967295
def _g(self):
i = self.s[0]
s = self.k[0]
h = self.k[1]
b = self.k[2]
r = self.k[3]
n = self.s[1]
o = self.n[0]
e = self.n[1]
c = self.c[0]
p = self.c[1]
a = self.s[2]
f = self.k[4]
u = self.k[5]
g = self.k[6]
y = self.k[7]
k = self.s[3]
l = i
d = s
v = h
_ = b
A = r
w = n
C = o
S = e
j = c
m = p
q = a
x = f
z = u
B = g
D = y
E = k
for F in range(0, self.r, 2):
# 0
t = l + z
A ^= self.lshift(t, 7) | self.bshift(t, 25)
t = A + l
j ^= self.lshift(t, 9) | self.bshift(t, 23)
t = j + A
z ^= self.lshift(t, 13) | self.bshift(t, 19)
t = z + j
l ^= self.lshift(t, 18) | self.bshift(t, 14)
# 1
t = w + d
m ^= self.lshift(t, 7) | self.bshift(t, 25)
t = m + w
B ^= self.lshift(t, 9) | self.bshift(t, 23)
t = B + m
d ^= self.lshift(t, 13) | self.bshift(t, 19)
t = d + B
w ^= self.lshift(t, 18) | self.bshift(t, 14)
# 2
t = q + C
D ^= self.lshift(t, 7) | self.bshift(t, 25)
t = D + q
v ^= self.lshift(t, 9) | self.bshift(t, 23)
t = v + D
C ^= self.lshift(t, 13) | self.bshift(t, 19)
t = C + v
q ^= self.lshift(t, 18) | self.bshift(t, 14)
# 3
t = E + x
_ ^= self.lshift(t, 7) | self.bshift(t, 25)
t = _ + E
S ^= self.lshift(t, 9) | self.bshift(t, 23)
t = S + _
x ^= self.lshift(t, 13) | self.bshift(t, 19)
t = x + S
E ^= self.lshift(t, 18) | self.bshift(t, 14)
# 4
t = l + _
d ^= self.lshift(t, 7) | self.bshift(t, 25)
t = d + l
v ^= self.lshift(t, 9) | self.bshift(t, 23)
t = v + d
_ ^= self.lshift(t, 13) | self.bshift(t, 19)
t = _ + v
l ^= self.lshift(t, 18) | self.bshift(t, 14)
# 5
t = w + A
C ^= self.lshift(t, 7) | self.bshift(t, 25)
t = C + w
S ^= self.lshift(t, 9) | self.bshift(t, 23)
t = S + C
A ^= self.lshift(t, 13) | self.bshift(t, 19)
t = A + S
w ^= self.lshift(t, 18) | self.bshift(t, 14)
# 6
t = q + m
x ^= self.lshift(t, 7) | self.bshift(t, 25)
t = x + q
j ^= self.lshift(t, 9) | self.bshift(t, 23)
t = j + x
m ^= self.lshift(t, 13) | self.bshift(t, 19)
t = m + j
q ^= self.lshift(t, 18) | self.bshift(t, 14)
# 7
t = E + D
z ^= self.lshift(t, 7) | self.bshift(t, 25)
t = z + E
B ^= self.lshift(t, 9) | self.bshift(t, 23)
t = B + z
D ^= self.lshift(t, 13) | self.bshift(t, 19)
t = D + B
E ^= self.lshift(t, 18) | self.bshift(t, 14)
l += i
d += s
v += h
_ += b
A += r
w += n
C += o
S += e
j += c
m += p
q += a
x += f
z += u
B += g
D += y
E += k
self.b[0] = self.bshift(l, 0) & 255
self.b[1] = self.bshift(l, 8) & 255
self.b[2] = self.bshift(l, 16) & 255
self.b[3] = self.bshift(l, 24) & 255
self.b[4] = self.bshift(d, 0) & 255
self.b[5] = self.bshift(d, 8) & 255
self.b[6] = self.bshift(d, 16) & 255
self.b[7] = self.bshift(d, 24) & 255
self.b[8] = self.bshift(v, 0) & 255
self.b[9] = self.bshift(v, 8) & 255
self.b[10] = self.bshift(v, 16) & 255
self.b[11] = self.bshift(v, 24) & 255
self.b[12] = self.bshift(_, 0) & 255
self.b[13] = self.bshift(_, 8) & 255
self.b[14] = self.bshift(_, 16) & 255
self.b[15] = self.bshift(_, 24) & 255
self.b[16] = self.bshift(A, 0) & 255
self.b[17] = self.bshift(A, 8) & 255
self.b[18] = self.bshift(A, 16) & 255
self.b[19] = self.bshift(A, 24) & 255
self.b[20] = self.bshift(w, 0) & 255
self.b[21] = self.bshift(w, 8) & 255
self.b[22] = self.bshift(w, 16) & 255
self.b[23] = self.bshift(w, 24) & 255
self.b[24] = self.bshift(C, 0) & 255
self.b[25] = self.bshift(C, 8) & 255
self.b[26] = self.bshift(C, 16) & 255
self.b[27] = self.bshift(C, 24) & 255
self.b[28] = self.bshift(S, 0) & 255
self.b[29] = self.bshift(S, 8) & 255
self.b[30] = self.bshift(S, 16) & 255
self.b[31] = self.bshift(S, 24) & 255
self.b[32] = self.bshift(j, 0) & 255
self.b[33] = self.bshift(j, 8) & 255
self.b[34] = self.bshift(j, 16) & 255
self.b[35] = self.bshift(j, 24) & 255
self.b[36] = self.bshift(m, 0) & 255
self.b[37] = self.bshift(m, 8) & 255
self.b[38] = self.bshift(m, 16) & 255
self.b[39] = self.bshift(m, 24) & 255
self.b[40] = self.bshift(q, 0) & 255
self.b[41] = self.bshift(q, 8) & 255
self.b[42] = self.bshift(q, 16) & 255
self.b[43] = self.bshift(q, 24) & 255
self.b[44] = self.bshift(x, 0) & 255
self.b[45] = self.bshift(x, 8) & 255
self.b[46] = self.bshift(x, 16) & 255
self.b[47] = self.bshift(x, 24) & 255
self.b[48] = self.bshift(z, 0) & 255
self.b[49] = self.bshift(z, 8) & 255
self.b[50] = self.bshift(z, 16) & 255
self.b[51] = self.bshift(z, 24) & 255
self.b[52] = self.bshift(B, 0) & 255
self.b[53] = self.bshift(B, 8) & 255
self.b[54] = self.bshift(B, 16) & 255
self.b[55] = self.bshift(B, 24) & 255
self.b[56] = self.bshift(D, 0) & 255
self.b[57] = self.bshift(D, 8) & 255
self.b[58] = self.bshift(D, 16) & 255
self.b[59] = self.bshift(D, 24) & 255
self.b[60] = self.bshift(E, 0) & 255
self.b[61] = self.bshift(E, 8) & 255
self.b[62] = self.bshift(E, 16) & 255
self.b[63] = self.bshift(E, 24) & 255
def lshift(self, num, other):
lnum = self.ToInt32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToInt32(lnum << shift_count)
def rshift(self, num, other):
lnum = self.ToInt32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToInt32(lnum >> shift_count)
def bshift(self, num, other):
lnum = self.ToUint32(num)
rnum = self.ToUint32(other)
shift_count = rnum & 0x1F
return self.ToUint32(lnum >> shift_count)
@staticmethod
def ToInt32(num):
int32 = num % 2 ** 32
return int32 - 2 ** 32 if int32 >= 2 ** 31 else int32
@staticmethod
def ToUint32(num):
return num % 2 ** 32
def decode_video_url(url):
tria = re.compile('[0-9a-z]{40,}', re.IGNORECASE).findall(url)[0]
gira = tria[::-1]
x = gira[:2] + gira[3:]
return re.sub(tria, x, url)