Merge remote-tracking branch 'alfa-addon/master' into Fixes

This commit is contained in:
Unknown
2018-03-02 21:23:00 -03:00
21 changed files with 318 additions and 598 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.5.0" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.5.1" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,14 +19,13 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» newpct » newpct1
» youtube » flashx
» kbagi » pelismagnet
» gnula » animemovil
» cinecalidad » cuelgame
» divxtotal » cinemahd
» gnula » pelismagnet
» allcalidad » streamcherry
» streamango » pepecine
» mejortorrent » torrentrapid
» cinecalidad
¤ arreglos internos
¤ Agradecimientos a @Paquito Porras por PelisUltra.
¤ Agradecimientos a @prpeaprendiz y @ikarywarriors por colaborar.
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
@@ -20,12 +21,12 @@ except:
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero" ))
itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<"))
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "/favorites" ))
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<", thumbnail = get_thumb("year", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "/favorites", thumbnail = get_thumb("favorites", auto = True) ))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s="))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
return itemlist
def newest(categoria):

View File

@@ -7,7 +7,8 @@
"thumbnail": "https://s31.postimg.org/puxmvsi7v/cinecalidad.png",
"banner": "https://s32.postimg.org/kihkdpx1x/banner_cinecalidad.png",
"categories": [
"movie"
"movie",
"torrent"
],
"settings": [
{
@@ -29,7 +30,7 @@
"No filtrar",
"Latino",
"Español",
"Portuges"
"Portugues"
]
},
{

View File

@@ -29,16 +29,20 @@ list_servers = [
'usersfiles',
'vidbull',
'openload',
'directo'
'rapidvideo',
'streamango',
'directo',
'torrent'
]
host = 'http://www.cinecalidad.to'
thumbmx = 'http://flags.fmcdn.net/data/flags/normal/mx.png'
thumbes = 'http://flags.fmcdn.net/data/flags/normal/es.png'
thumbbr = 'http://flags.fmcdn.net/data/flags/normal/br.png'
current_lang = ''
def mainlist(item):
global host
idioma2 = "destacadas"
logger.info()
@@ -48,14 +52,14 @@ def mainlist(item):
itemlist.append(
item.clone(title="CineCalidad Latino",
action="submenu",
host="http://cinecalidad.com/",
host="http://cinecalidad.to/",
thumbnail=thumbmx,
extra="peliculas",
))
itemlist.append(item.clone(title="CineCalidad Castellano",
action="submenu",
host="http://cinecalidad.com/espana/",
host="http://cinecalidad.to/espana/",
thumbnail=thumbes,
extra="peliculas",
))
@@ -63,7 +67,7 @@ def mainlist(item):
itemlist.append(
item.clone(title="CineCalidad Portugues",
action="submenu",
host="http://cinemaqualidade.com/",
host="http://cinemaqualidade.to/",
thumbnail=thumbbr,
extra="filmes",
))
@@ -77,7 +81,7 @@ def submenu(item):
idioma = 'peliculas'
idioma2 = "destacada"
host = item.host
if item.host == "http://cinemaqualidade.com/":
if item.host == "http://cinemaqualidade.to/":
idioma = "filmes"
idioma2 = "destacado"
logger.info()
@@ -106,7 +110,7 @@ def submenu(item):
itemlist.append(Item(channel=item.channel,
title="Por Año",
action="anyos",
url=host + "/" + idioma + "-por-ano",
url=host + idioma + "-por-ano",
thumbnail=get_thumb('year', auto=True),
fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png',
))
@@ -126,7 +130,7 @@ def anyos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="([^"]+)">([^<]+)</a> '
patron = '<a href="([^"]+)">([^<]+)</a><br'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -173,7 +177,7 @@ def generos(item):
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle
thumbnail = tgenero[scrapedtitle]
thumbnail = ''
plot = item.plot
itemlist.append(
Item(channel=item.channel,
@@ -190,13 +194,17 @@ def generos(item):
def peliculas(item):
logger.info()
global current_lang
itemlist = []
if 'espana' in host:
item.language = 'castellano'
elif 'cinecalidad' in host:
item.language = 'latino'
else:
item.language = 'portugues'
if 'cinemaqualidade' in item.url:
current_lang = 'portugues'
elif 'espana' in item.url:
current_lang = 'castellano'
elif 'cinecalidad' in item.url:
current_lang = 'latino'
data = httptools.downloadpage(item.url).data
patron = '<div class="home_post_cont.*? post_box">.*?<a href="(.*?)".*?'
patron += 'src="(.*?)".*?title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
@@ -219,7 +227,7 @@ def peliculas(item):
fanart='https://s31.postimg.org/puxmvsi7v/cinecalidad.png',
contentTitle=contentTitle,
infoLabels={'year': year},
language=item.language,
language=current_lang,
context=autoplay.context
))
@@ -245,7 +253,7 @@ def dec(item):
val = item.split(' ')
link = map(int, val)
for i in range(len(link)):
link[i] = link[i] - 7
link[i] = link[i] - 6
real = ''.join(map(chr, link))
return (real)
@@ -273,8 +281,11 @@ def findvideos(item):
"https://www.yourupload.com/watch/": "yourupload",
"http://www.cinecalidad.to/protect/gdredirect.php?l=": "directo",
"https://openload.co/embed/": "openload",
"https://streamango.com/embed/f/": "streamango",
"https://www.rapidvideo.com/embed/": "rapidvideo",
}
logger.info()
itemlist = []
duplicados = []
@@ -285,37 +296,52 @@ def findvideos(item):
server_url = {'YourUpload': 'https://www.yourupload.com/embed/',
'Openload': 'https://openload.co/embed/',
'TVM': 'https://thevideo.me/embed-',
'Streamango': 'https://streamango.com/embed/',
'RapidVideo': 'https://www.rapidvideo.com/embed/',
'Trailer': '',
'BitTorrent': '',
'Mega': '',
'MediaFire': ''}
for video_cod, server_id in matches:
if server_id not in ['BitTorrent', 'Mega', 'MediaFire', 'Trailer', '']:
if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
video_id = dec(video_cod)
logger.debug('server_id %s' % server_id)
if server_id in server_url:
server = server_id.lower()
thumbnail = item.thumbnail
if server_id == 'TVM':
server = 'thevideome'
url = server_url[server_id] + video_id + '.html'
elif server_id == 'BitTorrent':
base_url = 'http://www.cinecalidad.to/protect/contenido.php'
post = 'i=%s&title=%s' % (video_id, item.contentTitle)
protect = httptools.downloadpage(base_url, post=post).data
url = scrapertools.find_single_match(protect, 'value="(magnet.*?)"')
server = 'torrent'
else:
url = server_url[server_id] + video_id
title = item.contentTitle + ' (%s)' % server
quality = 'default'
if server_id not in ['BitTorrent', 'Mega', 'MediaFire', 'Trailer']:
if server_id not in ['Mega', 'MediaFire', 'Trailer']:
if server != 'torrent':
language = IDIOMAS[item.language]
else:
language = [IDIOMAS[item.language], 'vose']
if url not in duplicados:
itemlist.append(item.clone(action='play',
title=title,
fulltitle=item.contentTitle,
url=url,
language=IDIOMAS[item.language],
thumbnail=thumbnail,
quality=quality,
server=server
))
new_item = Item(channel=item.channel,
action='play',
title=title,
fulltitle=item.contentTitle,
url=url,
language= language,
thumbnail=thumbnail,
quality=quality,
server=server
)
itemlist.append(new_item)
duplicados.append(url)
# Requerido para FilterTools

View File

@@ -146,8 +146,6 @@ def scraper(item):
except:
pass
for item_tmdb in itemlist:
logger.info(str(item_tmdb.infoLabels['tmdb_id']))
return itemlist

View File

@@ -7,7 +7,8 @@ from core.item import Item
from platformcode import config, logger
host = "http://gnula.nu/"
host_search = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=small&num=10&hl=es&prettyPrint=false&source=gcsc&gss=.es&sig=45e50696e04f15ce6310843f10a3a8fb&cx=014793692610101313036:vwtjajbclpq&q=%s&cse_tok=AOdTmaBgzSiy5RxoV4cZSGGEr17reWoGLg:1519145966291&googlehost=www.google.com&callback=google.search.Search.apiary10745&nocache=1519145965573&start=0"
host_search = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=small&num=20&hl=es&prettyPrint=false&source=gcsc&gss=.es&sig=45e50696e04f15ce6310843f10a3a8fb&cx=014793692610101313036:vwtjajbclpq&q=%s&cse_tok=%s&googlehost=www.google.com&callback=google.search.Search.apiary10745&nocache=1519145965573&start=0"
item_per_page = 20
def mainlist(item):
@@ -28,7 +29,16 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url %texto
data = httptools.downloadpage(host).data
url_cse = scrapertools.find_single_match(data, '<form action="([^"]+)"') + "?"
bloque = scrapertools.find_single_match(data, '<form action=.*?</form>').replace('name="q"', "")
matches = scrapertools.find_multiple_matches(bloque, 'name="([^"]+).*?value="([^"]+)')
post = "q=" + texto + "&"
for name, value in matches:
post += name + "=" + value + "&"
data = httptools.downloadpage(url_cse + post).data
cse_token = scrapertools.find_single_match(data, "var cse_token='([^']+)'")
item.url = host_search %(texto, cse_token)
try:
return sub_search(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -42,35 +52,31 @@ def search(item, texto):
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)clicktrackUrl":".*?q=(.*?)".*?'
patron += 'title":"([^"]+)".*?'
patron += 'cseImage":{"src":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
scrapedtitle = scrapedtitle.decode("unicode-escape").replace(" online", "").replace("<b>", "").replace("</b>", "")
if "ver-" not in scrapedurl:
continue
year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
contentTitle = scrapedtitle.replace("(%s)" %year,"").replace("Ver","").strip()
itemlist.append(Item(action = "findvideos",
channel = item.channel,
contentTitle = contentTitle,
infoLabels = {"year":year},
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl
))
if itemlist:
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + 10
npage = (page / 10) + 1
item_page = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
itemlist.append(Item(action = "sub_search",
channel = item.channel,
title = "[COLOR green]Página %s[/COLOR]" %npage,
url = item_page
))
while True:
data = httptools.downloadpage(item.url).data
if len(data) < 500 :
break
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + item_per_page
item.url = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
patron = '(?s)clicktrackUrl":".*?q=(.*?)".*?'
patron += 'title":"([^"]+)".*?'
patron += 'cseImage":{"src":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
scrapedtitle = scrapedtitle.decode("unicode-escape").replace(" online", "").replace("<b>", "").replace("</b>", "")
if "ver-" not in scrapedurl:
continue
year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
contentTitle = scrapedtitle.replace("(%s)" %year,"").replace("Ver","").strip()
itemlist.append(Item(action = "findvideos",
channel = item.channel,
contentTitle = contentTitle,
infoLabels = {"year":year},
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl,
))
return itemlist
@@ -125,7 +131,7 @@ def peliculas(item):
def findvideos(item):
logger.info("item=" + item.tostring())
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')

View File

@@ -106,8 +106,7 @@ def buscador(item):
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8')
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + title + "], url=[" + url + "]")
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, folder=False, extra=""))
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, folder=False, extra="pelicula"))
# busca docu
patron = "<a href='(/doc-descargar-torrent[^']+)' .*?"
@@ -205,6 +204,7 @@ def getlist(item):
def episodios(item):
#import web_pdb; web_pdb.set_trace()
logger.info()
itemlist = []
@@ -217,7 +217,7 @@ def episodios(item):
item.thumbnail = scrapertools.find_single_match(data,
"src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
item.thumbnail = host + + urllib.quote(item.thumbnail)
item.thumbnail = host + urllib.quote(item.thumbnail)
# <form name='episodios' action='secciones.php?sec=descargas&ap=contar_varios' method='post'>
data = scrapertools.get_match(data,
@@ -245,10 +245,11 @@ def episodios(item):
scrapedtitle = scrapedtitle.strip()
if scrapedtitle.endswith('.'):
scrapedtitle = scrapedtitle[:-1]
#import web_pdb; web_pdb.set_trace()
title = scrapedtitle + " (" + fecha + ")"
url = host + "/secciones.php?sec=descargas&ap=contar_varios"
patron = "<a href='(.*?)'>"
url = "https://mejortorrent.website"+scrapertools.find_single_match(data,patron)
# "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
logger.debug("post=" + post)
@@ -287,7 +288,7 @@ def episodios(item):
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=item.thumbnail, plot=item.plot,
fanart=item.fanart, extra=post, folder=False))
fanart=item.fanart, extra=post, folder=False, id=value))
return itemlist
@@ -328,20 +329,49 @@ def show_movie_info(item):
def play(item):
#import web_pdb; web_pdb.set_trace()
logger.info()
itemlist = []
if item.extra == "":
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url,
thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
if item.extra == "pelicula":
#itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=item.url,
# thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
#url https://mejortorrent.website/peli-descargar-torrent-16443-Thor-Ragnarok.html
patron = "https://mejortorrent.website/peli-descargar-torrent-((.*?))-"
newid = scrapertools.find_single_match(item.url, patron)
#params = dict(urlparse.parse_qsl(item.extra))
patron = "https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=" + newid[0] + "&link_bajar=1"
#https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=peliculas&id=16443&link_bajar=1
#link=scrapertools.find_single_match(data,patron)
#data = httptools.downloadpage(link).data
data = httptools.downloadpage(patron).data
patron = "Pincha <a href='(.*?)'>"
link = "https://mejortorrent.website" + scrapertools.find_single_match(data, patron)
logger.info("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, folder=False))
else:
data = httptools.downloadpage(item.url, post=item.extra).data
#data = httptools.downloadpage(item.url, post=item.extra).data
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
params = dict(urlparse.parse_qsl(item.extra))
patron = '<a href="(http://www.mejortorrent.com/uploads/torrents/' + params["tabla"] + '/.*?\.torrent)"'
link = scrapertools.get_match(data, patron)
patron = "https://mejortorrent.website/secciones.php?sec=descargas&ap=contar&tabla=" + params["tabla"] + "&id=" + item.id
#link=scrapertools.find_single_match(data,patron)
#data = httptools.downloadpage(link).data
data = httptools.downloadpage(patron).data
patron = "Pincha <a href='(.*?)'>"
link = "https://mejortorrent.website" + scrapertools.find_single_match(data, patron)
logger.info("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, folder=False))

View File

@@ -1,22 +0,0 @@
{
"id": "pelisadicto",
"name": "Pelisadicto",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "pelisadicto.png",
"banner": "pelisadicto.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,220 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, title="Últimas agregadas", action="agregadas", url="http://pelisadicto.com",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, title="Listado por género", action="porGenero", url="http://pelisadicto.com"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="http://pelisadicto.com"))
return itemlist
def porGenero(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Acción", url="http://pelisadicto.com/genero/Acción/1",
viewmode="movie_with_plot"))
if config.get_setting("adult_mode") != 0:
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Adulto", url="http://pelisadicto.com/genero/Adulto/1",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="agregadas", title="Animación",
url="http://pelisadicto.com/genero/Animación/1", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Aventura", url="http://pelisadicto.com/genero/Aventura/1",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="agregadas", title="Biográfico",
url="http://pelisadicto.com/genero/Biográfico/1", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="agregadas", title="Ciencia Ficción",
url="http://pelisadicto.com/genero/Ciencia Ficción/1", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="agregadas", title="Cine Negro",
url="http://pelisadicto.com/genero/Cine Negro/1", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Comedia", url="http://pelisadicto.com/genero/Comedia/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Corto", url="http://pelisadicto.com/genero/Corto/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Crimen", url="http://pelisadicto.com/genero/Crimen/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Deporte", url="http://pelisadicto.com/genero/Deporte/1",
viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="agregadas", title="Documental",
url="http://pelisadicto.com/genero/Documental/1", viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Drama", url="http://pelisadicto.com/genero/Drama/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Familiar", url="http://pelisadicto.com/genero/Familiar/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Fantasía", url="http://pelisadicto.com/genero/Fantasía/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Guerra", url="http://pelisadicto.com/genero/Guerra/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Historia", url="http://pelisadicto.com/genero/Historia/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Misterio", url="http://pelisadicto.com/genero/Misterio/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Música", url="http://pelisadicto.com/genero/Música/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Musical", url="http://pelisadicto.com/genero/Musical/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Romance", url="http://pelisadicto.com/genero/Romance/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Terror", url="http://pelisadicto.com/genero/Terror/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Thriller", url="http://pelisadicto.com/genero/Thriller/1",
viewmode="movie_with_plot"))
itemlist.append(
Item(channel=item.channel, action="agregadas", title="Western", url="http://pelisadicto.com/genero/Western/1",
viewmode="movie_with_plot"))
return itemlist
def search(item, texto):
logger.info()
'''
texto_get = texto.replace(" ","%20")
texto_post = texto.replace(" ","+")
item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post)
'''
texto = texto.replace(" ", "+")
item.url = "http://pelisadicto.com/buscar/%s" % texto
try:
return agregadas(item)
# Se captura la excepci?n, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def agregadas(item):
logger.info()
itemlist = []
'''
# Descarga la pagina
if "?search=" in item.url:
url_search = item.url.split("?search=")
data = scrapertools.cache_page(url_search[0], url_search[1])
else:
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
'''
data = scrapertools.cache_page(item.url)
# logger.info("data="+data)
# Extrae las entradas
fichas = re.sub(r"\n|\s{2}", "", scrapertools.get_match(data, '<ul class="thumbnails">(.*?)</ul>'))
# <li class="col-xs-6 col-sm-2 CALDVD"><a href="/pelicula/101-dalmatas" title="Ver 101 dálmatas Online" class="thumbnail thumbnail-artist-grid"><img class="poster" style="width: 180px; height: 210px;" src="/img/peliculas/101-dalmatas.jpg" alt="101 dálmatas"/><div class="calidad">DVD</div><div class="idiomas"><img src="/img/1.png" height="20" width="30" /></div><div class="thumbnail-artist-grid-name-container-1"><div class="thumbnail-artist-grid-name-container-2"><span class="thumbnail-artist-grid-name">101 dálmatas</span></div></div></a></li>
patron = 'href="([^"]+)".*?' # url
patron += 'src="([^"]+)" ' # thumbnail
patron += 'alt="([^"]+)' # title
matches = re.compile(patron, re.DOTALL).findall(fichas)
for url, thumbnail, title in matches:
url = urlparse.urljoin(item.url, url)
thumbnail = urlparse.urljoin(url, thumbnail)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url,
thumbnail=thumbnail, show=title))
# Paginación
try:
# <ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li><li><span><a href="3">3</a></span></li><li><span><a href="4">4</a></span></li><li><span><a href="5">5</a></span></li><li><span><a href="6">6</a></span></li></ul>
current_page_number = int(scrapertools.get_match(item.url, '/(\d+)$'))
item.url = re.sub(r"\d+$", "%s", item.url)
next_page_number = current_page_number + 1
next_page = item.url % (next_page_number)
itemlist.append(Item(channel=item.channel, action="agregadas", title="Página siguiente >>", url=next_page,
viewmode="movie_with_plot"))
except:
pass
return itemlist
def findvideos(item):
logger.info()
itemlist = []
plot = ""
data = re.sub(r"\n|\s{2}", "", scrapertools.cache_page(item.url))
# <!-- SINOPSIS --> <h2>Sinopsis de 101 dálmatas</h2> <p>Pongo y Perdita, los dálmatas protagonistas, son una feliz pareja canina que vive rodeada de sus cachorros y con sus amos Roger y Anita. Pero su felicidad está amenazada. Cruella de Ville, una pérfida mujer que vive en una gran mansión y adora los abrigos de pieles, se entera de que los protagonistas tienen quince cachorros dálmatas. Entonces, la idea de secuestrarlos para hacerse un exclusivo abrigo de pieles se convierte en una obsesión enfermiza. Para hacer realidad su sueño contrata a dos ladrones.</p>
patron = "<!-- SINOPSIS --> "
patron += "<h2>[^<]+</h2> "
patron += "<p>([^<]+)</p>"
matches = re.compile(patron, re.DOTALL).findall(data)
if matches:
plot = matches[0]
# Descarga la pagina
data = scrapertools.cache_page(item.url)
patron = '<tr>.*?'
patron += '<td><img src="(.*?)".*?<td>(.*?)</td>.*?<td>(.*?)</td>.*?<a href="(.*?)".*?</tr>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
idioma = ""
if "/img/1.png" in scrapedidioma: idioma = "Castellano"
if "/img/2.png" in scrapedidioma: idioma = "Latino"
if "/img/3.png" in scrapedidioma: idioma = "Subtitulado"
title = item.title + " [" + scrapedcalidad + "][" + idioma + "][" + scrapedserver + "]"
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl, thumbnail="",
plot=plot, show=item.show))
return itemlist
def play(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -171,7 +171,7 @@ def episodios(item):
# post = "page=%s&x=34&y=14" % urllib.quote(item.url)
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
# data = httptools.downloadpage(item.url).data
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
@@ -214,14 +214,12 @@ def episodios(item):
if dict_episodes[numero]["plot"] == "":
dict_episodes[numero]["plot"] = j.get("overviewcapitul", "")
# logger.debug("\n\n\n dict_episodes: %s " % dict_episodes)
for key, value in dict_episodes.items():
list_no_duplicate = list(set(value["quality"]))
title = "%s %s [%s]" % (key, value["title"], "][".join(list_no_duplicate))
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url,
Item(channel=item.channel, action="findvideos", title=title, url=dict_episodes[numero]["url"],
thumbnail=item.thumbnail, fanart=item.fanart, show=item.show, data=value,
contentSerieName=item.contentTitle, contentSeason=value["season"],
contentEpisodeNumber=value["episode"]))

View File

@@ -14,7 +14,7 @@ from core import tmdb
from core.item import Item, InfoLabels
from platformcode import config, logger
host = "https://pepecine.tv"
host = "https://pepecine.info"
perpage = 20
def mainlist1(item):
@@ -29,7 +29,7 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Ultimas",
url=host+'/tv-peliculas-online',
url=host+'/peliculas-tv-online',
action='list_latest',
indexp=1,
type='movie'))

View File

@@ -1,7 +1,7 @@
{
"id": "repelis",
"name": "Repelis",
"active": true,
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "repelis.png",
@@ -21,4 +21,4 @@
"visible": true
}
]
}
}

View File

@@ -1,23 +0,0 @@
{
"id": "seriesadicto",
"name": "Seriesadicto",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "seriesadicto.png",
"banner": "seriesadicto.png",
"categories": [
"tvshow",
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,224 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="letras", title="Todas por orden alfabético", url="http://seriesadicto.com/",
folder=True))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://seriesadicto.com/buscar/" + texto
try:
return series(item)
# Se captura la excepci?n, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def letras(item):
logger.info()
itemlist = []
# Descarga la página
data = scrapertools.cachePage(item.url)
data = scrapertools.find_single_match(data, '<li class="nav-header">Por inicial</li>(.*?)</ul>')
logger.info("data=" + data)
patronvideos = '<li><a rel="nofollow" href="([^"]+)">([^<]+)</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action='series', title=title, url=url, thumbnail=thumbnail, plot=plot))
return itemlist
def series(item):
logger.info()
itemlist = []
'''
<li class="col-xs-6 col-sm-4 col-md-2">
<a href="/serie/justicia-ciega-blind-justuce" title="Ver Justicia ciega ( Blind Justuce ) Online" class="thumbnail thumbnail-artist-grid">
<img style="width: 120px; height: 180px;" src="/img/series/justicia-ciega-blind-justuce-th.jpg" alt="Justicia ciega ( Blind Justuce )"/>
'''
data = scrapertools.cachePage(item.url)
logger.info("data=" + data)
patron = '<li class="col-xs-6[^<]+'
patron += '<a href="([^"]+)"[^<]+'
patron += '<img style="[^"]+" src="([^"]+)" alt="([^"]+)"'
logger.info("patron=" + patron)
matches = re.compile(patron, re.DOTALL).findall(data)
logger.info("matches=" + repr(matches))
scrapertools.printMatches(matches)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapertools.htmlclean(scrapedtitle.strip())
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, folder=True))
return itemlist
def episodios(item):
    """List a show's episodes with their available languages.

    Sample row:
        <tr>
        <td class="sape">... <a href="/capitulo/...">Saving Hope 1x02</a></td>
        <td><div class="vistodiv" ...>...</div></td>
        <td><img src="/img/3.png" .../>&nbsp;<img src="/img/4.png" .../></td>
        </tr>
    """
    logger.info()
    data = scrapertools.cachePage(item.url)
    patron = ('<tr[^<]+'
              '<td class="sape"><i[^<]+</i[^<]+<a href="([^"]+)"[^>]+>([^<]+)</a></td[^<]+'
              '<td><div[^<]+<a[^<]+<span[^<]+</span></a></div></td[^<]+'
              '<td>(.*?)</td')
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for href, raw_title, flags_html in matches:
        # The third cell holds the flag images; translate them into labels.
        idiomas, language = extrae_idiomas(flags_html)
        title = raw_title.strip() + " (" + idiomas + ")"
        url = urlparse.urljoin(item.url, href)
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
                 thumbnail="", plot="", show=item.show, folder=True, language=language))
    # Videolibrary / bulk-download helpers, only when episodes were found.
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))
        itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
                             action="download_all_episodes", extra="episodios", show=item.show))
    return itemlist
def extrae_idiomas(bloqueidiomas):
    """Extract the languages encoded as flag images in an HTML fragment.

    Each flag is an ``<img src="/img/<code>.png">``; codes 1-4 map to
    Español/Latino/VOSE/VO.

    Returns a tuple ``(textoidiomas, language)``: a "/"-joined display
    string of the known languages, and a list with one entry per flag
    (empty string for unknown codes), translated via codigo_a_idioma().

    Fix: the original duplicated the code->label mapping that
    codigo_a_idioma() already owns, so the two could drift apart; now the
    sibling mapping is the single source of truth.
    """
    logger.info("idiomas=" + bloqueidiomas)
    patronidiomas = '([a-z0-9]+).png"'
    codigos = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
    language = [codigo_a_idioma(codigo) for codigo in codigos]
    # Unknown codes produce "", which the original skipped in the display
    # text while still appending them to the language list — keep that.
    textoidiomas = "/".join(label for label in language if label)
    return textoidiomas, language
def codigo_a_idioma(codigo):
    """Translate a numeric flag code ("1".."4") to its language label.

    Unknown codes yield an empty string, exactly like the original chain
    of if-statements.
    """
    etiquetas = {
        "1": "Español",
        "2": "Latino",
        "3": "VOSE",
        "4": "VO",
    }
    return etiquetas.get(codigo, "")
def findvideos(item):
    """List the mirrors (server + language) available for an episode.

    Sample row:
        <tr class="lang_3 no-mobile">
        <td><img src="/img/3.png" .../></td>
        <td>Nowvideo</td>
        <td class="enlacevideo" ...><a href="http://..." ...>Reproducir</a></td>
        </tr>
    """
    logger.info()
    # Descarga la pagina
    data = scrapertools.cachePage(item.url)
    patron = ('<tr class="lang_[^<]+'
              '<td><img src="/img/(\d).png"[^<]+</td[^<]+'
              '<td>([^<]+)</td[^<]+'
              '<td class="enlacevideo"[^<]+<a href="([^"]+)"')
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    itemlist = []
    for codigo, servername, href in matches:
        language = codigo_a_idioma(codigo)
        title = "Mirror en " + servername + " (" + language + ")"
        url = urlparse.urljoin(item.url, href)
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[]")
        itemlist.append(
            Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url,
                 thumbnail="", plot="", folder=False, language=language))
    return servertools.get_servers_itemlist(itemlist)
def play(item):
    """Resolve the video items behind item.url and tag them for playback."""
    logger.info()
    found = servertools.find_video_items(data=item.url)
    for video in found:
        video.title = ("Enlace encontrado en " + video.server + " ("
                       + scrapertools.get_filename_from_url(video.url) + ")")
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return found

View File

@@ -0,0 +1,33 @@
{
"id": "torrentrapid",
"name": "Torrentrapid",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "torrentrapid.png",
"banner": "torrentrapid.png",
"categories": [
"movie",
"tvshow",
"anime",
"torrent"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
import requests
from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import httptools
Host='http://torrentrapid.com'
def mainlist(item):
    """Root menu for the channel: movies, series and search."""
    logger.info()
    entries = [
        Item(channel=item.channel, action="submenu", title="Películas", url=Host + "/peliculas/"),
        Item(channel=item.channel, action="submenu", title="Series", url=Host + "/series/"),
        Item(channel=item.channel, action="search", title="Buscar"),
    ]
    return entries
def search(item, texto):
    """Search the site for *texto* and list the matching titles.

    Called by the channel/global search framework with the query string.

    Fixes: the search endpoint was hard-coded instead of using the file's
    Host constant, and the payload was built with a dummy value ('data')
    that was immediately overwritten.
    """
    logger.info()
    itemlist = []
    data = requests.post(Host + "/buscar", data={'q': texto})
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data.text)
    patron_data = '<ul class="buscar-list">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    # Strip the "Descargar ..." prefixes the site adds to every title.
    data_listado = re.sub("Descargar Todas ", "", data_listado)
    data_listado = re.sub("Descargar Pel\xedculas ", "", data_listado)
    data_listado = re.sub("Descargar ", "", data_listado)
    patron_listado = '<li><a href="(.+?)" title="(.+?)"><img src="(.+?)"'
    matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
    for scrapedurl, scrapedtitle, scrapedimg in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="findvideos"))
    return itemlist
def submenu(item):
    """List the categories hanging from the menu entry whose href is item.url.

    Fix: item.url was interpolated into a regex unescaped, so metacharacters
    in the URL (e.g. the dots in the domain) matched arbitrary characters
    and could select the wrong menu branch; it is now escaped with
    re.escape so the URL is matched literally.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # Filtrado por url — escape it so it is matched literally.
    patron = '<li><a href="' + re.escape(item.url) + '"><i.+?<ul>(.+?)<\/ul>'
    data_cat = scrapertools.find_single_match(data, patron)
    patron_cat = '<li><a href="(.+?)" title="(.+?)".+?<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="listado"))
    return itemlist
def listado(item):
    """List the titles in a category page plus a 'next page' entry."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    grid = scrapertools.find_single_match(data, '<ul class="pelilist">(.+?)</ul>')
    es_serie = 'Serie' in item.title
    # The <h2> opening differs between series and movie listings.
    patron = '<li><a href="(.+?)" title=".+?"><img src="(.+?)".+?><h2'
    patron += '.+?>' if es_serie else '>'
    patron += '(.+?)<\/h2><span>(.+?)<\/span><\/a><\/li>'
    accion = "episodios" if es_serie else "findvideos"
    for href, thumb, titulo, calidad in scrapertools.find_multiple_matches(grid, patron):
        itemlist.append(item.clone(title=titulo, url=href, thumbnail=thumb, action=accion,
                                   quality=calidad, show=titulo))
    # Página siguiente
    siguiente = scrapertools.find_single_match(
        data, '<ul class="pagination"><li><a class="current" href=".+?">.+?<\/a>.+?<a href="(.+?)">')
    itemlist.append(
        Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=siguiente, action="listado"))
    return itemlist
def episodios(item):
    """List a show's episodes from its detail page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    listado_html = scrapertools.find_single_match(data, '<ul class="buscar-list">(.+?)</ul>')
    patron = '<img src="(.+?)" alt=".+?">.+?<div class=".+?">.+?<a href="(.+?)" title=".+?">.+?>Serie.+?>(.+?)<'
    for thumb, href, titulo in scrapertools.find_multiple_matches(listado_html, patron):
        if " al " in titulo:
            # Batched-episode entries ("x al y") carry a prefixed URL; keep
            # only the part from the first "http" onwards.
            href = "http" + href.split('http')[1]
        itemlist.append(item.clone(title=titulo, url=href, thumbnail=thumb,
                                   action="findvideos", show=titulo))
    return itemlist
def findvideos(item):
    """List the playable links of a title, including its torrent link."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    # The torrent link is buried in a JS redirect.
    torrent_url = scrapertools.find_single_match(data, 'location.href = "([^"]+)"')
    itemlist.append(Item(url=torrent_url, title="Torrent", server="torrent", action="play"))
    for entry in itemlist:
        entry.channel = item.channel
    return itemlist

View File

@@ -90,7 +90,7 @@ thumb_dict = {"movies": "https://s10.postimg.org/fxtqzdog9/peliculas.png",
}
def set_genre(string):
logger.info()
#logger.info()
genres_dict = {'accion':['accion', 'action', 'accion y aventura', 'action & adventure'],
'adultos':['adultos', 'adultos +', 'adulto'],
@@ -131,7 +131,7 @@ def set_genre(string):
return string
def remove_format(string):
logger.info()
#logger.info()
#logger.debug('entra en remove: %s' % string)
string = string.rstrip()
string = re.sub(r'(\[|\[\/)(?:color|COLOR|b|B|i|I).*?\]|\[|\]|\(|\)|\:|\.', '', string)
@@ -140,7 +140,7 @@ def remove_format(string):
def simplify(string):
logger.info()
#logger.info()
#logger.debug('entra en simplify: %s'%string)
string = remove_format(string)
string = string.replace('-',' ').replace('_',' ')
@@ -155,7 +155,7 @@ def simplify(string):
return string
def add_languages(title, languages):
logger.info()
#logger.info()
if isinstance(languages, list):
for language in languages:
@@ -165,7 +165,7 @@ def add_languages(title, languages):
return title
def set_color(title, category):
logger.info()
#logger.info()
color_scheme = {'otro': 'white'}
@@ -199,7 +199,7 @@ def set_color(title, category):
return title
def set_lang(language):
logger.info()
#logger.info()
cast =['castellano','espanol','cast','esp','espaol', 'es','zc', 'spa', 'spanish', 'vc']
lat=['latino','lat','la', 'espanol latino', 'espaol latino', 'zl', 'mx', 'co', 'vl']
@@ -237,7 +237,7 @@ def set_lang(language):
def title_format(item):
logger.info()
#logger.info()
lang = False
valid = True
@@ -349,7 +349,7 @@ def title_format(item):
else:
simple_language = ''
item.language = simple_language
#item.language = simple_language
# Damos formato al año si existiera y lo agregamos
# al titulo excepto que sea un episodio
@@ -446,7 +446,7 @@ def title_format(item):
return item
def thumbnail_type(item):
logger.info()
#logger.info()
# Se comprueba que tipo de thumbnail se utilizara en findvideos,
# Poster o Logo del servidor

Binary file not shown.

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 94 KiB

View File

@@ -27,7 +27,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for ext, encoded, code, quality in matches:
media_url = decode(encoded, int(code))
media_url = media_url.replace("@","")
if not media_url.startswith("http"):
media_url = "http:" + media_url
video_urls.append([".%s %sp [streamango]" % (ext, quality), media_url])

View File

@@ -32,7 +32,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for ext, encoded, code, quality in matches:
media_url = decode(encoded, int(code))
media_url = media_url.replace("@","")
if not media_url.startswith("http"):
media_url = "http:" + media_url
video_urls.append([".%s %sp [streamcherry]" % (ext, quality), media_url])