@@ -34,12 +34,40 @@ def mainlist(item):
    itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host, thumbnail = get_thumb("newest", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<", thumbnail = get_thumb("year", auto = True)))
-   itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "/favorites", thumbnail = get_thumb("favorites", auto = True)))
+   itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "favorites", url = host + "/favorites", thumbnail = get_thumb("favorites", auto = True)))
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def favorites(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '(?s)short_overlay.*?<a href="([^"]+)'
    patron += '.*?img.*?src="([^"]+)'
    patron += '.*?title="([^"]+).*?'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, thumbnail, titulo in matches:
        idioma = "Latino"
        mtitulo = titulo + " (" + idioma + ")"
        itemlist.append(item.clone(channel = item.channel,
                                   action = "findvideos",
                                   title = mtitulo,
                                   fulltitle = titulo,
                                   thumbnail = thumbnail,
                                   url = url,
                                   contentType = "movie",
                                   language = idioma
                                   ))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    url_pagina = scrapertools.find_single_match(data, 'next" href="([^"]+)')
    if url_pagina != "":
        pagina = "Pagina: " + scrapertools.find_single_match(url_pagina, "page/([0-9]+)")
        itemlist.append(Item(channel = item.channel, action = "peliculas", title = pagina, url = url_pagina))
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
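
Note: the new favorites() follows the scraping pattern used across these channels: concatenate one long regex, pull tuples out with find_multiple_matches, then chase the paginator's next link. A minimal standalone sketch of the same technique with plain re (the sample HTML and names are hypothetical, not taken from the site):

import re

html = '''
<div class="short_overlay"><a href="/movie/1"><img src="/t1.jpg" title="Movie One"></a></div>
<div class="short_overlay"><a href="/movie/2"><img src="/t2.jpg" title="Movie Two"></a></div>
<a class="next" href="/favorites/page/2">next</a>
'''

patron = '(?s)short_overlay.*?<a href="([^"]+)'
patron += '.*?img.*?src="([^"]+)'
patron += '.*?title="([^"]+)'

# scrapertools.find_multiple_matches is essentially re.findall over the page body.
for url, thumb, title in re.findall(patron, html):
    print("%s %s %s" % (url, thumb, title))

# Pagination: one match for the next link, or the empty string when absent.
m = re.search('next" href="([^"]+)', html)
url_pagina = m.group(1) if m else ""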
@@ -289,6 +289,14 @@ def findvideos(item):
    logger.info()
    itemlist = []
    duplicados = []

    if 'cinemaqualidade' in item.url:
        lang = 'portugues'
    elif 'espana' in item.url:
        lang = 'castellano'
    elif 'cinecalidad' in item.url:
        lang = 'latino'

    data = httptools.downloadpage(item.url).data
    patron = 'target="_blank".*? service=".*?" data="(.*?)"><li>(.*?)<\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

@@ -316,7 +324,7 @@ def findvideos(item):
            url = server_url[server_id] + video_id + '.html'
        elif server_id == 'BitTorrent':
            import urllib
-           base_url = 'http://www.cinecalidad.to/protect/v.php'
+           base_url = '%sprotect/v.php' % host
            post = {'i': video_id, 'title': item.title}
            post = urllib.urlencode(post)
            headers = {'Referer': item.url}

@@ -330,9 +338,9 @@ def findvideos(item):

        if server_id not in ['Mega', 'MediaFire', 'Trailer']:
            if server != 'torrent':
-               language = IDIOMAS[item.language]
+               language = IDIOMAS[lang]
            else:
-               language = [IDIOMAS[item.language], 'vose']
+               language = [IDIOMAS[lang], 'vose']
            if url not in duplicados:
                new_item = Item(channel=item.channel,
                                action='play',

@@ -426,6 +434,6 @@ def newest(categoria):

def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "-")
-   item.url = host + '/?s=' + texto
+   item.url = item.host + '?s=' + texto
    if texto != '':
        return peliculas(item)
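
Note: the BitTorrent branch above builds a classic form POST. A self-contained sketch of what that request looks like (the host and ids here are placeholders, not the channel's real values):

import urllib  # Python 2, as used by the channel

host = 'https://www.cinecalidad.example/'   # placeholder host
base_url = '%sprotect/v.php' % host         # same interpolation as the patch
post = urllib.urlencode({'i': 'abc123', 'title': 'Some Movie'})
headers = {'Referer': host}
# With Alfa's httptools, passing a post body switches the request to POST:
#   data = httptools.downloadpage(base_url, post=post, headers=headers).data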
@@ -19,7 +19,6 @@ if __perfil__ - 1 >= 0:
    color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
    color1 = color2 = color3 = color4 = color5 = ""

host = "http://www.crunchyroll.com"
proxy_u = "http://anonymouse.org/cgi-bin/anon-www.cgi/"
proxy_e = "http://proxyanonimo.es/browse.php?u="

@@ -27,24 +26,20 @@ proxy_e = "http://proxyanonimo.es/browse.php?u="

def login():
    logger.info()

    langs = ['deDE', 'ptPT', 'frFR', 'itIT', 'enUS', 'esLA', 'esES']
    lang = langs[config.get_setting("crunchyrollidioma", "crunchyroll")]
-   httptools.downloadpage("http://www.crunchyroll.com/ajax/", "req=RpcApiTranslation_SetLang&locale=%s" % lang)
-   login_page = "https://www.crunchyroll.com/login"
+   httptools.downloadpage(host + "/ajax/", "req=RpcApiTranslation_SetLang&locale=%s" % lang)
+   login_page = host.replace("http", "https") + "/login"
    user = config.get_setting("crunchyrolluser", "crunchyroll")
    password = config.get_setting("crunchyrollpassword", "crunchyroll")
    if not user or not password:
        return False, "", ""
    data = httptools.downloadpage(login_page).data

    if not "<title>Redirecting" in data:
        token = scrapertools.find_single_match(data, 'name="login_form\[_token\]" value="([^"]+)"')
        redirect_url = scrapertools.find_single_match(data, 'name="login_form\[redirect_url\]" value="([^"]+)"')
        post = "login_form%5Bname%5D=" + user + "&login_form%5Bpassword%5D=" + password + \
               "&login_form%5Bredirect_url%5D=" + redirect_url + "&login_form%5B_token%5D=" + token

        data = httptools.downloadpage(login_page, post).data
        if not "<title>Redirecting" in data:
            if "Usuario %s no disponible" % user in data:

@@ -53,7 +48,6 @@ def login():
                return False, "Es necesario resolver un captcha. Loguéate desde un navegador y vuelve a intentarlo", ""
            else:
                return False, "No se ha podido realizar el login.", ""

    data = httptools.downloadpage(host).data
    premium = scrapertools.find_single_match(data, ',"premium_status":"([^"]+)"')
    premium = premium.replace("_", " ").replace("free trial", "Prueba Gratuita").capitalize()
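
Note: the patched login derives the https URL with host.replace("http", "https"), which only works while the scheme is the sole "http" substring in host. A sketch of a stricter variant (not part of the patch) that swaps just the scheme:

import urlparse  # Python 2, matching the addon

host = "http://www.crunchyroll.com"
parts = urlparse.urlsplit(host)
login_page = urlparse.urlunsplit(("https",) + parts[1:]) + "/login"
# -> "https://www.crunchyroll.com/login"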
@@ -67,7 +61,6 @@ def mainlist(item):
    logger.info()
    itemlist = []
    item.text_color = color1

    proxy_usa = config.get_setting("proxy_usa", "crunchyroll")
    proxy_spain = config.get_setting("proxy_spain", "crunchyroll")
    item.login = False

@@ -82,14 +75,12 @@ def mainlist(item):
        httptools.downloadpage("http://proxyanonimo.es/")
        item.proxy = "spain"
        host = proxy_e + host

    if not item.login and error_message:
        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False, text_color=color4))
    elif item.login:
        itemlist.append(item.clone(title="Tipo de cuenta: %s" % premium, action="", text_color=color4))
    elif item.proxy:
        itemlist.append(item.clone(title="Usando proxy: %s" % item.proxy.capitalize(), action="", text_color=color4))

    itemlist.append(item.clone(title="Anime", action="", text_color=color2))
    item.contentType = "tvshow"
    itemlist.append(

@@ -104,39 +95,32 @@ def mainlist(item):
    itemlist.append(
        item.clone(title=" Popular", action="lista", url=host + "/videos/drama/popular/ajax_page?pg=0", page=0))
    itemlist.append(item.clone(title=" Índice Alfabético", action="indices",
-                              url="http://www.crunchyroll.com/videos/drama/alpha"))
+                              url=host + "/videos/drama/alpha"))
    if item.proxy != "usa":
        itemlist.append(item.clone(action="calendario", title="Calendario de Estrenos Anime", text_color=color4,
                                   url=host + "/simulcastcalendar"))
    itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))

    return itemlist


def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()

    user = config.get_setting("crunchyrolluser", "crunchyroll")
    password = config.get_setting("crunchyrollpassword", "crunchyroll")
    sub = config.get_setting("crunchyrollsub", "crunchyroll")

    config.set_setting("crunchyrolluser", user)
    config.set_setting("crunchyrollpassword", password)
    values = [6, 5, 4, 3, 2, 1, 0]
    config.set_setting("crunchyrollsub", str(values[sub]))
    platformtools.itemlist_refresh()

    return ret
def lista(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    next = item.url.replace("?pg=%s" % item.page, "?pg=%s" % str(item.page + 1))
    data_next = httptools.downloadpage(next).data
    patron = '<li id="media_group_(\d+)".*?title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)"' \

@@ -154,18 +138,15 @@ def lista(item):
        itemlist.append(item.clone(action="episodios", url=url, title=scrapedtitle, thumbnail=thumb,
                                   contentTitle=title, contentSerieName=title, infoLabels={'plot': plot},
                                   text_color=color2))

    if '<li id="media_group' in data_next:
        itemlist.append(item.clone(action="lista", url=next, title=">> Página Siguiente", page=item.page + 1,
                                   text_color=""))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\t|\s{2,}', '', data)
    patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)"' \

@@ -215,12 +196,10 @@ def episodios(item):

def indices(item):
    logger.info()
    itemlist = []

    if not item.url:
-       itemlist.append(item.clone(title="Alfabético", url="http://www.crunchyroll.com/videos/anime/alpha"))
-       itemlist.append(item.clone(title="Géneros", url="http://www.crunchyroll.com/videos/anime"))
-       itemlist.append(item.clone(title="Temporadas", url="http://www.crunchyroll.com/videos/anime"))
+       itemlist.append(item.clone(title="Alfabético", url=host + "/videos/anime/alpha"))
+       itemlist.append(item.clone(title="Géneros", url=host + "/videos/anime"))
+       itemlist.append(item.clone(title="Temporadas", url=host + "/videos/anime"))
    else:
        data = httptools.downloadpage(item.url).data
        if "Alfabético" in item.title:

@@ -235,7 +214,6 @@ def indices(item):
                    url = proxy_u + host + url
                else:
                    url = host + url

                itemlist.append(item.clone(action="alpha", title=title, url=url, page=0))
        elif "Temporadas" in item.title:
            bloque = scrapertools.find_single_match(data,

@@ -249,7 +227,6 @@ def indices(item):
                    url = proxy_u + host + url
                else:
                    url = host + url

                itemlist.append(item.clone(action="lista", title=title, url=url, page=0))
        else:
            bloque = scrapertools.find_single_match(data, '<div class="genre-selectors selectors">(.*?)</div>')

@@ -260,18 +237,14 @@ def indices(item):
                    url = proxy_e + url.replace("&", "%26")
                elif item.proxy == "usa":
                    url = proxy_u + url

                itemlist.append(item.clone(action="lista", title=title, url=url, page=0))

    return itemlist


def alpha(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<div class="wrapper hover-toggle-queue.*?title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)"' \
             '.*?<span class="series-data.*?>\s*([^<]+)</span>.*?<p.*?>(.*?)</p>'
    matches = scrapertools.find_multiple_matches(data, patron)

@@ -285,16 +258,13 @@ def alpha(item):
        itemlist.append(item.clone(action="episodios", url=url, title=scrapedtitle, thumbnail=thumb,
                                   contentTitle=title, contentSerieName=title, infoLabels={'plot': plot},
                                   text_color=color2))

    return itemlist


def calendario(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<div class="specific-date">.*?datetime="\d+-(\d+)-(\d+).*?class="day-name">.*?>\s*([^<]+)</time>(.*?)</section>'
    bloques = scrapertools.find_multiple_matches(data, patron)
    for mes, dia, title, b in bloques:

@@ -309,10 +279,8 @@ def calendario(item):
        subt = re.sub(r"\s{2,}", " ", subt)
        if "<time" in subt:
            subt = re.sub(r"<time.*?>", "", subt).replace("</time>", "")

        scrapedtitle = " [%s] %s - %s" % (hora, scrapertools.htmlclean(title), subt)
        scrapedtitle = re.sub(r"\[email protected\]|\[email\xc2\xa0protected\]", "Idolm@ster", scrapedtitle)

        if "Disponible" in scrapedtitle:
            if item.proxy == "spain":
                url = urllib.unquote(url.replace("/browse.php?u=", "").replace("&b=12", ""))

@@ -329,7 +297,6 @@ def calendario(item):
                   .replace("&b=12", "").replace("_large", "_full"))
        itemlist.append(item.clone(action=action, url=url, title=scrapedtitle, contentTitle=title, thumbnail=thumb,
                                   text_color=color2, contentSerieName=title, server=server))

    next = scrapertools.find_single_match(data, 'js-pagination-next"\s*href="([^"]+)"')
    if next:
        if item.proxy == "spain":

@@ -344,7 +311,6 @@ def calendario(item):
        else:
            prev = host + prev
        itemlist.append(item.clone(action="calendario", url=prev, title="<< Semana Anterior"))

    return itemlist


@@ -353,6 +319,5 @@ def play(item):
    if item.login and not "[V]" in item.title:
        post = "cbelapsed=60&h=&media_id=%s" % item.media_id + "&req=RpcApiVideo_VideoView&cbcallcount=1&ht=0" \
               "&media_type=1&video_encode_id=0&playhead=10000"
-       httptools.downloadpage("http://www.crunchyroll.com/ajax/", post)
+       httptools.downloadpage(host + "/ajax/", post)
    return [item]
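
Note: nearly every hunk in this file makes the same change: a hardcoded "http://www.crunchyroll.com/..." URL becomes host plus a path. A small helper (a sketch, not part of the patch) would make that refactor mechanical and avoid scheme and slash mistakes:

import urlparse  # Python 2

HOST = "http://www.crunchyroll.com"

def absolute(path, secure=False):
    # Join a site-relative path onto the channel host.
    base = HOST.replace("http://", "https://") if secure else HOST
    return urlparse.urljoin(base + "/", path.lstrip("/"))

# absolute("/ajax/")             -> "http://www.crunchyroll.com/ajax/"
# absolute("login", secure=True) -> "https://www.crunchyroll.com/login"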
@@ -21,7 +21,7 @@ from channelselector import get_thumb

__channel__ = "pedropolis"

-host = "http://pedropolis.tv/"
+host = "https://pedropolis.tv/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)

@@ -74,7 +74,9 @@ def menumovies(item):
    itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'pelicula/',
                           viewcontent='movies', viewmode="movie_with_plot"),
                item.clone(title="Más Vistas", action="peliculas", text_blod=True,
-                          viewcontent='movies', url=host + 'tendencias/?get=movie', viewmode="movie_with_plot"),
+                          viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
                item.clone(title="Mejor Valoradas", action="peliculas", text_blod=True,
                           viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
                item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
                           viewcontent='movies', url=host, viewmode="movie_with_plot"),
                item.clone(title="Por género", action="p_portipo", text_blod=True, extra="Categorías",
@@ -115,41 +117,32 @@ def p_portipo(item):

def peliculas(item):
    logger.info()
    itemlist = []
    url_next_page = ''
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    # logger.info(data)
    patron = '<div class="poster"> <img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
    patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?'  # rating
    patron += '<span class="quality">([^<]+)</span></div> <a href="([^"]+)">.*?'  # quality, url
    patron += '<span>([^<]+)</span>'  # year
    patron = '<div class="poster"> <img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
    patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?'  # rating
    patron += '<span class="quality">([^<]+)</span></div> <a href="([^"]+)">.*?'  # quality, url
    patron += '<span>([^<]+)</span>.*?'  # year

    matches = scrapertools.find_multiple_matches(data, patron)
    # Pagination
    if item.next_page != 'b':
        if len(matches) > 19:
            url_next_page = item.url
            matches = matches[:19]
            next_page = 'b'
    else:
        matches = matches[19:]
        next_page = 'a'
    patron_next_page = "<span class=\"current\">\d+</span><a href='([^']+)'"
    matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
    if len(matches_next_page) > 0:
        url_next_page = urlparse.urljoin(item.url, matches_next_page[0])

    for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches:
        if 'Proximamente' not in quality:
            scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
                'Español Latino', '').strip()
            title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)
            itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
                                 infoLabels={"year": year, "rating": rating}, thumbnail=scrapedthumbnail,
                                 url=scrapedurl, next_page=next_page, quality=quality, title=title))
        scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
            'Español Latino', '').strip()
        title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)

        itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
                             infoLabels={"year": year, "rating": rating}, thumbnail=scrapedthumbnail,
                             url=scrapedurl, quality=quality, title=title))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    if url_next_page:
        pagination = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")
        if pagination:
            itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
                                 url=url_next_page, next_page=next_page, folder=True, text_blod=True,
                                 thumbnail=get_thumb("next.png")))
                                 url=pagination, folder=True, text_blod=True, thumbnail=get_thumb("next.png")))

    return itemlist
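
Note: the rewrite above drops the fragile 19-item "a"/"b" paging trick in favor of scraping the paginator's next link directly. A minimal sketch of the new approach (the markup snippet is hypothetical):

import re

data = """<span class="current">2</span><a href='https://pedropolis.tv/pelicula/page/3/'>3</a>"""
patron = r'<span class="current">\d+</span>' + "<a href='([^']+)'"
m = re.search(patron, data)
if m:
    next_url = m.group(1)  # fed back into peliculas() as the next page's item.url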
@@ -191,11 +184,13 @@ def sub_search(item):
        itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar',
                                   action=action, infoLabels={"year": year}, contentType=contentType,
                                   thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-   paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
-   if paginacion:
+   pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
+   if pagination:
        itemlist.append(Item(channel=item.channel, action="sub_search",
-                            title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png")))
+                            title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png")))
    return itemlist
@@ -229,37 +224,26 @@ def newest(categoria):

def series(item):
    logger.info()
    url_next_page = ''
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<div class="poster"> <img src="([^"]+)"'
    patron += ' alt="([^"]+)">.*?'
    patron += '<a href="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    if item.next_page != 'b':
        if len(matches) > 19:
            url_next_page = item.url
            matches = matches[:19]
            next_page = 'b'
    else:
        matches = matches[19:]
        next_page = 'a'
    patron_next_page = '<link rel="next" href="([^"]+)" />'
    matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
    if len(matches_next_page) > 0:
        url_next_page = urlparse.urljoin(item.url, matches_next_page[0])

    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('’', "'")
        itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
                             url=scrapedurl, thumbnail=scrapedthumbnail,
                             contentSerieName=scrapedtitle, show=scrapedtitle,
                             next_page=next_page, action="temporadas", contentType='tvshow'))
                             action="temporadas", contentType='tvshow'))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    if url_next_page:
        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page,
                             next_page=next_page, thumbnail=get_thumb("next.png")))

    pagination = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)' />")

    if pagination:
        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
                             thumbnail=get_thumb("next.png")))
    return itemlist
@@ -267,8 +251,6 @@ def temporadas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<span class="title">([^<]+)<i>.*?'  # season
    patron += '<img src="([^"]+)"></a></div>'  # img
    matches = scrapertools.find_multiple_matches(data, patron)

@@ -303,8 +285,6 @@ def episodios(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<div class="imagen"><a href="([^"]+)">.*?'  # url
    patron += '<div class="numerando">(.*?)</div>.*?'  # episode number
    patron += '<a href="[^"]+">([^<]+)</a>'  # episode title

@@ -350,16 +330,15 @@ def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # logger.info(data)
    patron = '<div id="option-(\d+)".*?<iframe.*?src="([^"]+)".*?</iframe>'  # lang, url
    matches = re.compile(patron, re.DOTALL).findall(data)
    for option, url in matches:
-       lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?</b>-->(\w+)' % option)
-       lang = lang.lower()
+       lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?</b>(.*?)<span' % option)
+       lang = lang.lower().strip()
        idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
                  'drive': '[COLOR cornflowerblue](LAT)[/COLOR]',
                  'castellano': '[COLOR green](CAST)[/COLOR]',
                  'español': '[COLOR green](CAST)[/COLOR]',
                  'subtitulado': '[COLOR red](VOS)[/COLOR]',
                  'ingles': '[COLOR red](VOS)[/COLOR]'}
        if lang in idioma:
plugin.video.alfa/channels/repelis.json (new file, 76 lines)
@@ -0,0 +1,76 @@
{
    "id": "repelis",
    "name": "Repelis",
    "active": true,
    "adult": false,
    "language": ["lat", "cast", "vo"],
    "thumbnail": "https://s8.postimg.cc/yem7wyfw1/repelis1.png",
    "banner": "https://s8.postimg.cc/p6tzg9gjl/repelis2.png",
    "categories": [
        "movie"
    ],
    "settings": [
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": ["No filtrar", "LAT", "ESP", "VO"]
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
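
Note: each "settings" entry above becomes a per-channel option that the Python side reads through config.get_setting, the same call the channel files in this patch already use. Sketch (the values shown are just the defaults declared above):

from platformcode import config  # Alfa's settings helper

filter_idx = config.get_setting("filter_languages", "repelis")  # index into lvalues, 0 -> "No filtrar"
modo_grafico = config.get_setting("modo_grafico", "repelis")    # bool: fetch extra info from TMDb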
plugin.video.alfa/channels/repelis.py (new file, 221 lines)
@@ -0,0 +1,221 @@
# -*- coding: utf-8 -*-
# -*- Channel Repelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import urllib

from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger


idio = {'es-mx': 'LAT', 'es-es': 'ESP', 'en': 'VO'}
cali = {'poor': 'SD', 'low': 'SD', 'high': 'HD'}

list_language = idio.values()
list_quality = ["SD", "HD"]
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload', 'netu', 'vidoza', 'uptobox']


__channel__ = 'repelis'

host = "https://repelis.io"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True


def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    itemlist.append(Item(channel = item.channel, title = "Destacadas", action = "destacadas", url = host, thumbnail = get_thumb("hot", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Agregadas recientemente", action = "peliculas", url = host + "/explorar?page=", page=1, thumbnail = get_thumb("last", auto = True)))
    itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True)))
    itemlist.append(Item(channel = item.channel, title = ""))
    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/search?term=", thumbnail = get_thumb("search", auto = True)))
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def destacadas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'Películas destacadas(.*?)</section>'
    bloque = scrapertools.find_single_match(data, patron)
    patron = 'href="([^"]+).*?'
    patron += 'title="([^"]+).*?'
    patron += 'data-src="([^"]+).*?'
    patron += 'data-year="([^"]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear in matches:
        scrapedtitle = scrapedtitle.replace("Película ", "")
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = scrapedtitle,
                             infoLabels = {'year': scrapedyear},
                             thumbnail = host + scrapedthumbnail,
                             title = scrapedtitle + " (%s)" % scrapedyear,
                             url = host + scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    url = item.url
    headers = [
        ['Content-Type', 'application/json']
    ]
    if item.extra != "busca":
        url = item.url + str(item.page)
        data = httptools.downloadpage(url, post="", headers=headers).data
        bloquex = scrapertools.find_single_match(data, 'window.__NUXT__={.*?movies":(.*?\])')
        dict = jsontools.load(bloquex)
    else:
        dd = httptools.downloadpage("https://repelis.io/graph", post=jsontools.dump(item.post), headers=headers).data
        dict = jsontools.load(dd)["data"]["movies"]
    for datos in dict:
        scrapedurl = host + "/pelicula/" + datos["slug"] + "-" + datos["id"]
        scrapedtitle = datos["title"].replace("Película ", "")
        scrapedthumbnail = host + "/_images/posters/" + datos["poster"] + "/180x270.jpg"
        scrapedyear = scrapertools.find_single_match(datos["releaseDate"], '\d{4}')
        itemlist.append(Item(channel = item.channel,
                             action = "findvideos",
                             contentTitle = scrapedtitle,
                             infoLabels = {'year': scrapedyear},
                             thumbnail = scrapedthumbnail,
                             title = scrapedtitle + " (%s)" % scrapedyear,
                             url = scrapedurl
                             ))
    tmdb.set_infoLabels(itemlist)
    # pagination
    if len(itemlist) > 0:
        itemlist.append(Item(channel = item.channel,
                             action = "peliculas",
                             page = item.page + 1,
                             title = "Página siguiente >>",
                             url = item.url
                             ))
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host + "/explorar?page="
            item.page = 1
        elif categoria == 'infantiles':
            item.url = host + '/genero/animacion-WYXS9?page='
            item.page = 1
        elif categoria == 'terror':
            item.url = host + '/genero/terror-dVbSb?page='
            item.page = 1
        itemlist = peliculas(item)
        if "Página" in itemlist[-1].title:
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def search(item, texto):
    logger.info()
    item.url = item.url + texto
    item.extra = "busca"
    item.page = 1
    item.texto = texto
    item.post = {"query": "\n query ($term: String) {\n movies: allMovies(search: $term) {\n id\n slug\n title\n rating\n releaseDate\n released\n poster\n nowPlaying\n }\n }\n ", "variables": {"term": "%s" % texto}}
    if texto != '':
        return peliculas(item)
    else:
        return []
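
Note: search() above posts a GraphQL document to /graph and peliculas() then walks data["movies"]. A standalone sketch of the same call with only the standard library (the endpoint and field names are copied from the code above; the trimmed-down query is an assumption — the channel sends the longer field list):

import json
import urllib2  # Python 2, matching the addon

query = {
    "query": "query ($term: String) { movies: allMovies(search: $term) { id slug title poster releaseDate } }",
    "variables": {"term": "matrix"},
}
req = urllib2.Request("https://repelis.io/graph",
                      json.dumps(query),
                      {"Content-Type": "application/json"})
movies = json.loads(urllib2.urlopen(req).read())["data"]["movies"]
for m in movies:
    print("%(title)s (%(releaseDate)s)" % m)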
def generos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, "Géneros.*?</ul>")
    patron = 'href="([^"]+)"'
    patron += '>([^<]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, titulo in matches:
        itemlist.append(Item(channel = item.channel,
                             action = "peliculas",
                             title = titulo,
                             url = host + url + "?page=",
                             page = 1
                             ))
    return itemlist


def findvideos(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, ',"mirrors":(.*?\])')
    if bloque == "[]":
        return []
    dict = jsontools.load(bloque)
    urlx = httptools.downloadpage(host + dict[0]["url"])  # download one full page first so the Cloudflare check can be passed
    for datos in dict:
        url1 = httptools.downloadpage(host + datos["url"], follow_redirects=False, only_headers=True).headers.get("location", "")
        titulo = "Ver en: %s (" + cali[datos["quality"]] + ") (" + idio[datos["audio"]] + ")"
        text_color = "white"
        if "youtube" in url1:
            titulo = "Ver trailer: %s"
            text_color = "yellow"
        itemlist.append(
            item.clone(channel = item.channel,
                       action = "play",
                       language = idio[datos["audio"]],
                       quality = cali[datos["quality"]],
                       title = titulo,
                       text_color = text_color,
                       url = url1
                       ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.sort(key=lambda it: (it.language, it.server))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if itemlist:
        itemlist.append(Item(channel = item.channel))
        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                   text_color="magenta"))
        # Option to add the movie to the KODI video library
        if item.extra != "library":
            if config.get_videolibrary_support():
                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                     action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                     contentTitle = item.contentTitle
                                     ))
    return itemlist


def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]
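
Note: findvideos() above resolves each mirror without following the redirect, so the Location header exposes the real hoster URL. The trick in isolation (the mirror path is hypothetical):

from core import httptools

host = "https://repelis.io"
response = httptools.downloadpage(host + "/ver/mirror-123",
                                  follow_redirects=False, only_headers=True)
final_url = response.headers.get("location", "")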
@@ -242,8 +242,9 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
    server_cloudflare = "cloudflare"

    is_channel = inspect.getmodule(inspect.currentframe().f_back)
-   # a 4xx or 5xx error raises an exception
-   # response["code"] = 400
+   # a 4xx or 5xx error raises an exception (except for server modules)
+   # response["code"] = 400  # test line
+   is_channel = str(is_channel).replace("/servers/", "\\servers\\")  # on non-Windows systems the path separator differs
    if type(response["code"]) == int and "\\servers\\" not in str(is_channel):
        if response["code"] > 399 and (server_cloudflare == "cloudflare" and response["code"] != 503):
            raise WebErrorException(urlparse.urlparse(url)[1])
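
Note: the new check classifies the caller as a server module by normalizing its repr to backslash paths and searching for "\servers\". A sketch of a more direct test on path components (names hypothetical, not the patch's code):

import inspect
import os

def caller_is_server():
    # Inspect the module two frames up (the caller of downloadpage).
    module = inspect.getmodule(inspect.currentframe().f_back.f_back)
    path = getattr(module, "__file__", "") or ""
    return "servers" in os.path.normpath(path).split(os.sep)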
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "cloudsany.com/i/([A-z0-9]+)",
                "url": "https://cloudsany.com/i/\\1"
            }
        ]
    },
    "free": true,
    "id": "cloudsany",
    "name": "cloudsany",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s1.postimg.cc/6wixo35myn/cloudsany1.png"
}
@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# cloudsany connector
# https://github.com/alfa-addon
# ------------------------------------------------------------

from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "no longer exists" in data:
        return False, "[Cloudsany] El fichero ha sido borrado"

    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    data = scrapertools.find_single_match(data, 'p,a,c,k,e.*?</script>')
    unpack = jsunpack.unpack(data)
    logger.info("Intel11 %s" % unpack)
    video_urls = []
    videourl = scrapertools.find_single_match(unpack, 'config={file:"([^"]+)')
    video_urls.append([".MP4 [Cloudsany]", videourl])

    return video_urls
@@ -24,9 +24,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>")
    unpacked = jsunpack.unpack(packed_data)
-   patron = "file:(.*?),label:(.*?)}"
+   patron = "sources..([^\]]+)"
    matches = re.compile(patron, re.DOTALL).findall(unpacked)
-   for url, quality in matches:
-       video_urls.append(['%s' % quality, url])
-   video_urls.sort(key=lambda x: int(x[0]))
+   for url in matches:
+       url += "|Referer=%s" % page_url
+       video_urls.append(['mp4', url])
    return video_urls
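
Note: this hunk and the cloudsany connector above share one recipe for p,a,c,k,e,d players: cut the eval blob out of the page, unpack it, then regex the sources out of the resulting plain JS. The recipe in isolation (page_url is a placeholder):

from core import httptools
from core import scrapertools
from lib import jsunpack  # the p.a.c.k.e.r decoder bundled with Alfa

page_url = "https://example-hoster.example/embed-abc.html"  # placeholder
data = httptools.downloadpage(page_url).data
packed = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)  # plain JS containing the player config
sources = scrapertools.find_single_match(unpacked, "sources..([^\]]+)")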
@@ -1,71 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "nowvideo.../(?:video/|embed.php\\?.*v=)([A-z0-9]+)",
                "url": "http://www.nowvideo.sx/video/\\1"
            }
        ]
    },
    "free": true,
    "id": "nowvideo",
    "name": "nowvideo",
    "premium": ["nowvideo", "realdebrid"],
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        },
        {
            "default": false,
            "enabled": true,
            "id": "premium",
            "label": "Activar cuenta premium",
            "type": "bool",
            "visible": true
        },
        {
            "default": "",
            "enabled": "eq(-1,true)",
            "id": "user",
            "label": "@30014",
            "type": "text",
            "visible": true
        },
        {
            "default": "",
            "enabled": "eq(-2,true)+!eq(-1,'')",
            "hidden": true,
            "id": "password",
            "label": "@30015",
            "type": "text",
            "visible": true
        }
    ],
    "thumbnail": "server_nowvideo.png"
}
@@ -1,59 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    url = page_url.replace("http://www.nowvideo.sx/video/", "http://embed.nowvideo.sx/embed/?v=")
    data = httptools.downloadpage(url).data
    if "The file is being converted" in data or "Please try again later" in data:
        return False, "El fichero está en proceso"
    elif "no longer exists" in data:
        return False, "El fichero ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    if premium:
        login_url = "http://www.nowvideo.eu/login.php"
        data = httptools.downloadpage(login_url).data
        login_url = "http://www.nowvideo.eu/login.php?return="
        post = "user=" + user + "&pass=" + password + "&register=Login"
        headers = {"Referer": "http://www.nowvideo.eu/login.php"}
        data = httptools.downloadpage(login_url, post, headers=headers).data
        data = httptools.downloadpage(page_url).data
        flashvar_file = scrapertools.get_match(data, 'flashvars.file="([^"]+)"')
        flashvar_filekey = scrapertools.get_match(data, 'flashvars.filekey=([^;]+);')
        flashvar_filekey = scrapertools.get_match(data, 'var ' + flashvar_filekey + '="([^"]+)"')
        flashvar_user = scrapertools.get_match(data, 'flashvars.user="([^"]+)"')
        flashvar_key = scrapertools.get_match(data, 'flashvars.key="([^"]+)"')
        flashvar_type = scrapertools.get_match(data, 'flashvars.type="([^"]+)"')
        url = "http://www.nowvideo.eu/api/player.api.php?user=" + flashvar_user + "&file=" + flashvar_file + "&pass=" + flashvar_key + "&cid=1&cid2=undefined&key=" + flashvar_filekey.replace(
            ".", "%2E").replace("-", "%2D") + "&cid3=undefined"
        data = httptools.downloadpage(url).data
        location = scrapertools.get_match(data, 'url=([^\&]+)&')
        location = location + "?client=FLASH"
        video_urls.append([scrapertools.get_filename_from_url(location)[-4:] + " [premium][nowvideo]", location])
    else:
        url = page_url.replace("http://www.nowvideo.sx/video/", "http://embed.nowvideo.sx/embed/?v=")
        data = httptools.downloadpage(url).data
        videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
        if not videourls:
            videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
        for videourl in videourls:
            if videourl.endswith(".mpd"):
                id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
                videourl = "http://www.nowvideo.sx/download.php%3Ffile=mm" + "%s.mp4" % id
            videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
            ext = scrapertools.get_filename_from_url(videourl)[-4:]
            videourl = videourl.replace("%3F", "?") + \
                       "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
            video_urls.append([ext + " [nowvideo]", videourl])
    return video_urls
@@ -1,44 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(oboom.com/[a-zA-Z0-9]+)",
                "url": "https://www.\\1"
            }
        ]
    },
    "free": false,
    "id": "oboom",
    "name": "oboom",
    "premium": ["realdebrid"],
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,9 +0,0 @@
# -*- coding: utf-8 -*-

from platformcode import logger


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    return video_urls
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "playwatch.me/(?:embed/|)([A-z0-9]+)",
                "url": "http://playwatch.me/embed/\\1"
            }
        ]
    },
    "free": true,
    "id": "playwatch",
    "name": "playwatch",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/c7LwCTc.png?1"
}
@@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-

import base64

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    response = httptools.downloadpage(page_url, follow_redirects=False)

    if not response.sucess or response.headers.get("location"):
        return False, "[Playwatch] El fichero no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    data = httptools.downloadpage(page_url, follow_redirects=False).data

    code = scrapertools.find_single_match(data, ' tracker:\s*"([^"]+)"')
    media_url = base64.b64decode(code)
    ext = scrapertools.get_filename_from_url(media_url)[-4:]
    video_urls = [["%s [playwatch]" % ext, media_url]]

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?:cdn|config).playwire.com(?:/v2|)/(\\d+)/(?:embed|videos/v2|config)/(\\d+)",
                "url": "http://config.playwire.com/\\1/videos/v2/\\2/zeus.json"
            }
        ]
    },
    "free": true,
    "id": "playwire",
    "name": "playwire",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,45 +0,0 @@
# -*- coding: utf-8 -*-

import xml.etree.ElementTree as ET

from core import jsontools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = scrapertools.cachePage(page_url)
    if ("File was deleted" or "Not Found") in data: return False, "[playwire] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    data = scrapertools.cachePage(page_url)
    data = jsontools.load(data)
    f4m = data['content']['media']['f4m']

    video_urls = []
    data = scrapertools.downloadpageGzip(f4m)

    xml = ET.fromstring(data)
    base_url = xml.find('{http://ns.adobe.com/f4m/1.0}baseURL').text
    for media in xml.findall('{http://ns.adobe.com/f4m/1.0}media'):
        if ".m3u8" in media.get('url'): continue
        media_url = base_url + "/" + media.get('url')
        try:
            height = media.get('height')
            width = media.get('width')
            label = "(" + width + "x" + height + ")"
        except:
            label = ""
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " " + label + " [playwire]", media_url])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
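
Note: the f4m manifest parsed above is namespaced XML, which is why every find() carries the Adobe namespace. The same parse isolated, using a namespace map and a hypothetical manifest:

import xml.etree.ElementTree as ET

F4M_NS = {"f4m": "http://ns.adobe.com/f4m/1.0"}
manifest = """<manifest xmlns="http://ns.adobe.com/f4m/1.0">
  <baseURL>http://cdn.example.com</baseURL>
  <media url="v_720.mp4" width="1280" height="720"/>
</manifest>"""

xml = ET.fromstring(manifest)
base_url = xml.find("f4m:baseURL", F4M_NS).text
for media in xml.findall("f4m:media", F4M_NS):
    print("%s/%s (%sx%s)" % (base_url, media.get("url"), media.get("width"), media.get("height")))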
@@ -7,22 +7,16 @@ from platformcode import config, logger

def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    try:
        response = httptools.downloadpage(page_url)
    except:
        pass

    response = httptools.downloadpage(page_url)
    if response.code == 404:
        return False, config.get_localized_string(70449) % "RapidVideo"
    if not response.data or "urlopen error [Errno 1]" in str(response.code):
        from platformcode import config
        if config.is_xbmc():
            return False, config.get_localized_string(70302) % "RapidVideo"
        elif config.get_platform() == "plex":
            return False, config.get_localized_string(70303) % "RapidVideo"
        elif config.get_platform() == "mediaserver":
            return False, config.get_localized_string(70304) % "RapidVideo"

    if "Object not found" in response.data:
        return False, config.get_localized_string(70449) % "RapidVideo"
    if response.code == 500:
@@ -1,45 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "rutube.ru\\/(?:video\\/([\\da-zA-Z]{32})|play\\/embed\\/([\\d]+))",
                "url": "http://rutube.ru/api/play/options/\\1/?format=json"
            }
        ]
    },
    "free": true,
    "id": "rutube",
    "name": "rutube",
    "premium": ["realdebrid"],
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "server_rutube.png"
}
@@ -1,37 +0,0 @@
# -*- coding: utf-8 -*-

from core import jsontools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = scrapertools.cachePage(page_url)
    if ("File was deleted" or "Not Found") in data: return False, "[rutube] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    data = scrapertools.cachePage(page_url)
    if "embed" in page_url:
        link = scrapertools.find_single_match(data, '<link rel="canonical" href="https://rutube.ru/video/([\da-z]{32})')
        url = "http://rutube.ru/api/play/options/%s/?format=json" % link
        data = scrapertools.cachePage(url)

    data = jsontools.load(data)
    m3u8 = data['video_balancer']['m3u8']
    data = scrapertools.downloadpageGzip(m3u8)
    video_urls = []
    mediaurls = scrapertools.find_multiple_matches(data, '(http://.*?)\?i=(.*?)_')
    for media_url, label in mediaurls:
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [rutube]", media_url])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "streame.net/(?:embed-|)([a-z0-9]+)",
                "url": "http://streame.net/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "streame",
    "name": "streame",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = scrapertools.cache_page(page_url)
    if ("File was deleted" or "Not Found") in data: return False, "[Streame] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    data = scrapertools.cache_page(page_url)
    media_urls = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",label:"([^"]+)"\}')
    video_urls = []
    for media_url, label in media_urls:
        video_urls.append(
            [scrapertools.get_filename_from_url(media_url)[-4:] + " (" + label + ") [streame]", media_url])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -8,9 +8,9 @@ from platformcode import logger, config

def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    return True, ""
    page_url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
    data = httptools.downloadpage(page_url).data
-   if "File was deleted" in data or "Page Cannot Be Found" in data:
+   if "File was deleted" in data or "Page Cannot Be Found" in data or "<title>Video not found" in data:
        return False, "[thevideo.me] El archivo ha sido eliminado o no existe"
    return True, ""
@@ -1,45 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "href=\"http://www.veoh.com/.*?permalinkId=([^&\"]+)\"",
                "url": "\\1"
            },
            {
                "pattern": "pattern=\"http://www.veoh.com/static/swf/webplayer/WebPlayer.swf.*?permalinkId=([^&]+)=videodetailsembedded=0=anonymous\"",
                "url": "\\1"
            }
        ]
    },
    "free": true,
    "id": "veoh",
    "name": "veoh",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,32 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import scrapertools
from platformcode import logger


# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []

    # Extracted via flashvideodownloader.org
    if page_url.startswith("http://"):
        url = 'http://www.flashvideodownloader.org/download.php?u=' + page_url
    else:
        url = 'http://www.flashvideodownloader.org/download.php?u=http://www.veoh.com/watch/' + page_url
    logger.info("url=" + url)
    data = scrapertools.cachePage(url)

    # Extract the video
    patronvideos = '<a href="(http://content.veoh.com.*?)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        video_urls.append(["[veoh]", matches[0]])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "vidabc.com/(?:embed-|)([a-z0-9]+)",
                "url": "http://vidabc.com/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "vidabc",
    "name": "vidabc",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": ["No", "1", "2", "3", "4", "5"],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,54 +0,0 @@
# -*- coding: utf-8 -*-

from core.httptools import downloadpage
from core.scrapertools import get_match, find_multiple_matches
from platformcode import logger

host = "http://vidabc.com"
id_server = "vidabc"


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = downloadpage(page_url).data
    if "Video is processing now" in data:
        return False, "[vidabc] El archivo se está procesando"
    if "File was deleted" in data:
        return False, "[vidabc] El archivo ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    data = downloadpage(page_url).data

    try:
        sources = get_match(data, 'sources\s*:\s* \[([^\]]+)\]')
    except:
        from lib import jsunpack
        sources = jsunpack.unpack(get_match(data, '<script[^>]*>(eval.function.p,a,c,k,e,.*?)</script>'))
        sources = get_match(sources, 'sources\s*:\s*\[([^\]]+)\]')

    video_urls = []
    for media_url in find_multiple_matches(sources, '"([^"]+)"'):
        if media_url.endswith(".mp4"):
            video_urls.append([".mp4 [%s]" % id_server, media_url])

        if media_url.endswith(".m3u8"):
            video_urls.append(["M3U8 [%s]" % id_server, media_url])

        if media_url.endswith(".smil"):
            smil_data = downloadpage(media_url).data

            rtmp = get_match(smil_data, 'base="([^"]+)"')
            playpaths = find_multiple_matches(smil_data, 'src="([^"]+)" height="(\d+)"')

            for playpath, inf in playpaths:
                h = get_match(playpath, 'h=([a-z0-9]+)')
                video_urls.append(["RTMP [%s] %s" % (id_server, inf), "%s playpath=%s" % (rtmp, playpath)])

    for video_url in video_urls:
        logger.info("video_url: %s - %s" % (video_url[0], video_url[1]))

    return video_urls

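The deleted vidabc connector shows a fallback that recurs across these servers: read the player's sources array straight from the HTML, and only when that fails, unpack the eval(function(p,a,c,k,e,d)...) packer with lib.jsunpack and match against the unpacked text. A hedged sketch of just that fallback, using find_single_match (which returns "" on no match) in place of the original get_match/try-except; the regexes mirror the connector above:

# Sketch: prefer the plain sources block; fall back to unpacking packed JS.
from core.scrapertools import find_single_match
from lib import jsunpack

def extract_sources(data):
    sources = find_single_match(data, 'sources\s*:\s*\[([^\]]+)\]')
    if not sources:
        # Player config is hidden inside a p,a,c,k,e,d packer; unpack it first.
        packed = find_single_match(data, '<script[^>]*>(eval\(function\(p,a,c,k,e,d.*?)</script>')
        sources = find_single_match(jsunpack.unpack(packed), 'sources\s*:\s*\[([^\]]+)\]')
    return sources
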
@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(https?://(?:www.)?videowood.tv/)(?:embed|video)(/[0-9a-z]+)",
                "url": "\\1embed\\2"
            }
        ]
    },
    "free": true,
    "id": "videowood",
    "name": "videowood",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,26 +0,0 @@
# -*- coding: utf-8 -*-

from aadecode import decode as aadecode
from core import scrapertools
from core import httptools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "This video doesn't exist." in data:
        return False, '[videowood] El video no puede ser encontrado o ha sido eliminado.'
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    text_encode = scrapertools.find_single_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
    text_decode = aadecode(text_encode)
    patron = "'([^']+)'"
    media_url = scrapertools.find_single_match(text_decode, patron)
    video_urls.append([media_url[-4:] + " [Videowood]", media_url])
    return video_urls

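videowood's extractor decodes the obfuscated player script before scraping it: aadecode reverses the AAEncode-style wrapping, after which the first single-quoted literal in the decoded text is taken as the stream URL. A defensive variant of the same extraction; the empty-result guard is our addition, not in the original:

from aadecode import decode as aadecode
from core import httptools
from core import scrapertools

def get_media_url(page_url):
    data = httptools.downloadpage(page_url).data
    encoded = scrapertools.find_single_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
    if not encoded:
        return None  # assumption: page layout changed or video removed
    decoded = aadecode(encoded)
    # The first single-quoted literal in the decoded script is the stream URL.
    return scrapertools.find_single_match(decoded, "'([^']+)'") or None
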
@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?:vidgg.to|vid.gg)/(?:embed/|video/)([a-z0-9]+)",
                "url": "http://vidgg.to/video/\\1"
            }
        ]
    },
    "free": true,
    "id": "vidgg",
    "name": "vidgg",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}

@@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = jsontools.load(httptools.downloadpage("http://www.vidgg.to/api-v2/alive.php?link=" + page_url).data)
    if data["data"] == "NOT_FOUND" or data["data"] == "FAILED":
        return False, "[Vidgg] El archivo no existe o ha sido borrado"
    elif data["data"] == "CONVERTING":
        return False, "[Vidgg] El archivo se está procesando"
    else:
        return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data

    mediaurls = scrapertools.find_multiple_matches(data, '<source src="([^"]+)"')
    if not mediaurls:
        id_file = page_url.rsplit("/", 1)[1]
        key = scrapertools.find_single_match(data, 'flashvars\.filekey\s*=\s*"([^"]+)"')
        if not key:
            varkey = scrapertools.find_single_match(data, 'flashvars\.filekey\s*=\s*([^;]+);')
            key = scrapertools.find_single_match(data, varkey + '\s*=\s*"([^"]+)"')

        # First url; it deliberately returns a wrong url that is needed to obtain the real link
        url = "http://www.vidgg.to//api/player.api.php?cid2=undefined&cid=undefined&numOfErrors=0&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined" % (
            key, id_file)
        data = httptools.downloadpage(url).data

        url_error = scrapertools.find_single_match(data, 'url=([^&]+)&')
        url = "http://www.vidgg.to//api/player.api.php?cid2=undefined&cid=undefined&numOfErrors=1&errorUrl=%s&errorCode=404&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined" % (
            url_error, key, id_file)
        data = httptools.downloadpage(url).data
        mediaurls = scrapertools.find_multiple_matches(data, 'url=([^&]+)&')

    for i, mediaurl in enumerate(mediaurls):
        title = scrapertools.get_filename_from_url(mediaurl)[-4:] + " Mirror %s [vidgg]" % str(i + 1)
        mediaurl += "|User-Agent=Mozilla/5.0"
        video_urls.append([title, mediaurl])

    for video_url in video_urls:
        logger.info(" %s - %s" % (video_url[0], video_url[1]))

    return video_urls

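The vidgg fallback above talks to the site's legacy player API twice: the first numOfErrors=0 call returns a throwaway URL, which is then echoed back as errorUrl with a fake 404 in a second numOfErrors=1 call that yields the playable links. A compact sketch of just that handshake, with parameter strings copied from the connector; key and id_file are assumed to be already extracted as above:

from core import httptools
from core import scrapertools

API = "http://www.vidgg.to//api/player.api.php"

def player_api_urls(key, id_file):
    # Step 1: "zero errors" request; the url= field it returns is not playable.
    data = httptools.downloadpage(
        API + "?cid2=undefined&cid=undefined&numOfErrors=0&user=undefined"
              "&cid3=undefined&key=%s&file=%s&pass=undefined" % (key, id_file)).data
    url_error = scrapertools.find_single_match(data, 'url=([^&]+)&')
    # Step 2: replay it as errorUrl with a fake 404; now url= holds the real links.
    data = httptools.downloadpage(
        API + "?cid2=undefined&cid=undefined&numOfErrors=1&errorUrl=%s&errorCode=404"
              "&user=undefined&cid3=undefined&key=%s&file=%s&pass=undefined"
              % (url_error, key, id_file)).data
    return scrapertools.find_multiple_matches(data, 'url=([^&]+)&')
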
@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data

    if "File was deleted" in data:
        return False, "[Vidgot] El fichero ha sido borrado de novamov"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data
    data_js = scrapertools.find_single_match(data, "<script type='text/javascript'>(eval\(function.*?)</script>")
    data_js = jsunpack.unpack(data_js)

    mediaurls = scrapertools.find_multiple_matches(data_js, '\{file\s*:\s*"([^"]+)"\}')

    video_urls = []
    for mediaurl in mediaurls:
        ext = scrapertools.get_filename_from_url(mediaurl)[-4:]
        if "mp4" not in ext and "m3u8" not in ext:
            continue
        video_urls.append([ext + " [vidgot]", mediaurl])

    return video_urls

@@ -1,42 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)",
                "url": "\\1"
            }
        ]
    },
    "free": true,
    "id": "vidlox",
    "name": "vidlox",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s1.postimg.cc/wathgtvin/logo-vidlox1.png"
}
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)",
                "url": "\\1"
            }
        ]
    },
    "free": true,
    "id": "vidlox",
    "name": "vidlox",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s1.postimg.cc/wathgtvin/logo-vidlox1.png"
}

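Each server ships a find_videos block like the vidlox one above: a regex pattern paired with a url template. The framework scans page HTML for every pattern and rewrites each match through the template to get a canonical embed URL. A minimal illustration of how such a table can be applied; find_videos here is our illustrative name, and the dedup/ordering behavior is an assumption, since the real dispatcher lives elsewhere in the framework:

import re

# One entry per server, mirroring the JSON: (regex pattern, URL template).
PATTERNS = [
    ("(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)", "\\1"),
]

def find_videos(html):
    # Expand every match through its template, keeping first occurrences only.
    urls = []
    for pattern, template in PATTERNS:
        for match in re.finditer(pattern, html):
            url = match.expand(template)
            if url not in urls:
                urls.append(url)
    return urls
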
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https://vidzella.me/e/([a-zA-Z0-9]+)",
                "url": "https://vidzella.me/e/\\1"
            }
        ]
    },
    "free": true,
    "id": "vidzella",
    "name": "vidzella",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s15.postimg.cc/albqao5pn/vidzella.png"
}

@@ -1,33 +0,0 @@
# Vidzella connector By Alfa development Group
# --------------------------------------------------------

import re
from core import httptools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url)

    if data.code == 404:
        return False, "[Vidzella] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    video_urls = []
    data = httptools.downloadpage(page_url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    logger.debug(data)
    patron = "src=([^ ]+) type='.*?/(.*?)'"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, type in matches:
        video_urls.append(['vidzella %s' % type, url])

    return video_urls

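vidzella's extractor flattens the page before matching: one re.sub strips quotes, newlines, tabs, and whitespace runs so a single loose pattern can pick out src/type pairs regardless of how the markup was formatted. Roughly, under the assumption that the page embeds plain <source src=... type='video/mp4'> tags:

import re

def extract_sources(data):
    # Normalize: drop double quotes and whitespace noise so attributes line up.
    flat = re.sub(r'"|\n|\r|\t|<br>|\s{2,}', "", data)
    # 'src=URL type=video/mp4' fragments now match with one loose pattern.
    return re.compile(r"src=([^ ]+) type='.*?/(.*?)'", re.DOTALL).findall(flat)
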
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "watchers.to/(?:embed-|)([A-z0-9]+)",
                "url": "http://watchers.to/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "watchers",
    "name": "watchers",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/WApzSMn.png?1"
}

@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data
    if "File Not Found" in data:
        return False, "[Watchers] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=%s" % page_url)
    video_urls = []

    data = httptools.downloadpage(page_url).data
    packed = scrapertools.find_single_match(data, '(eval\(function\(p,a,c,k,e.*?)</script>').strip()
    unpack = jsunpack.unpack(packed)

    bloque = scrapertools.find_single_match(unpack, 'sources:\[(.*?)\}\]')
    matches = scrapertools.find_multiple_matches(bloque, 'file:"([^"]+)"(?:,label:"([^"]+)"|\})')
    for media_url, calidad in matches:
        ext = scrapertools.get_filename_from_url(media_url)[-4:]
        if calidad:
            ext += " " + calidad + "p"
        media_url += "|Referer=%s" % page_url
        video_urls.append([ext + ' [watchers]', media_url])

    return video_urls

@@ -18,8 +18,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)"')
    packed = scrapertools.find_single_match(data, "text/javascript'>(.*?)\s*</script>")
    unpacked = jsunpack.unpack(packed)
    media_urls = scrapertools.find_multiple_matches(unpacked, 'file:"([^"]+)"')
    for media_url in media_urls:
        media_url += "|Referer=%s" % page_url
        if ".png" in media_url:
            continue
        ext = "mp4"
        if "m3u8" in media_url:
            ext = "m3u8"

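This hunk, like several connectors above, appends "|Referer=..." or "|User-Agent=..." to each media URL. That is the Kodi convention for attaching HTTP headers to a playable URL: everything after the pipe is parsed as Header=value pairs and sent by the player. A small helper showing the idea; the function name is ours:

import urllib

def with_headers(media_url, headers):
    # Kodi-style: append '|Key=Value&Key2=Value2' so the player sends the headers.
    return media_url + "|" + urllib.urlencode(headers)

# e.g. with_headers(url, {"Referer": page_url, "User-Agent": "Mozilla/5.0"})
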
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "wholecloud.net/(?:video/|embed/?v=)([A-z0-9]+)",
                "url": "http://wholecloud.net/embed/?v=\\1"
            }
        ]
    },
    "free": true,
    "id": "wholecloud",
    "name": "wholecloud",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/yIAQurm.png"
}

@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data

    if "This file no longer exists on our servers" in data:
        return False, "[wholecloud] El archivo ha sido eliminado o no existe"
    if "This video is not yet ready" in data:
        return False, "[wholecloud] El archivo no está listo, se está subiendo o convirtiendo"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    data = httptools.downloadpage(page_url).data

    video_urls = []
    media_urls = scrapertools.find_multiple_matches(data, '<source src="([^"]+)"')
    if not media_urls:
        media_url = scrapertools.find_single_match(data, 'src="/api/toker.php\?f=([^"]+)"')
        ext = scrapertools.get_filename_from_url(media_url)[-4:]
        media_url = "http://wholecloud.net/download.php?file=%s|User-Agent=Mozilla/5.0" % media_url
        video_urls.append([ext + " [wholecloud]", media_url])
    else:
        for media_url in media_urls:
            ext = scrapertools.get_filename_from_url(media_url)[-4:]
            media_url += "|User-Agent=Mozilla/5.0"
            video_urls.append([ext + " [wholecloud]", media_url])

    return video_urls