@@ -35,12 +35,62 @@ def mainlist(item):
                                url= host + "movies/newmovies?page=1", extra1 = 0))
    itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
                               url= host + "movies/getGanres"))
    itemlist.append(item.clone(title="Colecciones", action="colecciones", fanart="http://i.imgur.com/c3HS8kj.png",
                               url= host))
    itemlist.append(item.clone(title="", action=""))
    itemlist.append(item.clone(title="Buscar...", action="search"))

    return itemlist


def colecciones(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = 'href="(/peliculas[^"]+).*?'
    patron += 'title_geo"><span>([^<]+).*?'
    patron += 'title_eng"><span>([^<]+).*?'
    patron += 'src="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedcantidad, scrapedthumbnail in matches:
        if scrapedtitle == "LGTB" and config.get_setting("adult_mode") == 0:
            continue
        title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
        itemlist.append(Item(channel = item.channel,
                             action = "listado_colecciones",
                             thumbnail = host + scrapedthumbnail,
                             title = title,
                             url = host + scrapedurl
                             ))
    return itemlist
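
A quick illustration of what the four-group patron in colecciones() captures; find_multiple_matches is essentially re.findall with re.DOTALL, and the sample HTML below is a made-up miniature of the collections markup:

import re

sample = ('<a href="/peliculas/accion">'
          '<div class="title_geo"><span>accion</span></div>'
          '<div class="title_eng"><span>120</span></div>'
          '<img src="/img/accion.jpg">')
patron = ('href="(/peliculas[^"]+).*?title_geo"><span>([^<]+).*?'
          'title_eng"><span>([^<]+).*?src="([^"]+)')
for url, title, cantidad, thumb in re.findall(patron, sample, re.DOTALL):
    print("%s (%s) -> %s, %s" % (title.capitalize(), cantidad, url, thumb))
# Accion (120) -> /peliculas/accion, /img/accion.jpg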


def listado_colecciones(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
    post = "page=1"
    data = httptools.downloadpage(host + data_url, post=post).data
    patron = 'a href="(/peli[^"]+).*?'
    patron += 'src="([^"]+).*?'
    patron += 'class="c_fichas_title">([^<]+).*?'
    patron += 'Año:.*?href="">([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        item.infoLabels['year'] = scrapedyear
        itemlist.append(item.clone(channel = item.channel,
                                   action = "findvideos",
                                   contentTitle = scrapedtitle,
                                   thumbnail = scrapedthumbnail,
                                   title = scrapedtitle,
                                   url = host + scrapedurl
                                   ))
    tmdb.set_infoLabels(itemlist)
    return itemlist
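
Note that listado_colecciones() only ever posts page=1. A hedged sketch of how the same data_url endpoint could be walked page by page (requests stands in for alfa's httptools here, and host/data_url are placeholders):

import requests

host = "http://example-movies-site.com"   # placeholder host
data_url = "/peliculas/coleccion/data"    # as scraped from data_url: '...'

page = 1
while True:
    data = requests.post(host + data_url, data={"page": page}).text
    if '/peli' not in data:   # no more cards on this page
        break
    page += 1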


def generos(item):
    logger.info()
    itemlist = []

@@ -61,6 +111,9 @@ def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if "Próximamente" in data:
        itemlist.append(Item(channel = item.channel, title = "Próximamente"))
        return itemlist
    patron = 'data-link="([^"]+).*?'
    patron += '>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
@@ -137,7 +190,7 @@ def lista(item):

def search(item, texto):
    logger.info()
    if texto != "":
-       texto = texto.replace(" ", "+")
+       texto = texto.replace(" ", "%20")
        item.url = host + "/movies/search/" + texto
        item.extra = "busqueda"
        try:
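
The escaping swap above presumably exists because this site's search endpoint expects literal percent-encoding rather than "+" for spaces. A quick check with the standard library (Python 3 shown; the addon itself runs on Kodi's Python 2):

from urllib.parse import quote

texto = "star wars"
print(texto.replace(" ", "+"))   # star+wars   (old behaviour)
print(quote(texto))              # star%20wars (what the patched code sends)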


@@ -162,27 +162,20 @@ def novedades_anime(item):

def listado(item):
    logger.info()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    # logger.debug("datito %s" % data)

    url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')

    data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')

    matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
                         '<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
                         re.DOTALL).findall(data)

    itemlist = []

    for thumbnail, url, title, genres, plot in matches:
        title = clean_title(title)
        url = urlparse.urljoin(HOST, url)
        thumbnail = urlparse.urljoin(HOST, thumbnail)

        new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
                        fulltitle=title, plot=plot)

@@ -192,28 +185,22 @@ def listado(item):
        else:
            new_item.show = title
            new_item.context = renumbertools.context(item)

        itemlist.append(new_item)

    if url_pagination:
        url = urlparse.urljoin(HOST, url_pagination)
        title = ">> Pagina Siguiente"

        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)

    if item.plot == "":
        item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')

    data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
    matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)

@@ -221,7 +208,6 @@ def episodios(item):
        title = title.strip()
        url = urlparse.urljoin(item.url, url)
        thumbnail = item.thumbnail

        try:
            episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
        except ValueError:
@@ -229,42 +215,36 @@ def episodios(item):
            episode = 1
        else:
            season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)

        title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))

        itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail, fulltitle=title,
                                   fanart=thumbnail, contentType="episode"))

    return itemlist
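
For reference, the label built above comes out as "Show: 1x05" — season and zero-padded episode appended to the item title:

season, episode = 1, 5
item_title = "Naruto"
print("%s: %sx%s" % (item_title, season, str(episode).zfill(2)))   # Naruto: 1x05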


def findvideos(item):
    logger.info()

    itemlist = []

    _id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
    post = "embed_id=%s" % _id
    data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
    dict_data = jsontools.load(data)

    headers = dict()
    headers["Referer"] = item.url
    data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
    dict_data = jsontools.load(data)

-   list_videos = dict_data["playlist"][0]["sources"]
+   if not dict_data:
+       return itemlist
+   list_videos = dict_data["playlist"][0]
    if isinstance(list_videos, list):
        for video in list_videos:
-           itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
-                                title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
+           itemlist.append(Item(channel=item.channel, action="play", url=video["file"],
+                                show=re.escape(item.show),
+                                title=item.title, plot=item.plot, fulltitle=item.title,
                                 thumbnail=item.thumbnail))

    else:
        for video in list_videos.values():
-           itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
-                                title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
+           video += "|User-Agent=Mozilla/5.0"
+           itemlist.append(Item(channel=item.channel, action="play", url=video, show=re.escape(item.show),
+                                title=item.title, plot=item.plot, fulltitle=item.title,
                                 thumbnail=item.thumbnail))

    return itemlist
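
A hedged reading of why the patched findvideos() branches on isinstance: judging from the two loops, the playlist entry sometimes arrives as a list of {file, label} dicts and sometimes as a dict whose values are plain URLs (hence the appended User-Agent header). The JSON shapes below are assumptions based on that reading:

def iter_videos(list_videos):
    if isinstance(list_videos, list):
        # list shape: [{"file": "http://...", "label": "HD"}, ...]
        for video in list_videos:
            yield video["file"]
    else:
        # dict shape: {"480": "http://...", "720": "http://..."}
        for url in list_videos.values():
            yield url + "|User-Agent=Mozilla/5.0"

print(list(iter_videos({"480": "http://cdn.example/v.mp4"})))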


plugin.video.alfa/channels/animemovil.json (new file, 50 lines)
@@ -0,0 +1,50 @@
{
    "id": "animemovil",
    "name": "Animemovil",
    "active": true,
    "adult": false,
    "language": ["*"],
    "thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
    "banner": "",
    "version": 1,
    "changes": [
        {
            "date": "24/10/2017",
            "description": "Primera version"
        }
    ],
    "categories": [
        "anime"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        }
    ]
}
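
Channel manifests like this one are loaded blindly by the addon, so a quick sanity check that the file parses and declares the settings the .py below reads does not hurt (the path is relative to the repo root):

import json

with open("plugin.video.alfa/channels/animemovil.json") as f:
    manifest = json.load(f)
assert manifest["id"] == "animemovil"
assert {s["id"] for s in manifest["settings"]} >= {"modo_grafico", "perfil"}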


plugin.video.alfa/channels/animemovil.py (new file, 406 lines)
@@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-

import re

from channels import renumbertools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import platformtools, config, logger


__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = int(config.get_setting('perfil', "animemovil"))

# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

host = "http://animemovil.com"


def mainlist(item):
    logger.info()
    itemlist = []

    itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
                         url=host, text_color=color1, contentType="tvshow", extra="recientes"))
    itemlist.append(Item(channel=item.channel, action="listado", title="Animes", thumbnail=item.thumbnail,
                         url="%s/_API/?src=animesRecientes&offset=0" % host, text_color=color1))
    itemlist.append(Item(channel=item.channel, action="emision", title="En emisión", thumbnail=item.thumbnail,
                         url="%s/anime/emision" % host, text_color=color2, contentType="tvshow"))
    itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
                         text_color=color2))

    itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
                         thumbnail=item.thumbnail, text_color=color3))

    itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
    if renumbertools.context:
        itemlist = renumbertools.show_option(item.channel, itemlist)
    return itemlist


def openconfig(item):
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret


def search(item, texto):
    item.url = "%s/?s=%s" % (host, texto.replace(" ", "+"))
    try:
        return recientes(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def recientes(item):
    logger.info()
    item.contentType = "tvshow"
    itemlist = []

    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '<ul class="emision"(.*?)</ul>')
    patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, title, thumb in matches:
        url = host + url

        try:
            contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
        except:
            contentTitle = ""
        contentTitle = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", contentTitle)

        tipo = "tvshow"
        show = contentTitle
        action = "episodios"
        context = renumbertools.context
        if item.extra == "recientes":
            action = "findvideos"
            context = ""
        if not item.extra and (url.endswith("-pelicula/") or url.endswith("-pelicula")):
            tipo = "movie"
            show = ""
            action = "peliculas"
        if not thumb.startswith("http"):
            thumb = "http:%s" % thumb

        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
                                   contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
                                   thumb_=thumb, contentType=tipo, context=context))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        if item.extra and itemlist:
            for it in itemlist:
                it.thumbnail = it.thumb_
    except:
        pass

    return itemlist
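
An illustration of the two-step title clean-up in recientes(): first split off the trailing "<number> Sub Español"-style suffix, then strip the format tags (the sample title is made up):

import re

title = "One Piece 805 Sub Español"
contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
contentTitle = re.sub(r"(?i) Ova| Especiales| Sub| Español| Audio| Latino", "", contentTitle)
print(contentTitle)   # One Piece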


def listado(item):
    logger.info()
    itemlist = []

    data = jsontools.load(httptools.downloadpage(item.url).data)

    for it in data.get("items", []):
        scrapedtitle = it["title"]
        url = "%s/%s" % (host, it["url"])
        thumb = "http://img.animemovil.com/w440-h250-c/%s" % it["img"]
        title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)

        tipo = "tvshow"
        show = title
        action = "episodios"
        if url.endswith("-pelicula/") or url.endswith("-pelicula"):
            tipo = "movie"
            show = ""
            action = "peliculas"

        infoLabels = {'filtro': {"original_language": "ja"}.items()}

        itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
                                   contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
                                   context=renumbertools.context, contentType=tipo))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    if data["buttom"] and itemlist:
        offset = int(scrapertools.find_single_match(item.url, 'offset=(\d+)')) + 1
        url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
        itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, text_color=color2))

    return itemlist
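
The _API endpoint is paged through its offset query parameter, so the next-page URL is simply the current one with the offset bumped, exactly as listado() does:

import re

url = "http://animemovil.com/_API/?src=animesRecientes&offset=0"
offset = int(re.search(r'offset=(\d+)', url).group(1)) + 1
print(re.sub(r'offset=\d+', 'offset=%s' % offset, url))
# http://animemovil.com/_API/?src=animesRecientes&offset=1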


def indices(item):
    logger.info()
    itemlist = []

    if "Índices" in item.title:
        itemlist.append(item.clone(title="Por Género", url="%s/anime/generos/" % host))
        itemlist.append(item.clone(title="Por Letra", url="%s/anime/" % host))
        itemlist.append(item.clone(action="completo", title="Lista completa de Animes",
                                   url="%s/anime/lista/" % host))
    else:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(data, '<div class="letras">(.*?)</div>')

        patron = '<a title="([^"]+)"'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for title in matches:
            if "Letra" in item.title:
                url = "%s/_API/?src=animesLetra&offset=0&letra=%s" % (host, title)
            else:
                url = "%s/_API/?src=animesGenero&offset=0&genero=%s" % (host, title)
            itemlist.append(item.clone(action="listado", url=url, title=title))

    return itemlist


def completo(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '<ul class="listadoAnime">(.*?)</ul>')
    patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, title, thumb in matches:
        url = host + url
        scrapedtitle = title
        thumb = thumb.replace("s90-c", "w440-h250-c")
        title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", scrapedtitle)

        tipo = "tvshow"
        show = title
        action = "episodios"
        if url.endswith("-pelicula/") or url.endswith("-pelicula"):
            tipo = "movie"
            show = ""
            action = "peliculas"
        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, thumbnail=thumb,
                             text_color=color3, contentTitle=title, contentSerieName=show, extra="completo",
                             context=renumbertools.context, contentType=tipo, infoLabels=infoLabels))

    return itemlist


def episodios(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data

    show = scrapertools.find_single_match(data, '<title>\s*([^<]+)\s*</title>')
    show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)

    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')

    bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
    for url, title in matches:
        url = host + url
        epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
        new_item = item.clone(action="findvideos", url=url, title=title, extra="", context=renumbertools.context)
        if epi:
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, show, 1, int(epi))
            new_item.infoLabels["episode"] = episode
            new_item.infoLabels["season"] = season

            new_item.title = "%sx%s %s" % (season, episode, title)
        itemlist.append(new_item)

    if item.infoLabels.get("tmdb_id") or item.extra == "recientes" or item.extra == "completo":
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    if config.get_videolibrary_support() and itemlist:
        itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", contentTitle=item.contentTitle,
                             contentSerieName=item.contentSerieName, text_color=color4, fanart=item.fanart,
                             thumbnail=item.thumbnail))

    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []

    if item.extra == "completo":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = httptools.downloadpage(item.url).data
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')

    bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
    if len(matches) == 1:
        item.url = host + matches[0][0]
        itemlist = findvideos(item)
    else:
        for url, title in matches:
            itemlist.append(item.clone(action="findvideos", title=title, url=url, extra=""))

    return itemlist


def emision(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    bloques = scrapertools.find_multiple_matches(data, '<div class="horario">.*?</i>\s*(.*?)</span>(.*?)</ul>')
    patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    for dia, b in bloques:
        matches = scrapertools.find_multiple_matches(b, patron)
        if matches:
            itemlist.append(item.clone(action="", title=dia, text_color=color1))
        for url, title, thumb in matches:
            url = host + url
            scrapedtitle = "   %s" % title
            title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", title)
            if not thumb.startswith("http"):
                thumb = "http:%s" % thumb

            infoLabels = {'filtro': {"original_language": "ja"}.items()}
            itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
                                       contentTitle=title, contentSerieName=title, extra="recientes",
                                       context=renumbertools.context, infoLabels=infoLabels))

    return itemlist
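
The emission page parsed above groups shows under one block per weekday: the outer regex yields (day, block) pairs and the inner one the shows of each day. A self-contained miniature (the HTML is a made-up reduction of that layout; find_multiple_matches behaves like re.findall with re.DOTALL):

import re

html = ('<div class="horario"><i></i> Lunes</span>'
        '<li><a href="/naruto" title="Naruto"><img src="//img/naruto.jpg"></li></ul>')
bloques = re.findall(r'<div class="horario">.*?</i>\s*(.*?)</span>(.*?)</ul>', html, re.DOTALL)
for dia, b in bloques:
    shows = re.findall(r'<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"', b, re.DOTALL)
    for url, title, thumb in shows:
        print("%s: %s -> %s (%s)" % (dia, title, url, thumb))
# Lunes: Naruto -> /naruto (//img/naruto.jpg)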


def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    id = scrapertools.find_single_match(data, '"id":"([^"]+)"')
    bloque = scrapertools.find_single_match(data, 'ul class="controles">(.*?)</ul>')
    patron = '<li title="([^"]+)" id="[^"]*" host="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    for title, server in matches:
        if title == "Vizard":
            continue
        title = "%s - %s" % (title, item.title)
        post = "host=%s&id=%s" % (server, id)
        itemlist.append(item.clone(action="play", url="http://server-2-stream.animemovil.com/V2/", title=title,
                                   post=post))

    downl = scrapertools.find_single_match(data, '<div class="descargarCap">.*?<a href="([^"]+)"')
    if downl:
        downl = downl.replace("&amp;", "&")
        itemlist.append(item.clone(action="play", title="Descarga - %s" % item.title, url=downl, server="directo"))

    if not itemlist:
        itemlist.append(Item(channel=item.channel, title="No hay vídeos disponibles", action=""))
    if item.extra == "recientes":
        url = scrapertools.find_single_match(data, '<a class="CapList".*?href="([^"]+)"')
        if url:
            url = host + url
            itemlist.append(item.clone(action="episodios", title="Ir a lista de capítulos", url=url, text_color=color1))
    elif item.contentType == "movie" and config.get_library_support():
        if "No hay vídeos disponibles" not in itemlist[0].title:
            itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
                                 action="add_pelicula_to_library", contentTitle=item.contentTitle, text_color=color4,
                                 thumbnail=item.thumbnail, fanart=item.fanart))

    return itemlist


def play(item):
    logger.info()

    if item.server:
        return [item]

    itemlist = []

    data = jsontools.load(httptools.downloadpage(item.url, item.post).data)
    if data["jwplayer"] == False:
        content = data["eval"]["contenido"]
        urls = scrapertools.find_multiple_matches(content, 'file\s*:\s*"([^"]+)"')
        if not urls:
            urls = scrapertools.find_multiple_matches(content, '"GET","([^"]+)"')
        for url in urls:
            if "mediafire" in url:
                data_mf = httptools.downloadpage(url).data
                url = scrapertools.find_single_match(data_mf, 'kNO\s*=\s*"([^"]+)"')
            ext = url[-4:]
            itemlist.insert(0, ["%s [directo]" % ext, url])
    else:
        if data["jwplayer"].get("sources"):
            for source in data["jwplayer"]["sources"]:
                label = source.get("label", "")
                ext = source.get("type", "")
                if ext and "/" in ext:
                    ext = ".%s " % ext.rsplit("/", 1)[1]
                url = source.get("file")
                if "server-3-stream" in url:
                    url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
                itemlist.insert(0, ["%s%s [directo]" % (ext, label), url])
        elif data["jwplayer"].get("file"):
            label = data["jwplayer"].get("label", "")
            url = data["jwplayer"]["file"]
            ext = data["jwplayer"].get("type", "")
            if ext and "/" in ext:
                ext = "%s " % ext.rsplit("/", 1)[1]
            if "server-3-stream" in url:
                url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
            itemlist.insert(0, [".%s%s [directo]" % (ext, label), url])

    return itemlist
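
One detail worth noting in play(): for server-3-stream URLs the real media URL arrives in the Location header of a 30x response, so the code fetches headers only and disables redirect-following. A stand-alone equivalent with requests (the URL is a placeholder):

import requests

resp = requests.head("http://example.com/redirecting-stream", allow_redirects=False)
print(resp.headers.get("location"))   # the direct media URL, if the host answered 30x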


def newest(categoria):
    logger.info()
    item = Item()
    try:
        item.url = "http://skanime.net/"
        item.extra = "novedades"
        itemlist = recientes(item)
    # Catch the exception so the "novedades" channel is not interrupted if one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    return itemlist


@@ -30,7 +30,7 @@ def mainlist(item):

    data = httptools.downloadpage(CHANNEL_HOST).data
    total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
-   titulo = "Peliculas (%s)" % total
+   titulo = "Peliculas"
    itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
    itemlist.append(item.clone(action="peliculas", title="    Novedades", url=CHANNEL_HOST + "pelicula",
                               thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
@@ -283,7 +283,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    if type == "descarga": t_tipo = "Descargar"
    data = data.replace("\n", "")
    if type == "online":
-       patron = '(?is)class="playex.*?visualizaciones'
+       patron = '(?is)class="playex.*?sheader'
    bloque1 = scrapertools.find_single_match(data, patron)
    patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
    match = scrapertools.find_multiple_matches(data, patron)

@@ -303,7 +303,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
    bloque2 = bloque2.replace("\t", "").replace("\r", "")
    patron = '(?s)optn" href="([^"]+)'
    patron += '.*?title="([^\.]+)'
    patron += '.*?alt="([^\.]+)'
-   patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
+   patron += '.*?src="[^>]+"?/>([^<]+)'
    patron += '.*?/span>([^<]+)'

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-

import re
from channelselector import get_thumb

@@ -53,8 +53,7 @@ def listado(item):
    patron += '<b>Categoria:\s*</b>([^&]+)&raquo;\s*([^<]+).*?'
    patron += '<div class="OpcionesDescargasMini">(.*?)</div>'

-   matches = re.compile(patron, re.DOTALL).findall(data)
+   matches = scrapertools.find_multiple_matches(data, patron)
    for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
        # logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)
        # Extract the year from the title and strip the leftovers

@@ -70,7 +69,7 @@ def listado(item):
        thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]

        # Look for the watch-online option
-       patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
+       patron = '<a href="http://estrenos.*?/ver-online-([^"]+)'
        url_ver = scrapertools.find_single_match(opciones, patron)
        if url_ver:
            new_item = Item(channel=item.channel, action="findvideos", title=title,

@@ -124,7 +124,7 @@ def filtro(item):
    patron += '</span>([^<]+)</a>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for url, title in matches:
        if "eroticas" in title and config.get_setting("adult_mode") == 0:
            continue
        itemlist.append(item.clone(action = "peliculas",
                                   title = title.title(),

@@ -32,13 +32,18 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
               'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
               'Cookie': ''}
    data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
    data = data.replace("\n","")
+   cgi_counter = scrapertools.find_single_match(data, '(?s)SRC="(https://www.flashx.tv/counter.cgi\?fx=[^"]+)')
+   cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
+   playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
    # To obtain f and fxfx
-   js_fxfx = scrapertools.find_single_match(data, 'src="(https://www.flashx.tv/js/code.js\?cache=[0-9]+)')
+   js_fxfx = scrapertools.find_single_match(data, 'src="(https://www.flashx.tv/js/code.js.*?cache=[0-9]+)')
    data_fxfx = httptools.downloadpage(js_fxfx).data
    mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
    matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
    for f, v in matches:
        pfxfx += f + "=" + v + "&"
+   coding_url = 'https://www.flashx.tv/flashx.php?%s' % pfxfx
    # {f: 'y', fxfx: '6'}
    flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
    fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')

@@ -51,10 +56,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    headers['Referer'] = "https://www.flashx.tv/"
    headers['Accept'] = "*/*"
    headers['Host'] = "www.flashx.tv"

-   coding_url = 'https://www.flashx.tv/flashx.php?%s' % pfxfx
    headers['X-Requested-With'] = 'XMLHttpRequest'
-   httptools.downloadpage(coding_url, headers=headers)

    # These 2 files must be downloaded, otherwise the site throws an error
+   httptools.downloadpage(coding_url, headers=headers, replace_headers=True)
+   httptools.downloadpage(cgi_counter, headers=headers, replace_headers=True)

    try:
        time.sleep(int(wait_time) + 1)

@@ -63,7 +69,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=

    headers.pop('X-Requested-With')
    headers['Content-Type'] = 'application/x-www-form-urlencoded'
-   data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data
+   data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data

    # If a notice pops up, the verification page is loaded and then the initial one
    # LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ IT IS MANDATORY TO ADD THESE LINES

@@ -71,7 +77,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
    try:
        data = httptools.downloadpage(url_reload, cookies=False).data
-       data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data
+       data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
        # LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ IT IS MANDATORY TO ADD THESE LINES
    except:
        pass
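
To see what the f/fxfx dance above does: code.js embeds something like $.get('...', {f: 'y', fxfx: '6'}), whose dict body is captured, stripped of quotes and spaces, and re-encoded as a query string (pfxfx is assumed to start as an empty string, presumably initialised earlier in the file). A stand-alone rerun of those regexes on a made-up snippet:

import re

data_fxfx = "$.get('/flashx.php', {f: 'y', fxfx: '6'}, function(){});"
mfxfx = re.search(r"get.*?({.*?})", data_fxfx).group(1).replace("'", "").replace(" ", "")
pfxfx = ""
for f, v in re.findall(r'(\w+):(\w+)', mfxfx):
    pfxfx += f + "=" + v + "&"
print('https://www.flashx.tv/flashx.php?%s' % pfxfx)   # ...?f=y&fxfx=6&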


@@ -11,27 +11,20 @@ def test_video_exists(page_url):

    data = httptools.downloadpage(page_url).data
    if "Not Found" in data:
        return False, "[streamixcloud] El archivo no existe o ha sido borrado"
    if "Video is processing" in data:
        return False, "[streamixcloud] El video se está procesando, inténtelo mas tarde"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    data = httptools.downloadpage(page_url).data

    video_urls = []
    packed = scrapertools.find_single_match(data,
                                            "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script")
    data = jsunpack.unpack(packed)

    media_url = scrapertools.find_multiple_matches(data, '\{file:"([^"]+)",')
    # thumb = scrapertools.find_single_match(data, '\],image:"([^"]+)"')

    ext = scrapertools.get_filename_from_url(media_url[0])[-4:]

    for url in media_url:
        video_urls.append(["%s [streamixcloud]" % ext, url])

    return video_urls
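
For context on the p,a,c,k,e,d step: the page hides its jwplayer setup inside Dean Edwards-style packed JavaScript, which jsunpack reverses into plain text; the {file:"..."} entries are then scraped as above. A miniature of the post-unpack scrape (the unpacked string is a made-up stand-in):

import re

unpacked = 'sources: [{file:"http://cdn.example/v.mp4",label:"SD"},{file:"http://cdn.example/v_hd.mp4",label:"HD"}]'
for url in re.findall(r'\{file:"([^"]+)",', unpacked):
    print("%s [streamixcloud] %s" % (url[-4:], url))
# .mp4 [streamixcloud] http://cdn.example/v.mp4
# .mp4 [streamixcloud] http://cdn.example/v_hd.mp4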