Merge branch 'master' into master
plugin.video.alfa/addon.xml
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.8.2" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.8.3" provider-name="Alfa Addon">
 <requires>
 <import addon="xbmc.python" version="2.1.0"/>
 <import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,17 @@
 </assets>
 <news>[B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Arreglos[/B][/COLOR]
-¤ allcalidad ¤ animeflv ¤ streamcloud
-¤ pack +18 ¤ divxtotal ¤ elitetorrent
-¤ estrenosgo ¤ mejortorrent ¤ mejortorrent1
-¤ newpct1 ¤ pelismagnet
+¤ animeshd ¤ gamovideo ¤ elitetorrent
+¤ newpct1 ¤ cinetux ¤ asialiveaction
+¤ gnula ¤ fembed ¤ hdfilmologia
+¤ gvideo ¤ vidlox ¤ javtasty
+¤ qwertyy

+[COLOR green][B]Novedades[/B][/COLOR]
+¤ watchseries ¤ xstreamcdn ¤ videobb
+¤ animespace ¤ tvanime
+
-Agradecimientos a @shlibidon y @nyicris por colaborar con esta versión
+Agradecimientos a @shlibidon por colaborar con esta versión

 </news>
 <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

plugin.video.alfa/channels/animejl.json
@@ -3,8 +3,8 @@
     "name": "AnimeJL",
     "active": true,
     "adult": false,
-    "language": ["esp", "lat", "cast"],
-    "thumbnail": "https://www.animejl.net/img/Logo.png",
+    "language": ["esp", "cast", "lat"],
+    "thumbnail": "https://i.imgur.com/S6foTE9.png",
     "banner": "",
     "categories": [
         "anime"
@@ -27,4 +27,4 @@
         "visible": true
         }
     ]
 }

plugin.video.alfa/channels/animejl.py
@@ -113,9 +113,10 @@ def lista(item):
     patron = 'class="anime"><a href="([^"]+)">'
     patron += '<div class="cover" style="background-image: url\((.*?)\)">.*?<h2>([^<]+)<\/h2>'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    context = renumbertools.context(item)
-    context2 = autoplay.context
-    context.extend(context2)
+    if item.extra != "next":
+        context = renumbertools.context(item)
+        context2 = autoplay.context
+        context.extend(context2)
     for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
         url = scrapedurl
         thumbnail = host + scrapedthumbnail
@@ -133,12 +134,12 @@ def lista(item):
                                               '<a href="([^"]+)" data-ci-pagination-page="\d+" rel="next"')
     next_page_url = scrapertools.decodeHtmlentities(next_page)
     if next_page_url != "":
-        itemlist.append(Item(channel=item.channel,
-                             action="lista",
-                             title=">> Página siguiente",
-                             url=next_page_url,
-                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
-                             ))
+        itemlist.append(item.clone(action="lista",
+                                   title=">> Página siguiente",
+                                   url=next_page_url,
+                                   thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
+                                   extra="next"
+                                   ))
     tmdb.set_infoLabels(itemlist, seekTmdb=True)
     return itemlist

plugin.video.alfa/channels/animespace.json (new file, 52 lines)
@@ -0,0 +1,52 @@
{
    "id": "animespace",
    "name": "AnimeSpace",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "",
    "banner": "",
    "categories": [
        "anime",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "VOSE"
            ]
        },
        {
            "id": "comprueba_enlaces_num",
            "type": "list",
            "label": "Número de enlaces a verificar",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": [ "5", "10", "15", "20" ]
        },
        {
            "id": "include_in_newest_anime",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de anime",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

plugin.video.alfa/channels/animespace.py (new file, 264 lines)
@@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeSpace -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib

from core import httptools
from core import scrapertools
from core import servertools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
from channels import renumbertools

host = "https://animespace.tv/"

__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animespace')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animespace')

IDIOMAS = {'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango']


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Nuevos Episodios",
                         action="new_episodes",
                         thumbnail=get_thumb('new_episodes', auto=True),
                         url=host))

    itemlist.append(Item(channel=item.channel, title="Ultimas",
                         action="list_all",
                         thumbnail=get_thumb('last', auto=True),
                         url=host + 'emision'))

    itemlist.append(Item(channel=item.channel, title="Todas",
                         action="list_all",
                         thumbnail=get_thumb('all', auto=True),
                         url=host + 'animes'))

    itemlist.append(Item(channel=item.channel, title="Anime",
                         action="list_all",
                         thumbnail=get_thumb('anime', auto=True),
                         url=host + 'categoria/anime'))

    itemlist.append(Item(channel=item.channel, title="Películas",
                         action="list_all",
                         thumbnail=get_thumb('movies', auto=True),
                         url=host + 'categoria/pelicula'))

    itemlist.append(Item(channel=item.channel, title="OVAs",
                         action="list_all",
                         thumbnail='',
                         url=host + 'categoria/ova'))

    itemlist.append(Item(channel=item.channel, title="ONAs",
                         action="list_all",
                         thumbnail='',
                         url=host + 'categoria/ona'))


    itemlist.append(Item(channel=item.channel, title="Especiales",
                         action="list_all",
                         thumbnail='',
                         url=host + 'categoria/especial'))

    itemlist.append(Item(channel=item.channel, title="Buscar",
                         action="search",
                         url=host + 'search?q=',
                         thumbnail=get_thumb('search', auto=True),
                         fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
                         ))

    autoplay.show_option(item.channel, itemlist)
    itemlist = renumbertools.show_option(item.channel, itemlist)

    return itemlist


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def list_all(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
        type = type.strip().lower()
        url = scrapedurl
        thumbnail = scrapedthumbnail
        lang = 'VOSE'
        title = scrapedtitle
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        new_item= Item(channel=item.channel,
                       action='episodios',
                       title=title,
                       url=url,
                       thumbnail=thumbnail,
                       language = lang,
                       infoLabels={'year':year}
                       )
        if type != 'anime':
            new_item.contentTitle=title
        else:
            new_item.plot=type
            new_item.contentSerieName=title
        new_item.context = context
        itemlist.append(new_item)

    # Paginacion
    next_page = scrapertools.find_single_match(data,
                                               '"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')

    if next_page != "":
        actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
        itemlist.append(Item(channel=item.channel,
                             action="list_all",
                             title=">> Página siguiente",
                             url=actual_page + next_page,
                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                             ))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    try:
        if texto != '':
            return list_all(item)
        else:
            return []
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def new_episodes(item):
    logger.info()

    itemlist = []

    full_data = get_source(item.url)
    data = scrapertools.find_single_match(full_data, '<section class="caps">.*?</section>')
    patron = '<article.*?<a href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<span class="episode">.*?</i>([^<]+)</span>.*?<h2 class="Title">([^<]+)</h2>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, epi, scrapedtitle in matches:
        url = scrapedurl
        lang = 'VOSE'
        title = '%s - %s' % (scrapedtitle, epi)
        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
                             action='findvideos', language=lang))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = '<a class="item" href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels
    for scrapedurl in matches:
        episode = scrapertools.find_single_match(scrapedurl, '.*?capitulo-(\d+)')
        lang = 'VOSE'
        season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
        title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName)
        url = scrapedurl
        infoLabels['season'] = season
        infoLabels['episode'] = episode

        itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
                             action='findvideos', language=lang, infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    itemlist = itemlist[::-1]
    if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
                 extra1='library'))

    return itemlist


def findvideos(item):
    import urllib
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'id="Opt\d+">.*?src=(.*?) frameborder'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:
        server = ''
        scrapedurl = scrapedurl.replace('"', '')
        new_data = get_source(scrapedurl)

        if "/stream/" in scrapedurl:
            scrapedurl = scrapertools.find_single_match(new_data, '<source src="([^"]+)"')
            server = "directo"
        else:
            scrapedurl = scrapertools.find_single_match(scrapedurl, '.*?url=([^&]+)?')
            scrapedurl = urllib.unquote(scrapedurl)

        if scrapedurl != '':
            itemlist.append(Item(channel=item.channel, title='%s', url=scrapedurl, action='play',
                                 language = item.language, infoLabels=item.infoLabels, server=server))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist


def newest(categoria):
    itemlist = []
    item = Item()
    if categoria == 'anime':
        item.url=host
        itemlist = new_episodes(item)
    return itemlist

plugin.video.alfa/channels/asialiveaction.py
@@ -14,7 +14,7 @@ from lib import jsunpack
 from platformcode import config, logger


-host = "http://www.asialiveaction.com"
+host = "https://asialiveaction.com"

 IDIOMAS = {'Japones': 'Japones'}
 list_language = IDIOMAS.values()
@@ -26,9 +26,9 @@ def mainlist(item):
     autoplay.init(item.channel, list_servers, list_quality)
     itemlist = list()
     itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
-                         url=urlparse.urljoin(host, "/category/pelicula"), type='pl', pag=1))
+                         url=urlparse.urljoin(host, "/pelicula"), type='pl'))
     itemlist.append(Item(channel=item.channel, action="lista", title="Series",
-                         url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
+                         url=urlparse.urljoin(host, "/serie"), type='sr'))
     itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
     itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
     itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
@@ -58,7 +58,7 @@ def category(item):
     for scrapedurl,scrapedtitle in matches:
         if scrapedtitle != 'Próximas Películas':
             if not scrapedurl.startswith("http"): scrapedurl = host + scrapedurl
-            itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, type='cat', pag=0))
+            itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, type='cat'))
     return itemlist

@@ -88,7 +88,6 @@ def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
     item.url = item.url + texto
-    item.pag = 0
     if texto != '':
         return lista(item)

@@ -119,12 +118,13 @@ def lista_a(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     patron = '(?is)Num">.*?href="([^"]+)".*?'
-    patron += 'src="([^"]+)".*?>.*?'
+    patron += 'data-src="([^"]+)".*?>.*?'
     patron += '<strong>([^<]+)<.*?'
     patron += '<td>([^<]+)<.*?'
     patron += 'href.*?>([^"]+)<\/a>'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
+        if not scrapedthumbnail.startswith("http"): scrapedthumbnail = "https:" + scrapedthumbnail
         action = "findvideos"
         if "Serie" in scrapedtype: action = "episodios"
         itemlist.append(item.clone(action=action, title=scrapedtitle, contentTitle=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -140,14 +140,14 @@ def lista(item):
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

-    patron = '<article .*?">'
-    patron += '<a href="([^"]+)"><.*?><figure.*?>' #scrapedurl
-    patron += '<img.*?src="([^"]+)".*?>.*?' #scrapedthumbnail
-    patron += '<h3 class=".*?">([^"]+)<\/h3>' #scrapedtitle
-    patron += '<span.*?>([^"]+)<\/span>.+?' #scrapedyear
-    patron += '<a.+?>([^"]+)<\/a>' #scrapedtype
+    patron = '(?is)class="TPost C">.*?href="([^"]+)".*?' #scrapedurl
+    patron += 'lazy-src="([^"]+)".*?>.*?' #scrapedthumbnail
+    patron += 'title">([^<]+)<.*?' #scrapedtitle
+    patron += 'year">([^<]+)<.*?' #scrapedyear
+    patron += 'href.*?>([^"]+)<\/a>' #scrapedtype
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
         if not scrapedthumbnail.startswith("http"): scrapedthumbnail = "https:" + scrapedthumbnail
         title="%s - %s" % (scrapedtitle,scrapedyear)

         new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -158,16 +158,12 @@ def lista(item):
         else:
             new_item.contentTitle = scrapedtitle
             new_item.action = 'findvideos'

         itemlist.append(new_item)

     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

     #pagination
-    pag = item.pag + 1
-    url_next_page = item.url+"/page/"+str(pag)+"/"
-    if len(itemlist)>19:
-        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', pag=pag))
+    url_next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)"')
+    if len(itemlist)>0 and url_next_page:
+        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista'))
     return itemlist


@@ -189,14 +185,16 @@ def findvideos(item):
         data1 = httptools.downloadpage(url, headers={"Referer":url1}).data
         url = scrapertools.find_single_match(data1, 'src: "([^"]+)"')
         if "embed.php" not in url:
-            itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url))
+            if url:
+                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url))
             continue
         data1 = httptools.downloadpage(url).data
         packed = scrapertools.find_single_match(data1, "(?is)eval\(function\(p,a,c,k,e.*?</script>")
         unpack = jsunpack.unpack(packed)
         urls = scrapertools.find_multiple_matches(unpack, '"file":"([^"]+).*?label":"([^"]+)')
         for url2, quality in urls:
-            itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
+            if url2:
+                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
     # Segundo grupo de enlaces
     matches = scrapertools.find_multiple_matches(data, '<span><a rel="nofollow" target="_blank" href="([^"]+)"')
     for url in matches:
@@ -212,7 +210,8 @@ def findvideos(item):
             language = "Sub. Español"
         matches2 = scrapertools.find_multiple_matches(ser, 'href="([^"]+)')
         for url2 in matches2:
-            itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
+            if url2:
+                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + quality + ") (" + language + ")", language = language, url = url2))
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     # Requerido para FilterTools
     itemlist = filtertools.get_links(itemlist, item, list_language)

plugin.video.alfa/channels/cinetux.py
@@ -242,7 +242,7 @@ def findvideos(item):
         else:
             title = ''
         url = scrapertools.find_single_match(new_data, "src='([^']+)'")
-        url = get_url(url.replace('\\/', '/'))
+        url = get_url(url)
         if url:
             itemlist.append(item.clone(title ='%s'+title, url=url, action='play',
                                        language=IDIOMAS[language], text_color = ""))
@@ -255,7 +255,7 @@ def findvideos(item):
         title = ''
         new_data = httptools.downloadpage(hidden_url).data
         url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"')
-        url = get_url(url.replace('\\/', '/'))
+        url = get_url(url)
         if url:
             itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
                                  language=IDIOMAS[language], infoLabels=item.infoLabels, text_color = ""))
@@ -280,6 +280,7 @@ def findvideos(item):

 def get_url(url):
     logger.info()
+    url = url.replace('\\/', '/')
     if "cinetux.me" in url:
         d1 = httptools.downloadpage(url).data
         if "mail" in url or "drive" in url or "ok.cinetux" in url or "mp4/" in url:
@@ -288,6 +289,8 @@ def get_url(url):
             url = scrapertools.find_single_match(d1, '<iframe src="([^"]+)') + id
             if "drive" in url:
                 url += "/preview"
+            if "FFFFFF" in url:
+                url = scrapertools.find_single_match(d1, 'class="cta" href="([^"]+)"')
         else:
             url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
     url = url.replace("povwideo","powvideo")

@@ -84,9 +84,9 @@ def submenu(item):
         itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
         return itemlist #Algo no funciona, pintamos lo que tenemos

-    patron = '<div class="cab_menu">.*?<\/div>' #Menú principal
+    patron = '<div class="cab_menu"\s*>.*?<\/div>' #Menú principal
     data1 = scrapertools.find_single_match(data, patron)
-    patron = '<div id="menu_langen">.*?<\/div>' #Menú de idiomas
+    patron = '<div id="menu_langen"\s*>.*?<\/div>' #Menú de idiomas
     data1 += scrapertools.find_single_match(data, patron)

     patron = '<a href="(.*?)".*?title="(.*?)"' #Encontrar todos los apartados
@@ -332,7 +332,7 @@ def findvideos(item):
         if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
             return item #Devolvemos el Item de la llamada
         else:
             return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
     #data = unicode(data, "utf-8", errors="replace")

     patron_t = '<div class="enlace_descarga".*?<a href="(.*?\.torrent)"'
@@ -355,11 +355,11 @@ def findvideos(item):
             return item #... y nos vamos

     #Añadimos el tamaño para todos
-    size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B]s)\]')
+    size = scrapertools.find_single_match(item.quality, '\s\[(\d+,?\d*?\s\w\s*[b|B]s*)\]')
     if size:
-        item.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]s\]', '', item.title) #Quitamos size de título, si lo traía
+        item.title = re.sub('\s\[\d+,?\d*?\s\w\s*[b|B]s*\]', '', item.title) #Quitamos size de título, si lo traía
         item.title = '%s [%s]' % (item.title, size) #Agregamos size al final del título
-        item.quality = re.sub('\s\[\d+,?\d*?\s\w\s?[b|B]s\]', '', item.quality) #Quitamos size de calidad, si lo traía
+        item.quality = re.sub('\s\[\d+,?\d*?\s\w\s*[b|B]s*\]', '', item.quality) #Quitamos size de calidad, si lo traía

     if not link_torrent and not link_magnet: #error
         item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
@@ -376,7 +376,7 @@ def findvideos(item):
     if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
         return item #Devolvemos el Item de la llamada
     else:
         return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

     #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
     item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
@@ -385,8 +385,7 @@ def findvideos(item):
         size = generictools.get_torrent_size(link_torrent) #Buscamos el tamaño en el .torrent
         if size:
             item.quality = '%s [%s]' % (item.quality, size) #Agregamos size al final de calidad
-            item.quality = item.quality.replace("GB", "G B").replace("MB", "M B") #Se evita la palabra reservada en Unify
-
+            item.quality = item.quality.replace("GB", "G B").replace("MB", "M B").replace("Gb", "G B").replace("Mb", "M B") #Se evita la palabra reservada en Unify
     #Ahora pintamos el link del Torrent, si lo hay
     if link_torrent: # Hay Torrent ?
         #Generamos una copia de Item para trabajar sobre ella

@@ -3,6 +3,7 @@
 from core import httptools
 from core import scrapertools
 from core import servertools
+from core import tmdb
 from core.item import Item
 from platformcode import config, logger
 from channelselector import get_thumb
@@ -68,6 +69,7 @@ def sub_search(item):
         if "ver-" not in scrapedurl:
             continue
-        contentTitle = scrapedtitle.replace(scrapertools.find_single_match('\[.+', scrapedtitle),"")
+        year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
+        contentTitle = scrapedtitle.replace("(%s)" %year,"").replace("Ver","").strip()
         itemlist.append(Item(action = "findvideos",
                              channel = item.channel,
@@ -77,6 +79,7 @@ def sub_search(item):
                              thumbnail = scrapedthumbnail,
                              url = scrapedurl,
                              ))
+    tmdb.set_infoLabels_itemlist(itemlist, True)
     return itemlist


@@ -89,11 +92,11 @@ def generos(item):
     matches = scrapertools.find_multiple_matches(data, patron)
     for genero, scrapedurl in matches:
         title = scrapertools.htmlclean(genero)
-        url = item.url + scrapedurl
+        if not item.url.startswith("http"): scrapedurl = item.url + scrapedurl
         itemlist.append(Item(channel = item.channel,
                              action = 'peliculas',
                              title = title,
-                             url = url,
+                             url = scrapedurl,
                              viewmode = "movie",
                              first=0))
     itemlist = sorted(itemlist, key=lambda item: item.title)
@@ -124,19 +127,21 @@ def peliculas(item):
     title = scrapedtitle + " " + plot
     if not scrapedurl.startswith("http"):
         scrapedurl = item.url + scrapedurl
-    itemlist.append(Item(channel = item.channel,
-                         action = 'findvideos',
-                         title = title,
-                         url = scrapedurl,
-                         thumbnail = scrapedthumbnail,
-                         plot = plot,
-                         quality=quality
-                         ))
+    year = scrapertools.find_single_match(scrapedurl, "\-(\d{4})\-")
+    contentTitle = scrapedtitle.replace(scrapertools.find_single_match('\[.+', scrapedtitle),"")
+    itemlist.append(Item(action = 'findvideos',
+                         channel = item.channel,
+                         contentTitle = scrapedtitle,
+                         contentType = "movie",
+                         infoLabels = {"year":year},
+                         language=language,
+                         plot = plot,
+                         quality=quality,
+                         title = title,
+                         thumbnail = scrapedthumbnail,
+                         url = scrapedurl
+                         ))
+    tmdb.set_infoLabels_itemlist(itemlist, True)
     #paginacion

     url_next_page = item.url
     first = last
     if next:
@@ -149,9 +154,9 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
-    item.plot = scrapertools.htmlclean(item.plot).strip()
-    item.contentPlot = item.plot
+    #item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
+    #item.plot = scrapertools.htmlclean(item.plot).strip()
+    #item.contentPlot = item.plot
     patron = '<strong>Ver película online.*?>.*?>([^<]+)'
     scrapedopcion = scrapertools.find_single_match(data, patron)
     titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
@@ -167,14 +172,12 @@ def findvideos(item):
         urls = scrapertools.find_multiple_matches(datos, '(?:src|href)="([^"]+)')
         titulo = "Ver en %s " + titulo_opcion
         for url in urls:
-            itemlist.append(Item(channel = item.channel,
-                                 action = "play",
-                                 contentThumbnail = item.thumbnail,
-                                 fulltitle = item.contentTitle,
+            itemlist.append(item.clone(action = "play",
                                  title = titulo,
                                  url = url
                                  ))
     itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     #tmdb.set_infoLabels_itemlist(itemlist, True)
     if itemlist:
         if config.get_videolibrary_support():
             itemlist.append(Item(channel = item.channel, action = ""))

plugin.video.alfa/channels/hdfilmologia.json
@@ -5,7 +5,7 @@
     "adult": false,
     "language": ["esp", "lat", "cast", "vose"],
     "fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
-    "thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png",
+    "thumbnail": "https://hdfilmologia.com/templates/hdfilmologia/images/logo.png",
     "banner": "",
     "categories": [
         "movie",

plugin.video.alfa/channels/hdfilmologia.py
@@ -179,7 +179,7 @@ def genres(item):
     logger.info()
     itemlist = []

-    data = httptools.downloadpage(item.url).data
+    data = httptools.downloadpage(item.url)
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

     patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'
@@ -221,12 +221,11 @@ def findvideos(item):

     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)

-    patron = '(\w+)src\d+="([^"]+)"'
+    patron = '>([^<]+)</a></li><li><a class="src_tab" id="[^"]+" data-src="([^"]+)"'
     matches = re.compile(patron, re.DOTALL).findall(data)

     for lang, url in matches:

-        lang = re.sub(r"1|2|3|4", "", lang)
         server = servertools.get_server_from_url(url)
         if 'dropbox' in url:
             server = 'dropbox'
@@ -243,9 +242,9 @@ def findvideos(item):
             for key in matches:
                 url = 'https://www.dropbox.com/s/%s?dl=1' % (key)
                 server = 'dropbox'
-    languages = {'l': '[COLOR cornflowerblue](LAT)[/COLOR]',
-                 'e': '[COLOR green](CAST)[/COLOR]',
-                 's': '[COLOR red](VOS)[/COLOR]'}
+    languages = {'Latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
+                 'Castellano': '[COLOR green](CAST)[/COLOR]',
+                 'Subtitulado': '[COLOR red](VOS)[/COLOR]'}
     if lang in languages:
         lang = languages[lang]


plugin.video.alfa/channels/javtasty.py
@@ -49,7 +49,8 @@ def lista(item):
     action = "play"
     if config.get_setting("menu_info", "javtasty"):
         action = "menu_info"
-    patron = 'div class="video-item.*?href="([^"]+)".*?'
+    # PURGA los PRIVATE
+    patron = 'div class="video-item\s+".*?href="([^"]+)".*?'
     patron += 'data-original="([^"]+)" '
     patron += 'alt="([^"]+)"(.*?)fa fa-clock-o"></i>([^<]+)<'
     matches = scrapertools.find_multiple_matches(data, patron)
@@ -89,12 +90,15 @@ def play(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    videourl = scrapertools.find_single_match(data, "video_url:\s*'([^']+)'")
-    if videourl:
-        itemlist.append(['.mp4 [directo]', videourl])
+    videourl = scrapertools.find_single_match(data, "video_alt_url2:\s*'([^']+)'")
+    if videourl:
+        itemlist.append(['.mp4 HD [directo]', videourl])
+    videourl = scrapertools.find_single_match(data, "video_alt_url:\s*'([^']+)'")
+    if videourl:
+        itemlist.append(['.mp4 HD [directo]', videourl])
+    videourl = scrapertools.find_single_match(data, "video_url:\s*'([^']+)'")
+    if videourl:
+        itemlist.append(['.mp4 [directo]', videourl])
     if item.extra == "play_menu":
         return itemlist, data
     return itemlist

plugin.video.alfa/channels/newpct1.json
@@ -93,7 +93,7 @@
     "id": "clonenewpct1_channels_list",
     "type": "text",
     "label": "Lista de clones de NewPct1 y orden de uso",
-    "default": "('1', 'descargas2020', 'https://descargas2020.com/', 'movie, tvshow, season, episode', ''), ('1', 'tumejortorrent', 'https://tumejortorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentrapid', 'https://torrentrapid.com/', 'movie, tvshow, season, episode', 'serie_episodios'), ('1', 'pctnew', 'https://pctnew.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentlocura', 'http://torrentlocura.com/', 'movie, tvshow, season, episode', ''), ('1', 'tvsinpagar', 'http://www.tvsinpagar.com/', 'tvshow, season, episode', ''), ('1', 'planetatorrent', 'http://planetatorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'mispelisyseries', 'http://mispelisyseries.com/', 'movie', 'search, listado_busqueda')",
+    "default": "('1', 'descargas2020', 'https://descargas2020.com/', 'movie, tvshow, season, episode', ''), ('0', 'tumejortorrent', 'https://tumejortorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentrapid', 'https://torrentrapid.com/', 'movie, tvshow, season, episode', 'serie_episodios'), ('1', 'pctnew', 'https://pctnew.com/', 'movie, tvshow, season, episode', ''), ('1', 'torrentlocura', 'http://torrentlocura.com/', 'movie, tvshow, season, episode', ''), ('1', 'tvsinpagar', 'http://www.tvsinpagar.com/', 'tvshow, season, episode', ''), ('1', 'planetatorrent', 'http://planetatorrent.com/', 'movie, tvshow, season, episode', ''), ('1', 'mispelisyseries', 'http://mispelisyseries.com/', 'movie', 'search, listado_busqueda')",
     "enabled": true,
     "visible": false
     },
@@ -101,7 +101,7 @@
     "id": "intervenidos_channels_list",
     "type": "text",
     "label": "Lista de canales y clones de NewPct1 intervenidos y orden de sustitución de URLs",
-    "default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force')",
+    "default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('1', 'tumejortorrent', 'descargas2020', '://tumejortorrent.com/', '://descargas2020.com/', '', '', '', '', '', '*', '', 'no')",
     "enabled": true,
     "visible": false
     },

@@ -49,8 +49,6 @@ if host_index == 0:
     for active_clone, channel_clone, host_clone, contentType_clone, info_clone in clone_list:
         if i <= j and active_clone == "1":
             clone_list_random += [clone_list[i]] #... añadimos el clone activo "bueno" a la lista
-        else:
-            break
         i += 1
     if clone_list_random: #Si hay clones en la lista aleatoria...
         clone_list = [random.choice(clone_list_random)] #Seleccionamos un clone aleatorio

plugin.video.alfa/channels/peliculonhd.json
@@ -1,88 +1,88 @@
 {
     "id": "peliculonhd",
     "name": "PeliculonHD",
     "active": true,
     "adult": false,
     "language": ["esp", "lat", "cast"],
-    "thumbnail": "https://peliculonhd.com/wp-content/uploads/2018/09/peliculonnewlogo3-.png",
+    "thumbnail": "https://www.peliculonhd.tv/wp-content/uploads/2018/09/peliculonnewlogo3-.png",
     "banner": "",
     "categories": [
         "movie",
         "tvshow",
         "vos",
         "direct"
     ],
     "settings": [
         {
         "id": "include_in_global_search",
         "type": "bool",
         "label": "Incluir en busqueda global",
         "default": true,
         "enabled": true,
         "visible": true
         },
         {
         "id": "filter_languages",
         "type": "list",
         "label": "Mostrar enlaces en idioma...",
         "default": 0,
         "enabled": true,
         "visible": true,
         "lvalues": [
             "No filtrar",
             "Latino",
             "Castellano",
             "VOSE"
         ]
         },
         {
         "id": "include_in_newest_peliculas",
         "type": "bool",
         "label": "Incluir en Novedades - Peliculas",
         "default": true,
         "enabled": true,
         "visible": true
         },
         {
         "id": "include_in_newest_infantiles",
         "type": "bool",
         "label": "Incluir en Novedades - Infantiles",
         "default": true,
         "enabled": true,
         "visible": true
         },
         {
         "id": "include_in_newest_terror",
         "type": "bool",
         "label": "Incluir en Novedades - Terror",
         "default": true,
         "enabled": true,
         "visible": true
         },
         {
         "id": "include_in_newest_documentales",
         "type": "bool",
         "label": "Incluir en Novedades - Documentales",
         "default": true,
         "enabled": true,
         "visible": true
         },
         {
         "id": "comprueba_enlaces",
         "type": "bool",
         "label": "Verificar si los enlaces existen",
         "default": false,
         "enabled": true,
         "visible": true
         },
         {
         "id": "comprueba_enlaces_num",
         "type": "list",
         "label": "Número de enlaces a verificar",
         "default": 1,
         "enabled": true,
         "visible": "eq(-1,true)",
         "lvalues": [ "5", "10", "15", "20" ]
         }
     ]
 }

@@ -58,14 +58,17 @@ def lista(item):
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<article id="post-\d+".*?'
     patron += '<a href="([^"]+)" title="([^"]+)">.*?'
     patron += '<img data-src="(.*?)".*?'
+    patron += '<div class="post-thumbnail(.*?)<span class="views">.*?'
     patron += '<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
         scrapedplot = ""
+        thumbnail = scrapertools.find_single_match(scrapedthumbnail, 'poster="([^"]+)"')
+        if thumbnail == "":
+            thumbnail = scrapertools.find_single_match(scrapedthumbnail, "data-thumbs='(.*?jpg)")
         title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
         itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
-                              fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
+                              fanart=thumbnail, thumbnail=thumbnail, plot=scrapedplot) )
     next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Next</a>')
     if next_page=="":
         next_page = scrapertools.find_single_match(data,'<li><a class="current">.*?<li><a href=\'([^\']+)\' class="inactive">')
@@ -82,7 +85,17 @@ def play(item):
     url = scrapertools.find_single_match(data,'<meta itemprop="embedURL" content="([^"]+)"')
     url = url.replace("pornhub.com/embed/", "pornhub.com/view_video.php?viewkey=")
     data = httptools.downloadpage(url).data
+    # https://www.spankwire.com/EmbedPlayer.aspx?ArticleId=14049072
+    if "spankwire" in url :
+        data = httptools.downloadpage(item.url).data
+        data = scrapertools.get_match(data,'Copy Embed Code(.*?)For Desktop')
+        patron = '<div class="shareDownload_container__item__dropdown">.*?<a href="([^"]+)"'
+        matches = scrapertools.find_multiple_matches(data, patron)
+        for scrapedurl in matches:
+            url = scrapedurl
+            if url=="#":
+                scrapedurl = scrapertools.find_single_match(data,'playerData.cdnPath480 = \'([^\']+)\'')
+            itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = scrapedurl, url=scrapedurl))

     if "xvideos" in url :
         scrapedurl = scrapertools.find_single_match(data,'setVideoHLS\(\'([^\']+)\'')
     if "pornhub" in url :

plugin.video.alfa/channels/tvanime.json (new file, 54 lines)
@@ -0,0 +1,54 @@
{
    "id": "tvanime",
    "name": "TVAnime",
    "active": true,
    "adult": false,
    "language": ["lat", "cast"],
    "thumbnail": "https://monoschinos.com/image/files/155/45/logo.png",
    "banner": "",
    "categories": [
        "anime",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "VOSE",
                "LAT",
                "CAST"
            ]
        },
        {
            "id": "comprueba_enlaces_num",
            "type": "list",
            "label": "Número de enlaces a verificar",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": [ "5", "10", "15", "20" ]
        },
        {
            "id": "include_in_newest_anime",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de anime",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

plugin.video.alfa/channels/tvanime.py (new file, 275 lines)
@@ -0,0 +1,275 @@
# -*- coding: utf-8 -*-
# -*- Channel TVAnime -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib

from core import httptools
from core import scrapertools
from core import servertools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
from channels import renumbertools

host = "https://monoschinos.com/"

__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animespace')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animespace')

IDIOMAS = {'VOSE': 'VOSE', 'Latino':'LAT', 'Castellano':'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango']


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Nuevos Episodios",
                         action="new_episodes",
                         thumbnail=get_thumb('new_episodes', auto=True),
                         url=host))

    itemlist.append(Item(channel=item.channel, title="Ultimas",
                         action="list_all",
                         thumbnail=get_thumb('last', auto=True),
                         url=host + 'emision'))

    itemlist.append(Item(channel=item.channel, title="Todas",
                         action="list_all",
                         thumbnail=get_thumb('all', auto=True),
                         url=host + 'animes'))

    itemlist.append(Item(channel=item.channel, title="Anime",
                         action="list_all",
                         thumbnail=get_thumb('anime', auto=True),
                         url=host + 'categoria/anime'))

    itemlist.append(Item(channel=item.channel, title="Películas",
                         action="list_all",
                         thumbnail=get_thumb('movies', auto=True),
                         url=host + 'categoria/pelicula'))

    itemlist.append(Item(channel=item.channel, title="OVAs",
                         action="list_all",
                         thumbnail='',
                         url=host + 'categoria/ova'))

    itemlist.append(Item(channel=item.channel, title="ONAs",
                         action="list_all",
                         thumbnail='',
                         url=host + 'categoria/ona'))


    itemlist.append(Item(channel=item.channel, title="Especiales",
                         action="list_all",
                         thumbnail='',
                         url=host + 'categoria/especial'))

    itemlist.append(Item(channel=item.channel, title="Buscar",
                         action="search",
                         url=host + 'search?q=',
                         thumbnail=get_thumb('search', auto=True),
                         fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
                         ))

    autoplay.show_option(item.channel, itemlist)
    itemlist = renumbertools.show_option(item.channel, itemlist)

    return itemlist


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def list_all(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
        type = type.strip().lower()
        url = scrapedurl
        thumbnail = scrapedthumbnail
        if 'latino' in scrapedtitle.lower():
            lang = 'Latino'
        elif 'castellano' in scrapedtitle.lower():
            lang = 'Castellano'
        else:
            lang = 'VOSE'
        title = re.sub('Audio|Latino|Castellano', '', scrapedtitle)
        context = renumbertools.context(item)
        context2 = autoplay.context
        context.extend(context2)
        new_item= Item(channel=item.channel,
                       action='episodios',
                       title=title,
                       url=url,
                       thumbnail=thumbnail,
                       language = lang,
                       infoLabels={'year':year}
                       )
        if type != 'anime':
            new_item.contentTitle=title
        else:
            new_item.plot=type
            new_item.contentSerieName=title
        new_item.context = context
        itemlist.append(new_item)

    # Paginacion
    next_page = scrapertools.find_single_match(data,
                                               '"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')

    if next_page != "":
        actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
        itemlist.append(Item(channel=item.channel,
                             action="list_all",
                             title=">> Página siguiente",
                             url=actual_page + next_page,
                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                             ))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    try:
        if texto != '':
            return list_all(item)
        else:
            return []
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def new_episodes(item):
    logger.info()

    itemlist = []

    full_data = get_source(item.url)
    data = scrapertools.find_single_match(full_data, '<section class="caps">.*?</section>')
    patron = '<article.*?<a href="([^"]+)">.*?src="([^"]+)".*?'
    patron += '<span class="episode">.*?</i>([^<]+)</span>.*?<h2 class="Title">([^<]+)</h2>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, epi, scrapedtitle in matches:
        url = scrapedurl
        if 'latino' in scrapedtitle.lower():
            lang = 'Latino'
        elif 'castellano' in scrapedtitle.lower():
            lang = 'Castellano'
        else:
            lang = 'VOSE'
        scrapedtitle = re.sub('Audio|Latino|Castellano', '', scrapedtitle)
        title = '%s - Episodio %s' % (scrapedtitle, epi)
        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
                             action='findvideos', language=lang))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = '<a class="item" href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels
    for scrapedurl in matches:
        episode = scrapertools.find_single_match(scrapedurl, '.*?episodio-(\d+)')
        lang = item.language
        season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
        title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName)
        url = scrapedurl
        infoLabels['season'] = season
        infoLabels['episode'] = episode

        itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
                             action='findvideos', language=lang, infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    itemlist = itemlist[::-1]
    if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
                 extra1='library'))

    return itemlist


def findvideos(item):
    import urllib
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'id="Opt\d+">.*?src=(.*?) frameborder'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:
        server = ''
        scrapedurl = scrapedurl.replace('"', '')
        new_data = get_source(scrapedurl)

        if "/stream/" in scrapedurl:
            scrapedurl = scrapertools.find_single_match(new_data, '<source src="([^"]+)"')
            server = "directo"
        else:
            scrapedurl = scrapertools.find_single_match(scrapedurl, '.*?url=([^&]+)?')
            scrapedurl = urllib.unquote(scrapedurl)

        if scrapedurl != '':
            itemlist.append(Item(channel=item.channel, title='%s', url=scrapedurl, action='play',
                                 language = item.language, infoLabels=item.infoLabels, server=server))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    return itemlist


def newest(categoria):
    itemlist = []
    item = Item()
    if categoria == 'anime':
        item.url=host
        itemlist = new_episodes(item)
    return itemlist

plugin.video.alfa/channels/watchseries.json (new file, 23 lines)
@@ -0,0 +1,23 @@
{
    "id": "watchseries",
    "name": "WatchSeries",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "https://www2.watchmovie.io/img/icon/new-logo.png",
    "categories": [
        "movie",
        "tvshow",
        "vos"
    ],
    "settings":[
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}

268
plugin.video.alfa/channels/watchseries.py
Normal file
@@ -0,0 +1,268 @@
# -*- coding: utf-8 -*-

import re
import urllib
import base64
import urlparse

from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from channels import autoplay
from platformcode import config, logger


IDIOMAS = {'default': 'VO'}
title2 = {'Action': 'Action2', 'Xmas': 'Christmas', 'Kungfu': 'Martial%20Arts', 'Psychological': 'Genres', 'TV Show': 'TV', 'Sitcom': 'Genres', 'Costume': 'Genres', 'Mythological': 'Genres'}
list_language = IDIOMAS.values()
list_servers = ['directo', 'rapidvideo', 'streamango', 'openload', 'xstreamcdn']
list_quality = ['default']


host = "https://www2.watchmovie.io/"


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(item.clone(title="Películas", action='menu_movies', text_color="0xFFD4AF37", text_bold=True, thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"))
    itemlist.append(item.clone(title='Series', action='menu_series', thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png", text_color="0xFFD4AF37", text_bold=True))
    itemlist.append(
        item.clone(title="Buscar...", action="search", text_color="0xFF5AC0E0", text_bold=True, url=host + 'search.html?keyword=', thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Search.png"))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def menu_movies(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, title="Estrenos", fanart="http://i.imgur.com/c3HS8kj.png", action="novedades_cine", url=host, thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/New%20Releases.png"))
    itemlist.append(
        Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, title="Más Vistas", action="popular", url=host + "popular", extra="popular", thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/All%20Movies%20by%20Watched.png"))
    itemlist.append(Item(channel=item.channel, text_color="0xFFD4AF37", text_bold=True, title="Géneros", action="section", url=host + "popular", thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genres.png"))
    itemlist.append(Item(channel=item.channel, text_color="0xFFD4AF37", text_bold=True, title="Año", action="section", url=host + "popular", thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png"))

    return itemlist


def menu_series(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, text_color="0xFF399437", text_bold=True, action="novedades_episodios", title="Últimos Episodios de:", folder=False, thumbnail=item.thumbnail))
    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title="  Series Tv", url=host + "watch-series", extra="watch-series", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/New%20TV%20Episodes.png', type='tvshows'))
    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title="  Doramas", url=host + "drama", extra="drama", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Asian%20Movies.png', type='tvshows'))
    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title="  Animes", url=host + "anime", extra="anime", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Anime.png', type='anime'))

    return itemlist


def search(item, texto):
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        try:
            return popular(item)
        except:
            itemlist.append(item.clone(url='', title='No match found...', action=''))
    return itemlist


def section(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = []
    if 'Géneros' in item.title:
        patron = '<a href="([^"]+)" class="wpb_button wpb_btn-primary wpb_btn-small ">(.*?)</a>'
        action = 'popular'
        icono = ''
    elif 'Año' in item.title:
        patron = '<a href="([^"]+)" class="wpb_button wpb_btn-info wpb_btn-small ">(.*?)</a>'
        action = 'popular'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:

        url = host + scrapedurl
        title = scrapedtitle
        if 'Géneros' in item.title:
            if title in title2:
                title1 = title2[title]
            else:
                title1 = title
            icono = 'https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/' + title1 + '.png'
        else:
            icono = 'https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png'
        itemlist.append(Item(channel=item.channel,
                             action=action,
                             title=title,
                             url=url,
                             text_color="0xFF5AC0E0",
                             extra="popular",
                             thumbnail=icono
                             ))
    return itemlist


def novedades_episodios(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
    matches = re.compile('<div class="video_likes icon-tag"> (.*?)</div>[\s\S]+?<a href="(.*?)" class="view_more"></a>[\s\S]+?<img src="([^"]+)" alt="" class="imgHome" title="" alt="([^"]+)"[\s\S]+?</li>', re.DOTALL).findall(data)
    itemlist = []
    for episode, url, thumbnail, season in matches:

        if item.extra == "watch-series":
            scrapedinfo = season.split(' - ')
            scrapedtitle = scrapedinfo[0]
            season = scrapertools.find_single_match(scrapedinfo[1], 'Season (\d+)')
            episode = scrapertools.find_single_match(episode, 'Episode (\d+)')
            title = scrapedtitle + " %sx%s" % (season, episode)
        else:
            scrapedtitle = season
            title = scrapedtitle + ' - ' + episode
        url = urlparse.urljoin(host, url)

        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
                        contentSerieName=scrapedtitle)
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
    if url_pagination:
        url = urlparse.urljoin(host + item.extra, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(Item(channel=item.channel, action="novedades_episodios", title=title, url=url, extra=item.extra))
    return itemlist


def novedades_cine(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
    matches = re.compile('<div class="video_likes icon-tag"> (.*?)</div>[\s\S]+?<a href="(.*?)" class="view_more"></a>[\s\S]+?<img src="([^"]+)" alt="" class="imgHome" title="" alt="([^"]+)"[\s\S]+?</li>', re.DOTALL).findall(data)
    itemlist = []
    for episode, url, thumbnail, season in matches:
        scrapedyear = '-'
        title = "%s [%s]" % (season, episode)
        url = urlparse.urljoin(host, url)
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, contentTitle=season, thumbnail=thumbnail, infoLabels={'year': scrapedyear})
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
    if url_pagination:
        url = urlparse.urljoin(host + item.extra, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(Item(channel=item.channel, action="novedades_cine", title=title, url=url))
    return itemlist


def popular(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
    matches = re.compile('<div class="video_image_container sdimg">[\s\S]+?<a href="(.*?)" class="view_more" title="([^"]+)"></a>[\s\S]+?<img src="([^"]+)" alt=""', re.DOTALL).findall(data)
    itemlist = []
    for url, title, thumbnail in matches:
        scrapedyear = '-'
        if "- Season " in title:
            scrapedinfo = title.split(' - Season ')
            title2 = scrapedinfo[0]
            season = scrapedinfo[1]
            url = urlparse.urljoin(host, url) + "/season"
            new_item = Item(channel=item.channel, action="episodios", title=title, contentSerieName=title2, url=url, thumbnail=thumbnail, infoLabels={'season': season})
        elif "-info/" in url:
            url = urlparse.urljoin(host, url)
            url = url.replace("-info/", "/") + "/all"
            new_item = Item(channel=item.channel, action="episodios", title=title, contentSerieName=title, url=url, thumbnail=thumbnail)
        else:
            url = urlparse.urljoin(host, url) + "-episode-0"
            extra = "film"
            new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, extra=extra, contentTitle=title, thumbnail=thumbnail, infoLabels={'year': scrapedyear})
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
    if url_pagination:
        url = urlparse.urljoin(host + item.extra, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(Item(channel=item.channel, action="popular", title=title, url=url))
    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    matches = re.compile('<div class="vid_info"><span><a href="(.*?)" title="(.*?)" class="videoHname"><b>Episode (\d+)', re.DOTALL).findall(data)
    for url, title, episode in matches:
        url = urlparse.urljoin(host, url)
        thumbnail = item.thumbnail
        title = title + " - Ep. " + episode
        if " Season " in title:
            scrapedinfo = title.split(' Season ')
            title = scrapedinfo[0] + " " + infoLabels['season'] + "x" + episode
        infoLabels['episode'] = episode
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             infoLabels=infoLabels
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    if "-episode-0" in item.url:
        data1 = httptools.downloadpage(item.url).data
        if "Page not found</h1>" in data1:
            item.url = item.url.replace("-episode-0", "-episode-1")

    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
    matches = scrapertools.find_multiple_matches(data, 'data-video="(.*?)"')
    url = ''
    urlsub = ''
    urlsub = scrapertools.find_single_match(data, "&sub=(.*?)&cover")
    if urlsub != '':
        urlsub = base64.b64decode(urlsub)
        urlsub = 'https://sub.movie-series.net' + urlsub
    for source in matches:
        if '/streaming.php' in source:
            new_data = httptools.downloadpage("https:" + source).data
            url = scrapertools.find_single_match(new_data, "file: '(https://redirector.*?)'")
            thumbnail = "https://martechforum.com/wp-content/uploads/2015/07/drive-300x300.png"
            if url == "":
                source = source.replace("streaming.php", "load.php")
        elif '/load.php' in source:
            new_data = httptools.downloadpage("https:" + source).data
            url = scrapertools.find_single_match(new_data, "file: '(https://[A-z0-9]+.cdnfile.info/.*?)'")
            thumbnail = "https://vidcloud.icu/img/logo_vid.png"
        else:
            url = source
            thumbnail = ""
        if "https://redirector." in url or "cdnfile.info" in url:
            url = url + "|referer=https://vidcloud.icu/"

        if url != "":
            itemlist.append(Item(channel=item.channel, url=url, title='%s', action='play', plot=item.plot, thumbnail=thumbnail, subtitle=urlsub))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra == 'film':
        itemlist.append(Item(channel=item.channel, title="Añadir a la Videoteca", text_color="yellow",
                             action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                             contentTitle=item.contentTitle
                             ))
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
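Editor's note: findvideos() above pins a Referer by appending "|referer=https://vidcloud.icu/" to the stream URL, the usual Kodi/Alfa convention of carrying request headers after a pipe. A minimal sketch of the consuming side follows; split_url_headers is an illustrative name, not an Alfa or Kodi API.

# Hedged sketch: parse the "url|header=value&header2=value2" convention used
# above. split_url_headers is a hypothetical helper, not part of Alfa.
def split_url_headers(url):
    if "|" not in url:
        return url, {}
    base, raw = url.split("|", 1)
    headers = dict(pair.split("=", 1) for pair in raw.split("&"))
    return base, headers

print(split_url_headers("https://example.test/v.mp4|referer=https://vidcloud.icu/"))
# -> ('https://example.test/v.mp4', {'referer': 'https://vidcloud.icu/'})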
@@ -10,7 +10,6 @@ import urlparse

from platformcode import logger
from decimal import Decimal
from js2py.internals import seval


class Cloudflare:
@@ -47,25 +46,50 @@ class Cloudflare:
            logger.debug("Metodo #2 (headers): NO disponible")
            self.header_data = {}

    def solve_cf(self, body, domain):
        k = re.compile('<div style="display:none;visibility:hidden;" id=".*?">(.*?)<\/div>', re.DOTALL).findall(body)
        k1 = re.compile('function\(p\){var p = eval\(eval.*?atob.*?return \+\(p\)}\(\)', re.DOTALL).findall(body)
        if k1:
            body = body.replace(k1[0], k[0])
        js = re.search(r"setTimeout\(function\(\){\s+(var "
                       "s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n", body).group(1)
        js = re.search(
            r"setTimeout\(function\(\){\s+(var s,t,o,p,b,r,e,a,k,i,n,g,f.+?\r?\n[\s\S]+?a\.value =.+?)\r?\n",
            body
        ).group(1)

        js = re.sub(r"a\.value = ((.+).toFixed\(10\))?", r"\1", js)
        js = re.sub(r'(e\s=\sfunction\(s\)\s{.*?};)', '', js, flags=re.DOTALL|re.MULTILINE)
        js = re.sub(r"\s{3,}[a-z](?: = |\.).+", "", js).replace("t.length", str(len(domain)))
        js = js.replace('; 121', '')
        reemplazar = re.compile('(?is)function\(p\)\{return eval.*?\+p\+"\)"\)}', re.DOTALL).findall(js)
        if reemplazar:
            js = js.replace(reemplazar[0], 't.charCodeAt')
        js = re.sub(r"[\n\\']", "", js)
        js = 'a = {{}}; t = "{}";{}'.format(domain, js)
        result = seval.eval_js_vm(js)
        jsEnv = """
        var t = "{domain}";
        var g = String.fromCharCode;
        o = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
        e = function(s) {{
            s += "==".slice(2 - (s.length & 3));
            var bm, r = "", r1, r2, i = 0;
            for (; i < s.length;) {{
                bm = o.indexOf(s.charAt(i++)) << 18 | o.indexOf(s.charAt(i++)) << 12 | (r1 = o.indexOf(s.charAt(i++))) << 6 | (r2 = o.indexOf(s.charAt(i++)));
                r += r1 === 64 ? g(bm >> 16 & 255) : r2 === 64 ? g(bm >> 16 & 255, bm >> 8 & 255) : g(bm >> 16 & 255, bm >> 8 & 255, bm & 255);
            }}
            return r;
        }};
        function italics (str) {{ return '<i>' + this + '</i>'; }};
        var document = {{
            getElementById: function () {{
                return {{'innerHTML': '{innerHTML}'}};
            }}
        }};
        {js}
        """
        innerHTML = re.search('<div(?: [^<>]*)? id="([^<>]*?)">([^<>]*?)<\/div>', body, re.MULTILINE | re.DOTALL)
        innerHTML = innerHTML.group(2).replace("'", r"\'") if innerHTML else ""
        import js2py
        from jsc import jsunc
        js = jsunc(jsEnv.format(domain=domain, innerHTML=innerHTML, js=js))
        def atob(s):
            return base64.b64decode('{}'.format(s)).decode('utf-8')
        js2py.disable_pyimport()
        context = js2py.EvalJs({'atob': atob})
        result = context.eval(js)
        return float(result)

    @property
    def wait_time(self):
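Editor's note: the reworked solve_cf() above hands the de-obfuscated challenge to js2py and exposes Python's base64 decoding as the browser's atob. A toy sketch of that bridge, assuming only that js2py is installed; the JS expression is a made-up example.

# Toy sketch of the js2py + atob bridge used by solve_cf above;
# "NDI=" is just base64 for "42".
import base64
import js2py

def atob(s):
    return base64.b64decode('{}'.format(s)).decode('utf-8')

js2py.disable_pyimport()
context = js2py.EvalJs({'atob': atob})
print(context.eval('atob("NDI=")'))  # -> '42'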
@@ -32,7 +32,6 @@ import urlparse
from StringIO import StringIO
from threading import Lock

from core.cloudflare import Cloudflare
from platformcode import config, logger
from platformcode.logger import WebErrorException

@@ -76,21 +75,21 @@ def get_url_headers(url):
    return url + "|" + "&".join(["%s=%s" % (h, headers[h]) for h in headers])


def load_cookies():
def load_cookies(alfa_s=False):
    cookies_lock.acquire()
    if os.path.isfile(ficherocookies):
        logger.info("Leyendo fichero cookies")
        if not alfa_s: logger.info("Leyendo fichero cookies")
        try:
            cj.load(ficherocookies, ignore_discard=True)
        except:
            logger.info("El fichero de cookies existe pero es ilegible, se borra")
            if not alfa_s: logger.info("El fichero de cookies existe pero es ilegible, se borra")
            os.remove(ficherocookies)
    cookies_lock.release()


def save_cookies():
def save_cookies(alfa_s=False):
    cookies_lock.acquire()
    logger.info("Guardando cookies...")
    if not alfa_s: logger.info("Guardando cookies...")
    cj.save(ficherocookies, ignore_discard=True)
    cookies_lock.release()

@@ -99,7 +98,7 @@ load_cookies()


def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=True, cookies=True, replace_headers=False,
                 add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0, count_retries_tot=5, random_headers=False, ignore_response_code=False, alfa_s=False, proxy=True, proxy_web=False, forced_proxy=None, proxy_retries=1):
                 add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0, count_retries_tot=5, random_headers=False, ignore_response_code=False, alfa_s=False, proxy=True, proxy_web=False, proxy_addr_forced=None, forced_proxy=None, proxy_retries=1):
    """
    Abre una url y retorna los datos obtenidos

@@ -174,19 +173,28 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
    proxy_CF_addr = ''
    proxy_web_name = ''
    proxy_log = ''
    import proxytools

    try:
        if (proxy or proxy_web) and (forced_proxy or proxytools.channel_proxy_list(url, forced_proxy=forced_proxy)):
        if (proxy or proxy_web) and (forced_proxy or proxy_addr_forced or channel_proxy_list(url, forced_proxy=forced_proxy)):
            import proxytools
            proxy_addr, proxy_CF_addr, proxy_web_name, proxy_log = proxytools.get_proxy_addr(url, post=post, forced_proxy=forced_proxy)
            if proxy_addr_forced and proxy_log:
                import scrapertools
                proxy_log = scrapertools.find_single_match(str(proxy_addr_forced), "{'http.*':\s*'(.*?)'}")

            if proxy and proxy_addr:
                if proxy_addr_forced: proxy_addr = proxy_addr_forced
                handlers.append(urllib2.ProxyHandler(proxy_addr))
                proxy_stat = ', Proxy Direct ' + proxy_log
            elif proxy and proxy_CF_addr:
                if proxy_addr_forced: proxy_CF_addr = proxy_addr_forced
                handlers.append(urllib2.ProxyHandler(proxy_CF_addr))
                proxy_stat = ', Proxy CF ' + proxy_log
            elif proxy and not proxy_addr and not proxy_CF_addr:
            elif proxy and proxy_addr_forced:
                proxy_addr = proxy_addr_forced
                handlers.append(urllib2.ProxyHandler(proxy_addr))
                proxy_stat = ', Proxy Direct ' + proxy_log
            elif proxy and not proxy_addr and not proxy_CF_addr and not proxy_addr_forced:
                proxy = False
                if not proxy_web_name:
                    proxy_addr, proxy_CF_addr, proxy_web_name, proxy_log = proxytools.get_proxy_addr(url, forced_proxy='Total')
@@ -335,7 +343,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
        raise WebErrorException(urlparse.urlparse(url)[1])

    if cookies:
        save_cookies()
        save_cookies(alfa_s=alfa_s)

    if not alfa_s:
        logger.info("Encoding: %s" % (response["headers"].get('content-encoding')))
@@ -362,6 +370,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr

    # Anti Cloudflare
    if bypass_cloudflare and count_retries < count_retries_tot:
        from core.cloudflare import Cloudflare
        cf = Cloudflare(response)
        if cf.is_cloudflare:
            count_retries += 1
@@ -370,15 +379,15 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
            auth_url = cf.get_url()
            if not alfa_s:
                logger.info("Autorizando... intento %d url: %s" % (count_retries, auth_url))
            tt = downloadpage(auth_url, headers=request_headers, replace_headers=True, count_retries=count_retries, ignore_response_code=True, count_retries_tot=count_retries_tot, proxy=proxy, proxy_web=proxy_web)
            tt = downloadpage(auth_url, headers=request_headers, replace_headers=True, count_retries=count_retries, ignore_response_code=True, count_retries_tot=count_retries_tot, proxy=proxy, proxy_web=proxy_web, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
            if tt.code == 403:
                tt = downloadpage(url, headers=request_headers, replace_headers=True, count_retries=count_retries, ignore_response_code=True, count_retries_tot=count_retries_tot, proxy=proxy, proxy_web=proxy_web)
                tt = downloadpage(url, headers=request_headers, replace_headers=True, count_retries=count_retries, ignore_response_code=True, count_retries_tot=count_retries_tot, proxy=proxy, proxy_web=proxy_web, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
            if tt.sucess:
                if not alfa_s:
                    logger.info("Autorización correcta, descargando página")
                resp = downloadpage(url=response["url"], post=post, headers=headers, timeout=timeout,
                                    follow_redirects=follow_redirects, count_retries=count_retries,
                                    cookies=cookies, replace_headers=replace_headers, add_referer=add_referer, proxy=proxy, proxy_web=proxy_web, count_retries_tot=count_retries_tot)
                                    cookies=cookies, replace_headers=replace_headers, add_referer=add_referer, proxy=proxy, proxy_web=proxy_web, count_retries_tot=count_retries_tot, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
                response["sucess"] = resp.sucess
                response["code"] = resp.code
                response["error"] = resp.error
@@ -435,6 +444,30 @@ def random_useragent():
            return UserAgentIem

    return default_headers["User-Agent"]


def channel_proxy_list(url, forced_proxy=None):
    import base64
    import ast
    import scrapertools

    try:
        proxy_channel_bloqued_str = base64.b64decode(config.get_setting('proxy_channel_bloqued')).decode('utf-8')
        proxy_channel_bloqued = dict()
        proxy_channel_bloqued = ast.literal_eval(proxy_channel_bloqued_str)
    except:
        logger.debug('Proxytools no inicializado correctamente')
        return False

    if not url.endswith('/'):
        url += '/'
    if scrapertools.find_single_match(url, '(?:http.*:\/\/)?([^\?|\/]+)(?:\?|\/)') in proxy_channel_bloqued:
        if forced_proxy:
            return True
        if 'ON' in proxy_channel_bloqued[scrapertools.find_single_match(url, '(?:http.*:\/\/)?([^\?|\/]+)(?:\?|\/)')]:
            return True

    return False


class NoRedirectHandler(urllib2.HTTPRedirectHandler):
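Editor's note: channel_proxy_list() above expects the proxy_channel_bloqued setting to be a base64-encoded Python dict literal mapping domains to 'ON'/'OFF' flags. A self-contained sketch of that round trip; the domains below are invented.

# Round-trip sketch for the proxy_channel_bloqued format decoded above;
# the domains and flags are made-up examples.
import ast
import base64

blocked = {'example.org': 'ON', 'other.example': 'OFF'}
setting = base64.b64encode(str(blocked).encode('utf-8'))

decoded = ast.literal_eval(base64.b64decode(setting).decode('utf-8'))
assert decoded == blocked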
File diff suppressed because one or more lines are too long
@@ -10,25 +10,6 @@ from core import httptools
from platformcode import logger


def downloadpage(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None):
    response = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=follow_redirects,
                                      timeout=timeout)
    if header_to_get:
        return response.headers.get(header_to_get)
    else:
        return response.data


def downloadpageGzip(url):
    response = httptools.downloadpage(url, add_referer=True)
    return response.data


def getLocationHeaderFromResponse(url):
    response = httptools.downloadpage(url, only_headers=True)
    return response.headers.get("location")


def get_header_from_response(url, header_to_get="", post=None, headers=None):
    header_to_get = header_to_get.lower()
    response = httptools.downloadpage(url, post=post, headers=headers, only_headers=True)
@@ -48,11 +29,6 @@ def printMatches(matches):
        i = i + 1


def get_match(data, patron, index=0):
    matches = re.findall(patron, data, flags=re.DOTALL)
    return matches[index]


def find_single_match(data, patron, index=0):
    try:
        matches = re.findall(patron, data, flags=re.DOTALL)

@@ -18,10 +18,6 @@ def printMatches(matches):
        i = i + 1


def get_match(data, patron, index=0):
    return find_single_match(data, patron, index=0)


def find_single_match(data, patron, index=0):
    try:
        matches = re.findall(patron, data, flags=re.DOTALL)
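Editor's note: the refactor above turns get_match into a thin wrapper over find_single_match, so a failed match presumably degrades to an empty result instead of an IndexError (the try body is cut off in this hunk, so that tail is an assumption). A sketch of the contrast with simplified stand-ins:

# Simplified stand-ins for the two behaviours contrasted above; not the
# actual Alfa helpers.
import re

def get_match_old(data, patron, index=0):
    return re.findall(patron, data, flags=re.DOTALL)[index]   # IndexError on no match

def find_single_match(data, patron, index=0):
    try:
        return re.findall(patron, data, flags=re.DOTALL)[index]
    except:
        return ""

print(find_single_match("abc", "(x)"))   # -> "" instead of raising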
@@ -1841,7 +1841,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
    item.category = channel_alt.capitalize()
    category = "'%s'" % channel_alt
    channel_py_alt = 'xyz123'
    if channel in fail_over_list:                               #Si es un clone de Newpct1, se actualiza el canal y la categoría
        item.channel = channel_py
        channel_py_alt = "'%s'" % channel_py
        if item.channel_host:                                   #y se borran resto de pasadas anteriores
@@ -1948,15 +1948,20 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F

    if lookup == True:
        overwrite = False                                       #Solo avisamos si hay cambios
    i = 0
    for activo, canal_org, canal_des, url_org, url_des, patron1, patron2, patron3, patron4, patron5, content_inc, content_exc, ow_force in intervencion_list:
        i += 1
        opt = ''
        #Es esta nuestra entrada?
        if activo == '1' and (canal_org == channel_alt or canal_org == item.category.lower() or channel_alt == 'videolibrary' or ow_force == 'del' or ow_force == 'emerg'):

            if ow_force == 'del' or ow_force == 'emerg':        #Si es un borrado de estructuras erroneas, hacemos un proceso aparte
                canal_des_def = canal_des                       #Si hay canal de sustitución para item.library_urls, lo usamos
                if item.url:
                    logger.debug('INTERV. LIST: ' + str(intervencion_list[i-1]) + ' / CHANNEL: ' + str(channel_alt) + ' / URL: ' + str(item.url))

            if ow_force == 'del' or ow_force == 'emerg':        #Si es un borrado de estructuras erroneas, hacemos un proceso aparte
                canal_des_def = canal_des                       #Si hay canal de sustitución para item.library_urls, lo usamos
                if not canal_des_def and canal_org in item.library_urls and len(item.library_urls) == 1:    #Si no, lo extraemos de la url
                    canal_des_def = scrapertools.find_single_match(item.library_urls[canal_org], 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower()   #salvamos la url actual de la estructura a borrar
                url_total = ''
                if item.url:
                    url_total = item.url                        #Si existe item.url, lo salvamos para futuro uso
@@ -1986,8 +1991,8 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
                    if canal_vid_alt not in intervencion:       #... la sustituimos por la primera válida
                        item.url = url_vid
                        break
                if canal_vid_alt in fail_over_list:             #Si es un clone de Newpct1, salvamos la nueva categoría
                    item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').lower()   #Salvamos categoría
                else:
                    item.category = canal_vid.capitalize()      #si no, salvamos nueva categoría
                logger.error('item.library_urls ACTUALIZADA: ' + str(item.library_urls))
@@ -2000,7 +2005,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
            else:
                if channel_alt == 'videolibrary':               #Viene de videolibrary.list_movies: IMPRESCINDIBLE
                    for canal_vid, url_vid in item.library_urls.items():
                        if canal_org != canal_vid:              #Miramos si canal_org de la regla está en item.library_urls
                            continue
                        else:
                            channel_alt = canal_org             #Sí, ponermos el nombre del canal de origen
@@ -2009,10 +2014,10 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
                    channel_alt = channel_py
                if channel_alt == 'videolibrary':
                    continue
            if item.contentType == "list":                      #Si viene de Videolibrary, le cambiamos ya el canal
                if item.channel != channel_py:
                    item.channel = canal_des                    #Cambiamos el canal. Si es clone, lo hace el canal
                continue                                        #Salimos sin hacer nada más. item está casi vacío
            if item.contentType not in content_inc and "*" not in content_inc:     #Está el contenido el la lista de incluidos
                continue
            if item.contentType in content_exc:                 #Está el contenido excluido?
@@ -2030,7 +2035,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
                continue                                        #... una intervención que afecte solo a una región
            if ow_force == 'no' and it.library_urls:            #Esta regla solo vale para findvideos...
                continue                                        #... salidmos si estamos actualizando
            if lookup == True:                                  #Queremos que el canal solo visualice sin migración?
                if ow_force != 'no':
                    overwrite = True                            #Avisamos que hay cambios
                continue                                        #Salimos sin tocar archivos
@@ -2052,14 +2057,14 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
                if patron5:                                     #Hay más expresión regex?
                    url += scrapertools.find_single_match(url_total, patron5)      #La aplicamos a url
                if url:
                    url_total = url                             #Guardamos la suma de los resultados intermedios
            if item.channel == channel_py or channel in fail_over_list:            #Si es Newpct1...
                if item.contentType == "tvshow":
                    url_total = re.sub(r'\/\d+\/?$', '', url_total)     #parece que con el título encuentra la serie, normalmente...
            update_stat += 1                                    #Ya hemos actualizado algo
            canal_org_des_list += [(canal_org, canal_des, url_total, opt, ow_force)]    #salvamos el resultado para su proceso

    if update_stat > 0 or delete_stat > 0:                      #Ha habido alguna actualización o borrado? Entonces salvamos
        if (update_stat > 0 and path != False) or item.ow_force == '1':
            logger.error('** Lista de Actualizaciones a realizar: ' + str(canal_org_des_list))
            for canal_org_def, canal_des_def, url_total, opt_def, ow_force_def in canal_org_des_list:   #pasamos por todas las "parejas" cambiadas
@@ -2072,7 +2077,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
                item.library_urls.update({canal_des_def: url_total})
                it.library_urls = item.library_urls
                if item.channel != channel_py and item.channel != 'videolibrary':
                    item.channel = canal_des_def                #Cambiamos el canal. Si es clone, lo hace el canal
                if channel_alt == item.category.lower():        #Actualizamos la Categoría y si la tenía
                    item.category = item.channel.capitalize()
                if ow_force_def == 'force' and item.contentType != 'movie':        #Queremos que el canal revise la serie entera?
@@ -2082,7 +2087,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F

                if it.library_urls and path != False and ow_force_def != 'no':     #Continuamos si hay .nfo, path, y queremos actualizarlo
                    item.update_next = '1'
                    del item.update_next                        #Borramos estos campos para forzar la actualización ya
                    it.update_next = '1'
                    del it.update_next
@@ -2091,7 +2096,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
            canal_org_des_list_ALT = []                         #Creamos esta lista para salvar las parejas
            canal_org_des_list_ALT.extend(canal_org_des_list)   #... y borrar de la original las web caidas
            for canal_org_def, canal_des_def, url_total, opt_def, ow_force_def in canal_org_des_list_ALT:   #pasamos por las "parejas" a borrar
                if "magnet:" in url_total or type(url_total) != str:       #Si la url es un Magnet, o es una lista, pasamos
                    i += 1
                    continue
                try:
@@ -2202,7 +2207,7 @@ def redirect_clone_newpct1(item, head_nfo=None, it=None, path=False, overwrite=F
            else:
                logger.error('Error en FINDVIDEOS: ' + archivo + ' / Regla: ' + canal_org_def + ', ' + opt_def + ', ' + ow_force_def)

            if ow_force_def == 'emerg' and opt_def == '2':      #Si era una operación para borrar urls de emergencia ...
                if it.emergency_urls and not isinstance(it.emergency_urls, dict):
                    del it.emergency_urls
                if it.emergency_urls and it.emergency_urls.get(item_movie.channel, False):
@@ -155,7 +155,7 @@ def limited(func):
inf = float('inf')


def Literal(type, value, raw, regex=None):
def Literal(type, value, raw, regex=None, comments=None):
    if regex:  # regex
        return 'JsRegExp(%s)' % repr(compose_regex(value))
    elif value is None:  # null
@@ -165,12 +165,12 @@ def Literal(type, value, raw, regex=None):
        return 'Js(%s)' % repr(value) if value != inf else 'Js(float("inf"))'


def Identifier(type, name):
def Identifier(type, name, comments=None):
    return 'var.get(%s)' % repr(name)


@limited
def MemberExpression(type, computed, object, property):
def MemberExpression(type, computed, object, property, comments=None):
    far_left = trans(object)
    if computed:  # obj[prop] type accessor
        # may be literal which is the same in every case so we can save some time on conversion
@@ -183,12 +183,12 @@ def MemberExpression(type, computed, object, property):
    return far_left + '.get(%s)' % prop


def ThisExpression(type):
def ThisExpression(type, comments=None):
    return 'var.get(u"this")'


@limited
def CallExpression(type, callee, arguments):
def CallExpression(type, callee, arguments, comments=None):
    arguments = [trans(e) for e in arguments]
    if callee['type'] == 'MemberExpression':
        far_left = trans(callee['object'])
@@ -210,14 +210,14 @@ def CallExpression(type, callee, arguments):
# ========== ARRAYS ============


def ArrayExpression(type, elements):  # todo fix null inside problem
def ArrayExpression(type, elements, comments=None):  # todo fix null inside problem
    return 'Js([%s])' % ', '.join(trans(e) if e else 'None' for e in elements)


# ========== OBJECTS =============


def ObjectExpression(type, properties):
def ObjectExpression(type, properties, comments=None):
    name = inline_stack.require('Object')
    elems = []
    after = ''
@@ -241,7 +241,7 @@ def ObjectExpression(type, properties):
    return name


def Property(type, kind, key, computed, value, method, shorthand):
def Property(type, kind, key, computed, value, method, shorthand, comments=None):
    if shorthand or computed:
        raise NotImplementedError(
            'Shorthand and Computed properties not implemented!')
@@ -256,7 +256,7 @@ def Property(type, kind, key, computed, value, method, shorthand):


@limited
def UnaryExpression(type, operator, argument, prefix):
def UnaryExpression(type, operator, argument, prefix, comments=None):
    a = trans(
        argument, standard=True
    )  # unary involve some complex operations so we cant use line shorteners here
@@ -271,7 +271,7 @@ def UnaryExpression(type, operator, argument, prefix):


@limited
def BinaryExpression(type, operator, left, right):
def BinaryExpression(type, operator, left, right, comments=None):
    a = trans(left)
    b = trans(right)
    # delegate to our friends
@@ -279,7 +279,7 @@ def BinaryExpression(type, operator, left, right):


@limited
def UpdateExpression(type, operator, argument, prefix):
def UpdateExpression(type, operator, argument, prefix, comments=None):
    a = trans(
        argument, standard=True
    )  # also complex operation involving parsing of the result so no line length reducing here
@@ -287,7 +287,7 @@ def UpdateExpression(type, operator, argument, prefix):


@limited
def AssignmentExpression(type, operator, left, right):
def AssignmentExpression(type, operator, left, right, comments=None):
    operator = operator[:-1]
    if left['type'] == 'Identifier':
        if operator:
@@ -319,12 +319,12 @@ six


@limited
def SequenceExpression(type, expressions):
def SequenceExpression(type, expressions, comments=None):
    return reduce(js_comma, (trans(e) for e in expressions))


@limited
def NewExpression(type, callee, arguments):
def NewExpression(type, callee, arguments, comments=None):
    return trans(callee) + '.create(%s)' % ', '.join(
        trans(e) for e in arguments)

@@ -332,7 +332,7 @@ def NewExpression(type, callee, arguments):
@limited
def ConditionalExpression(
        type, test, consequent,
        alternate):  # caused plenty of problems in my home-made translator :)
        alternate, comments=None):  # caused plenty of problems in my home-made translator :)
    return '(%s if %s else %s)' % (trans(consequent), trans(test),
                                   trans(alternate))

@@ -340,49 +340,49 @@ def ConditionalExpression(
# =========== STATEMENTS =============


def BlockStatement(type, body):
def BlockStatement(type, body, comments=None):
    return StatementList(
        body)  # never returns empty string! In the worst case returns pass\n


def ExpressionStatement(type, expression):
def ExpressionStatement(type, expression, comments=None):
    return trans(expression) + '\n'  # end expression space with new line


def BreakStatement(type, label):
def BreakStatement(type, label, comments=None):
    if label:
        return 'raise %s("Breaked")\n' % (get_break_label(label['name']))
    else:
        return 'break\n'


def ContinueStatement(type, label):
def ContinueStatement(type, label, comments=None):
    if label:
        return 'raise %s("Continued")\n' % (get_continue_label(label['name']))
    else:
        return 'continue\n'


def ReturnStatement(type, argument):
def ReturnStatement(type, argument, comments=None):
    return 'return %s\n' % (trans(argument)
                            if argument else "var.get('undefined')")


def EmptyStatement(type):
def EmptyStatement(type, comments=None):
    return 'pass\n'


def DebuggerStatement(type):
def DebuggerStatement(type, comments=None):
    return 'pass\n'


def DoWhileStatement(type, body, test):
def DoWhileStatement(type, body, test, comments=None):
    inside = trans(body) + 'if not %s:\n' % trans(test) + indent('break\n')
    result = 'while 1:\n' + indent(inside)
    return result


def ForStatement(type, init, test, update, body):
def ForStatement(type, init, test, update, body, comments=None):
    update = indent(trans(update)) if update else ''
    init = trans(init) if init else ''
    if not init.endswith('\n'):
@@ -398,7 +398,7 @@ def ForStatement(type, init, test, update, body):
    return result


def ForInStatement(type, left, right, body, each):
def ForInStatement(type, left, right, body, each, comments=None):
    res = 'for PyJsTemp in %s:\n' % trans(right)
    if left['type'] == "VariableDeclaration":
        addon = trans(left)  # make sure variable is registered
@@ -417,7 +417,7 @@ def ForInStatement(type, left, right, body, each):
    return res


def IfStatement(type, test, consequent, alternate):
def IfStatement(type, test, consequent, alternate, comments=None):
    # NOTE we cannot do elif because function definition inside elif statement would not be possible!
    IF = 'if %s:\n' % trans(test)
    IF += indent(trans(consequent))
@@ -427,7 +427,7 @@ def IfStatement(type, test, consequent, alternate):
    return IF + ELSE


def LabeledStatement(type, label, body):
def LabeledStatement(type, label, body, comments=None):
    # todo consider using smarter approach!
    inside = trans(body)
    defs = ''
@@ -448,7 +448,7 @@ def LabeledStatement(type, label, body):
    return defs + inside


def StatementList(lis):
def StatementList(lis, comments=None):
    if lis:  # ensure we don't return empty string because it may ruin indentation!
        code = ''.join(trans(e) for e in lis)
        return code if code else 'pass\n'
@@ -456,7 +456,7 @@ def StatementList(lis):
    return 'pass\n'


def PyimportStatement(type, imp):
def PyimportStatement(type, imp, comments=None):
    lib = imp['name']
    jlib = 'PyImport_%s' % lib
    code = 'import %s as %s\n' % (lib, jlib)
@@ -471,7 +471,7 @@ def PyimportStatement(type, imp):
    return code


def SwitchStatement(type, discriminant, cases):
def SwitchStatement(type, discriminant, cases, comments=None):
    #TODO there will be a problem with continue in a switch statement.... FIX IT
    code = 'while 1:\n' + indent('SWITCHED = False\nCONDITION = (%s)\n')
    code = code % trans(discriminant)
@@ -491,12 +491,12 @@ def SwitchStatement(type, discriminant, cases):
    return code


def ThrowStatement(type, argument):
def ThrowStatement(type, argument, comments=None):
    return 'PyJsTempException = JsToPyException(%s)\nraise PyJsTempException\n' % trans(
        argument)


def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer):
def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer, comments=None):
    result = 'try:\n%s' % indent(trans(block))
    # complicated catch statement...
    if handler:
@@ -516,13 +516,13 @@ def TryStatement(type, block, handler, handlers, guardedHandlers, finalizer):
    return result


def LexicalDeclaration(type, declarations, kind):
def LexicalDeclaration(type, declarations, kind, comments=None):
    raise NotImplementedError(
        'let and const not implemented yet but they will be soon! Check github for updates.'
    )


def VariableDeclarator(type, id, init):
def VariableDeclarator(type, id, init, comments=None):
    name = id['name']
    # register the name if not already registered
    Context.register(name)
@@ -531,21 +531,21 @@ def VariableDeclarator(type, id, init):
    return ''


def VariableDeclaration(type, declarations, kind):
def VariableDeclaration(type, declarations, kind, comments=None):
    code = ''.join(trans(d) for d in declarations)
    return code if code else 'pass\n'


def WhileStatement(type, test, body):
def WhileStatement(type, test, body, comments=None):
    result = 'while %s:\n' % trans(test) + indent(trans(body))
    return result


def WithStatement(type, object, body):
def WithStatement(type, object, body, comments=None):
    raise NotImplementedError('With statement not implemented!')


def Program(type, body):
def Program(type, body, comments=None):
    inline_stack.reset()
    code = ''.join(trans(e) for e in body)
    # here add hoisted elements (register variables and define functions)
@@ -559,7 +559,7 @@ def Program(type, body):


def FunctionDeclaration(type, id, params, defaults, body, generator,
                        expression):
                        expression, comments=None):
    if generator:
        raise NotImplementedError('Generators not supported')
    if defaults:
@@ -610,7 +610,7 @@ def FunctionDeclaration(type, id, params, defaults, body, generator,


def FunctionExpression(type, id, params, defaults, body, generator,
                       expression):
                       expression, comments=None):
    if generator:
        raise NotImplementedError('Generators not supported')
    if defaults:
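Editor's note: a plausible reading of why every translator above gains a trailing comments=None parameter (an assumption, the diff itself does not say): node dicts are dispatched as keyword arguments, so an AST that now carries a comments field would otherwise raise TypeError. Minimal illustration:

# Minimal sketch (assumption): AST nodes are applied as **kwargs, so a
# handler without comments=None would fail on a node carrying comments.
def Identifier(type, name, comments=None):
    return 'var.get(%s)' % repr(name)

node = {'type': 'Identifier', 'name': 'x', 'comments': []}
print(Identifier(**node))  # -> var.get('x')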
83
plugin.video.alfa/lib/jsc.py
Normal file
@@ -0,0 +1,83 @@
MAPPING = {
    'a': '(false+"")[1]',
    'b': '([]["entries"]()+"")[2]',
    'c': '([]["fill"]+"")[3]',
    'd': '(undefined+"")[2]',
    'e': '(true+"")[3]',
    'f': '(false+"")[0]',
    'g': '(false+[0]+String)[20]',
    'h': '(+(101))["to"+String["name"]](21)[1]',
    'i': '([false]+undefined)[10]',
    'j': '([]["entries"]()+"")[3]',
    'k': '(+(20))["to"+String["name"]](21)',
    'l': '(false+"")[2]',
    'm': '(Number+"")[11]',
    'n': '(undefined+"")[1]',
    'o': '(true+[]["fill"])[10]',
    'p': '(+(211))["to"+String["name"]](31)[1]',
    'q': '(+(212))["to"+String["name"]](31)[1]',
    'r': '(true+"")[1]',
    's': '(false+"")[3]',
    't': '(true+"")[0]',
    'u': '(undefined+"")[0]',
    'v': '(+(31))["to"+String["name"]](32)',
    'w': '(+(32))["to"+String["name"]](33)',
    'x': '(+(101))["to"+String["name"]](34)[1]',
    'y': '(NaN+[Infinity])[10]',
    'z': '(+(35))["to"+String["name"]](36)',
    'A': '(+[]+Array)[10]',
    'B': '(+[]+Boolean)[10]',
    'C': 'Function("return escape")()(("")["italics"]())[2]',
    'D': 'Function("return escape")()([]["fill"])["slice"]("-1")',
    'E': '(RegExp+"")[12]',
    'F': '(+[]+Function)[10]',
    'G': '(false+Function("return Date")()())[30]',
    'I': '(Infinity+"")[0]',
    'M': '(true+Function("return Date")()())[30]',
    'N': '(NaN+"")[0]',
    'O': '(NaN+Function("return{}")())[11]',
    'R': '(+[]+RegExp)[10]',
    'S': '(+[]+String)[10]',
    'T': '(NaN+Function("return Date")()())[30]',
    'U': '(NaN+Function("return{}")()["to"+String["name"]]["call"]())[11]',
    ' ': '(NaN+[]["fill"])[11]',
    '"': '("")["fontcolor"]()[12]',
    '%': 'Function("return escape")()([]["fill"])[21]',
    '&': '("")["link"](0+")[10]',
    '(': '(undefined+[]["fill"])[22]',
    ')': '([0]+false+[]["fill"])[20]',
    '+': '(+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]])+[])[2]',
    ',': '([]["slice"]["call"](false+"")+"")[1]',
    '-': '(+(.+[0000000001])+"")[2]',
    '.': '(+(+!+[]+[+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[!+[]+!+[]]+[+[]])+[])[+!+[]]',
    '/': '(false+[0])["italics"]()[10]',
    ':': '(RegExp()+"")[3]',
    ';': '("")["link"](")[14]',
    '<': '("")["italics"]()[0]',
    '=': '("")["fontcolor"]()[11]',
    '>': '("")["italics"]()[2]',
    '?': '(RegExp()+"")[2]',
    '[': '([]["entries"]()+"")[0]',
    ']': '([]["entries"]()+"")[22]',
    '{': '(true+[]["fill"])[20]',
    '}': '([]["fill"]+"")["slice"]("-1")'
}

SIMPLE = {
    'false': '![]',
    'true': '!![]',
    'undefined': '[][[]]',
    'NaN': '+[![]]',
    'Infinity': '+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])'  # +"1e1000"
}

def jsunc(jscString):

    for key in sorted(MAPPING, key=lambda k: len(MAPPING[k]), reverse=True):
        if MAPPING.get(key) in jscString:
            jscString = jscString.replace(MAPPING.get(key), '"{}"'.format(key))

    for key in sorted(SIMPLE, key=lambda k: len(SIMPLE[k]), reverse=True):
        if SIMPLE.get(key) in jscString:
            jscString = jscString.replace(SIMPLE.get(key), '{}'.format(key))
    return jscString
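Editor's note: a usage sketch for jsunc() above, assuming jsc.py is importable. MAPPING values are folded back into their quoted keys (longest expressions first, so longer encodings win) and SIMPLE values into their bare literals.

# Usage sketch; the inputs are toy JSFuck-style fragments.
from jsc import jsunc

print(jsunc('(false+"")[1]'))   # -> '"a"'
print(jsunc('![]'))             # -> 'false'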
@@ -275,6 +275,7 @@ def quasard_thread(monitor):
        except IOError:
            time.sleep(1)  # nothing to read, sleep

        log.info("quasard: proc.return code: %s" % str(proc.returncode))
        if proc.returncode == 0 or xbmc.abortRequested:
            break
@@ -57,6 +57,9 @@ def init():
    """

    try:
        #Verifica si Kodi tiene algún archivo de Base de Datos de Vídeo de versiones anteriores, entonces los borra
        verify_Kodi_video_DB()

        #QUASAR: Preguntamos si se hacen modificaciones a Quasar
        if not filetools.exists(os.path.join(config.get_data_path(), "quasar.json")) and not config.get_setting('addon_quasar_update', default=False):
            question_update_external_addon("quasar")
@@ -186,4 +189,41 @@ def update_external_addon(addon_name):
    else:
        logger.error('Alguna carpeta no existe: Alfa: %s o %s: %s' % (alfa_addon_updates, addon_name, addon_path))

    return False


def verify_Kodi_video_DB():
    logger.info()
    import random

    platform = {}
    path = ''
    db_files = []

    try:
        path = filetools.join(xbmc.translatePath("special://masterprofile/"), "Database")
        if filetools.exists(path):
            platform = config.get_platform(full_version=True)
            if platform:
                db_files = filetools.walk(path)
                if filetools.exists(filetools.join(path, platform['video_db'])):
                    for root, folders, files in db_files:
                        for file in files:
                            if file != platform['video_db']:
                                if file.startswith('MyVideos'):
                                    randnum = str(random.randrange(1, 999999))
                                    filetools.rename(filetools.join(path, file), 'OLD_' + randnum + '_' + file)
                                    logger.error('BD obsoleta: ' + file)

                else:
                    logger.error('Video_DB: ' + str(platform['video_db']) + ' para versión Kodi ' + str(platform['num_version']) + ' NO EXISTE. Analizar carpeta: ' + str(db_files))
            else:
                logger.error('Estructura de get_platform(full_version=True) incorrecta')
        else:
            logger.error('Path a Userdata/Database (' + path + ') no encontrado')

    except:
        logger.error('Platform: ' + str(platform) + ' / Path: ' + str(path) + ' / Files: ' + str(db_files))
        logger.error(traceback.format_exc())

    return
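Editor's note: verify_Kodi_video_DB() above keeps only the MyVideos database that matches the running Kodi version and quarantines the rest by renaming them. A standalone sketch of that renaming rule using plain os calls instead of Alfa's filetools; the path and keep values are hypothetical.

# Standalone sketch of the quarantine rule above; os stands in for
# filetools, and 'MyVideos116.db' is an example of the expected DB name.
import os
import random

def quarantine_old_dbs(path, keep='MyVideos116.db'):
    for name in os.listdir(path):
        if name.startswith('MyVideos') and name != keep:
            randnum = str(random.randrange(1, 999999))
            os.rename(os.path.join(path, name),
                      os.path.join(path, 'OLD_' + randnum + '_' + name))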
@@ -26,47 +26,47 @@ def check_addon_init():

    # Obtiene el íntervalo entre actualizaciones y si se quieren mensajes
    try:
        timer = int(config.get_setting('addon_update_timer'))       # Intervalo entre actualizaciones, en Ajustes de Alfa
        if timer <= 0:
            return                                                  # 0. No se quieren actualizaciones
        verbose = config.get_setting('addon_update_message')
    except:
        timer = 12                                                  # Por defecto cada 12 horas
        verbose = False                                             # Por defecto, sin mensajes
    timer = timer * 3600                                            # Lo pasamos a segundos

    if config.get_platform(True)['num_version'] >= 14:              # Si es Kodi, lanzamos el monitor
        import xbmc
        monitor = xbmc.Monitor()
    else:                                                           # Lanzamos solo una actualización y salimos
        check_addon_updates(verbose)                                # Lanza la actualización
        return

    while not monitor.abortRequested():                             # Loop infinito hasta cancelar Kodi

        check_addon_updates(verbose)                                # Lanza la actualización

        if monitor.waitForAbort(timer):                             # Espera el tiempo programado o hasta que cancele Kodi
            break                                                   # Cancelación de Kodi, salimos

    return

    # Lanzamos en Servicio de actualización de FIXES
    try:
        threading.Thread(target=check_addon_monitor).start()        # Creamos un Thread independiente, hasta el fin de Kodi
        time.sleep(5)                                               # Dejamos terminar la primera verificación...
    except:                                                         # Si hay problemas de threading, se llama una sola vez
        try:
            timer = int(config.get_setting('addon_update_timer'))   # Intervalo entre actualizaciones, en Ajustes de Alfa
            if timer <= 0:
                return                                              # 0. No se quieren actualizaciones
            verbose = config.get_setting('addon_update_message')
        except:
            verbose = False                                         # Por defecto, sin mensajes
            pass
        check_addon_updates(verbose)                                # Lanza la actualización, en Ajustes de Alfa
        time.sleep(5)                                               # Dejamos terminar la primera verificación...

    return
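Editor's note: check_addon_init() above converts the configured interval from hours to seconds and relies on xbmc.Monitor.waitForAbort() both as the sleep and as the shutdown signal. A stripped-down sketch of that loop shape with a stub monitor (not the Kodi API):

# Stub-based sketch of the update loop above; FakeMonitor stands in for
# xbmc.Monitor and aborts after the first wait.
class FakeMonitor(object):
    def __init__(self):
        self.calls = 0
    def abortRequested(self):
        return self.calls > 0
    def waitForAbort(self, seconds):
        self.calls += 1
        return True

timer = 12 * 3600                   # hours -> seconds, as in the code above
monitor = FakeMonitor()
while not monitor.abortRequested():
    pass                            # check_addon_updates(verbose) would run here
    if monitor.waitForAbort(timer):
        break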
@@ -110,7 +110,7 @@ def check_addon_updates(verbose=False):
        lastfix = {}
        lastfix = jsontools.load(filetools.read(last_fix_json))
        if lastfix['addon_version'] == data['addon_version'] and lastfix['fix_version'] == data['fix_version']:
            logger.info(config.get_localized_string(70669) % (data['addon_version'], data['fix_version']))
            logger.info(config.get_localized_string(70670) % (data['addon_version'], data['fix_version']))
            if verbose:
                platformtools.dialog_notification(config.get_localized_string(70667), config.get_localized_string(70671) % (data['addon_version'], data['fix_version']))
            return False

@@ -342,6 +342,8 @@ def mark_content_as_watched_on_alfa(path):
    FOLDER_MOVIES = config.get_setting("folder_movies")
    FOLDER_TVSHOWS = config.get_setting("folder_tvshows")
    VIDEOLIBRARY_PATH = config.get_videolibrary_config_path()
    if not VIDEOLIBRARY_PATH:
        return

    # Solo podemos marcar el contenido como vista en la BBDD de Kodi si la BBDD es local,
    # en caso de compartir BBDD esta funcionalidad no funcionara
@@ -140,3 +140,145 @@
    </category>

</settings>
=======
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<settings>
    <category label="70168">
        <setting id="player_mode" type="enum" values="Direct|SetResolvedUrl|Built-In|Download and Play" label="30044" default="0"/>
        <setting id="default_action" type="enum" lvalues="30006|30007|30008" label="30005" default="0"/>
        <setting id="autoplay" type="bool" label="70562" default="false" visible="true"/>
        <setting id="thumbnail_type" type="enum" lvalues="30011|30012|30200" label="30010" default="2"/>
        <setting id="channel_language" type="labelenum" values="all|cast|lat" label="30019" default="all"/>
        <setting id="trakt_sync" type="bool" label="70109" default="false"/>
        <setting id="forceview" type="bool" label="30043" default="false"/>
        <setting id="faster_item_serialization" type="bool" label="30300" default="false"/>
        <setting id="debug" type="bool" label="30003" default="false"/>
        <setting label="70169" type="lsep"/>
        <setting id="resolve_priority" type="enum" label="70110" lvalues="70164|70165|70166" default="0"/>
        <setting id="resolve_stop" type="bool" label="70111" default="true"/>
        <setting id="hidepremium" type="bool" label="70112" default="false"/>
        <setting type="sep"/>
        <setting label="60305" type="lsep"/>
        <setting id="adult_aux_intro_password" type="text" label="70113" option="hidden" default=""/>
        <setting id="adult_mode" type="enum" lvalues="60602|60616|70114" label="30002" enable="!eq(-1,)" default="0"/>
        <setting id="adult_request_password" type="bool" label="70115" enable="!eq(-1,0)+!eq(-2,)" default="true"/>
        <setting id="adult_aux_new_password1" type="text" label="70116" option="hidden" enable="!eq(-3,)" default=""/>
        <setting id="adult_aux_new_password2" type="text" label="70117" option="hidden" enable="!eq(-1,)" default=""/>
    </category>

    <!-- Path downloads -->
    <category label="30501">
        <setting id="downloadpath" type="folder" label="30017" default=""/>
        <setting id="downloadlistpath" type="folder" label="30018" default=""/>
        <setting id="videolibrarypath" type="folder" label="30067" default=""/>

        <setting type="sep"/>
        <setting label="30131" type="lsep"/>
        <setting id="folder_tvshows" type="text" label="70118" default="SERIES"/>
        <setting id="folder_movies" type="text" label="70119" default="CINE"/>
        <setting id="videolibrary_kodi_flag" type="number" label="" default="0" visible="false"/>
        <setting id="videolibrary_kodi" type="bool" label="70120" enable="lt(-1,2)+eq(0,false)" default="false"/>
    </category>
    <category label="70121">
        <setting id="start_page" type="bool" label="70121" default="false"/>
        <setting id="custom_start" type="bool" label="70122" default="false" visible="eq(-1,True)"/>
        <setting id="news_start" type="bool" label="70123" default="false" visible="eq(-2,True)" enable="eq(-1,False)+eq(-2,True)"/>
        <setting id="category" type="labelenum" label="70124"
                 lvalues="70137|30123|30124|70018|60513|70013|70014|59976|70171"
                 default="70137" visible="eq(-3,True)+eq(-1,True)+eq(-2,False)" enable="eq(-3,True)+eq(-1,True)+eq(-2,false)"/>
    </category>
    <category label="70126">
        <setting id="icon_set" type="labelenum" label="70108" values="default|dark|angedam" default="default"/>
        <setting id="infoplus_set" type="labelenum" label="70128" lvalues="70129|70130" default="70129"/>
        <setting id="video_thumbnail_type" type="enum" label="70131" lvalues="70132|70133" default="0"/>
        <setting label="70167" type="lsep"/>
        <setting id="unify" type="bool" label="70134" default="false"/>
        <setting id="title_color" type="bool" label="70135" default="false" visible="eq(-1,true)"/>
        <setting id="movie_color" type="labelenum" label="70137"
                 values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
                 default="white" visible="eq(-1,true)+eq(-2,true)"/>
        <setting id="tvshow_color" type="labelenum" label="30123"
                 values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
                 default="white" visible="eq(-2,true)+eq(-3,true)"/>
        <setting id="year_color" type="labelenum" label="60232"
                 values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
                 default="white" visible="eq(-3,true)+eq(-4,true)"/>
|
||||
<setting id="rating_1_color" type="labelenum" label="70138"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-4,true)+eq(-5,true)"/>
|
||||
<setting id="rating_2_color" type="labelenum" label="70139"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-5,true)+eq(-6,true)"/>
|
||||
<setting id="rating_3_color" type="labelenum" label="70140"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-6,true)+eq(-7,true)"/>
|
||||
<setting id="quality_color" type="labelenum" label="70141"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-7,true)+eq(-8,true)"/>
|
||||
<setting id="cast_color" type="labelenum" label="59980"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-8,true)+eq(-9,true)"/>
|
||||
<setting id="lat_color" type="labelenum" label="59981"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-9,true)+eq(-10,true)"/>
|
||||
<setting id="vose_color" type="labelenum" label="70142"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-10,true)+eq(-11,true)"/>
|
||||
<setting id="vos_color" type="labelenum" label="70143"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-11,true)+eq(-12,true)"/>
|
||||
<setting id="vo_color" type="labelenum" label="70144"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-12,true)+eq(-13,true)"/>
|
||||
<setting id="server_color" type="labelenum" label="70145"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-13,true)+eq(-14,true)"/>
|
||||
<setting id="library_color" type="labelenum" label="70146"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-14,true)+eq(-15,true)"/>
|
||||
<setting id="update_color" type="labelenum" label="70147"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-15,true)+eq(-16,true)"/>
|
||||
<setting id="no_update_color" type="labelenum" label="70148"
|
||||
values="[COLOR white]white[/COLOR]|[COLOR cyan]cyan[/COLOR]|[COLOR deepskyblue]deepskyblue[/COLOR]|[COLOR firebrick]firebrick[/COLOR]|[COLOR gold]gold[/COLOR]|[COLOR goldenrod]goldenrod[/COLOR]|[COLOR hotpink]hotpink[/COLOR]|[COLOR limegreen]limegreen[/COLOR]|[COLOR orange]orange[/COLOR]|[COLOR orchid]orchid[/COLOR]|[COLOR red]red[/COLOR]|[COLOR salmon]salmon[/COLOR]|[COLOR yellow]yellow[/COLOR]"
|
||||
default="white" visible="eq(-16,true)+eq(-17,true)"/>
|
||||
</category>
|
||||
<category label="70149">
|
||||
<setting label="70150" type="lsep"/>
|
||||
<setting id="infoplus" type="bool" label="70151" default="true"/>
|
||||
<setting id="extended_info" type="bool" label="70152" default="false"/>
|
||||
|
||||
<setting label="70153" type="lsep"/>
|
||||
<setting id="shortcut_key" type="action" label="30999" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAia2V5bWFwIg0KfQ==)" />
|
||||
|
||||
<setting type="sep"/>
|
||||
<setting label="70154" type="lsep"/>
|
||||
<setting id="tmdb_threads" type="labelenum" values="5|10|15|20|25|30" label="70155" default="20"/>
|
||||
<setting id="tmdb_plus_info" type="bool" label="70156" default="false"/>
|
||||
<setting id="tmdb_cache" type="bool" label="70157" default="true"/>
|
||||
<setting id="tmdb_cache_expire" type="enum" lvalues="70158|70159|70160|70161|70170" label="70162" enable="eq(-1,true)" default="4"/>
|
||||
<setting id="tmdb_clean_db_cache" type="action" label="70163" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAic2NyaXB0Ig0KfQ==)" />
|
||||
|
||||
<setting type="sep"/>
|
||||
<setting label="Para evitar esperar demasiado cuando un servidor no responde:" type="lsep"/>
|
||||
<setting id="httptools_timeout" type="labelenum" values="0|5|10|15|20|25|30" label="Timeout (tiempo de espera máximo)" default="15"/>
|
||||
|
||||
<setting type="sep"/>
|
||||
<setting label="Gestión de actualizaciones urgentes de módulos de Alfa (Quick Fixes):" type="lsep"/>
|
||||
<setting id="addon_update_timer" type="labelenum" values="0|6|12|24" label="Intervalo entre actualizaciones automáticas (horas)" default="12"/>
|
||||
<setting id="addon_update_message" type="bool" label="¿Quiere ver mensajes de las actualizaciones?" default="false"/>
|
||||
|
||||
<setting label="Lista activa" type="text" id="lista_activa" default="alfavorites-default.json" visible="false"/>
|
||||
|
||||
<setting type="sep"/>
|
||||
<setting label="Gestión de actualizaciones de otros addon relacionados con Alfa:" type="lsep"/>
|
||||
<setting id="addon_quasar_update" type="bool" label="¿Quiere actualizar Quasar para evitar errores?" default="false"/>
|
||||
|
||||
<setting type="sep"/>
|
||||
<setting label="Método alternativo de acceso a webs:" type="lsep"/>
|
||||
<setting id="alternative_web_access" type="bool" label="¿Modo -en Demanda-: NO? ¿Modo -Forzado-: SÍ?" default="false"/>
|
||||
</category>
|
||||
|
||||
</settings>
|
||||
|
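For context, each <setting id=...> above is read back from the add-on code through Alfa's config wrapper. A sketch of how the quick-fix settings would be consumed (config.get_setting exists in platformcode, but the exact return types assumed here are not guaranteed; this only runs inside Kodi):

from platformcode import config

# Hours between automatic quick-fix checks; "0" disables the timer.
update_timer = int(config.get_setting("addon_update_timer") or 0)
# Whether to pop a notification after each update check.
verbose = config.get_setting("addon_update_message")
# HTTP timeout used by httptools when a server stalls.
timeout = int(config.get_setting("httptools_timeout") or 15)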
@@ -4,7 +4,7 @@
    "ignore_urls": [],
    "patterns": [
        {
            "pattern": "((?:fembed|divload).com/v/[A-z0-9_-]+)",
            "pattern": "((?:fembed|divload).com/(?:f|v)/[A-z0-9_-]+)",
            "url": "https://www.\\1"
        }
    ]

@@ -16,6 +16,7 @@ def test_video_exists(page_url):

def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    page_url = page_url.replace("/f/", "/v/")
    page_url = page_url.replace("/v/", "/api/source/")
    data = httptools.downloadpage(page_url, post={}).data
    data = jsontools.load(data)
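Taken together, the two fembed hunks accept both /f/ and /v/ embed links and funnel them to the JSON API. A quick standalone check of the rewrite chain, using plain re outside Kodi (the sample ids are made up; the character class is tidied to [A-Za-z0-9_-]):

import re

PATTERN = r"((?:fembed|divload)\.com/(?:f|v)/[A-Za-z0-9_-]+)"

for sample in ("https://www.fembed.com/v/abc123",
               "https://fembed.com/f/xyz-9"):
    match = re.search(PATTERN, sample)
    page_url = "https://www." + match.group(1)
    # /f/ links are normalised to /v/ first, then both go to the API:
    page_url = page_url.replace("/f/", "/v/").replace("/v/", "/api/source/")
    print(page_url)  # e.g. https://www.fembed.com/api/source/abc123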
@@ -5,7 +5,7 @@
    "patterns": [
        {
            "pattern": "gamovideo.com/(?:embed-|)([a-z0-9]+)",
            "url": "http://gamovideo.com/embed-\\1.html"
            "url": "http://gamovideo.com/\\1"
        }
    ]
},
@@ -39,4 +39,4 @@
        }
    ],
    "thumbnail": "server_gamovideo.png"
}
}

@@ -7,17 +7,14 @@ from core import scrapertools
from lib import jsunpack
from platformcode import logger

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"}
headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0"}


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url)
    if data.code == 404:
        data = httptools.downloadpage(page_url, headers=headers, add_referer=True)
    data = data.data
    data = httptools.downloadpage(page_url, headers=headers, cookies=False).data

    if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
    if "File was deleted" in data or "<noscript>" not in data or "File was locked by administrator" in data:
        return False, "[Gamovideo] El archivo no existe o ha sido borrado"
    if "Video is processing now" in data:
        return False, "[Gamovideo] El video está procesándose en estos momentos. Inténtelo más tarde."
@@ -28,10 +25,7 @@ def test_video_exists(page_url):

def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url)
    if data.code == 404:
        data = httptools.downloadpage(page_url, headers=headers, add_referer=True)
    data = data.data
    data = httptools.downloadpage(page_url, headers=headers, cookies=False).data
    packer = scrapertools.find_single_match(data,
        "<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
    if packer != "":
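The net effect of the two gamovideo .py hunks: instead of requesting first and retrying on 404 with extra headers, the connector now always sends the Firefox 66 User-Agent and skips cookie handling. A standalone sketch of that access pattern (Python 3 stdlib rather than Alfa's httptools):

import urllib.request

HEADERS = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) "
                         "Gecko/20100101 Firefox/66.0"}

def fetch(page_url):
    # One request, fixed UA, no cookie processor installed; this mirrors
    # downloadpage(page_url, headers=headers, cookies=False) above.
    req = urllib.request.Request(page_url, headers=HEADERS)
    with urllib.request.urlopen(req) as resp:
        return resp.read().decode("utf-8", "replace")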
@@ -16,7 +16,7 @@
        "url": "http://docs.google.com/get_video_info?docid=\\1"
    },
    {
        "pattern": "(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/(?:preview|edit)",
        "pattern": "(?s)(?:https|http)://(?:docs|drive).google.com/file/d/([^/]+)/(?:preview|edit|view)",
        "url": "http://docs.google.com/get_video_info?docid=\\1"
    },
    {
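The widened gvideo pattern now also matches plain-http links and the common .../view form. Quick verification outside Kodi (regex copied from the hunk; FILE_ID is a placeholder, not a real document id):

import re

PATTERN = r"(?s)(?:https|http)://(?:docs|drive).google.com/file/d/([^/]+)/(?:preview|edit|view)"

for url in ("https://drive.google.com/file/d/FILE_ID/view",
            "http://docs.google.com/file/d/FILE_ID/preview"):
    m = re.search(PATTERN, url)
    if m:
        print("http://docs.google.com/get_video_info?docid=" + m.group(1))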
42
plugin.video.alfa/servers/videobb.json
Normal file
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "videobb.ru/v/([A-z0-9]+)",
                "url": "https://videobb.ru/api/source/\\1"
            }
        ]
    },
    "free": true,
    "id": "videobb",
    "name": "videobb",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://www.cinetux.to/videobb/logo.jpg"
}
31
plugin.video.alfa/servers/videobb.py
Normal file
@@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector videobb By Alfa development Group
# --------------------------------------------------------

from core import httptools
from core import scrapertools
from core import jsontools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "no longer exists" in data:
        return False, "[videobb] El video ha sido borrado"
    return True, ""


def get_video_url(page_url, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    video_id = scrapertools.find_single_match(page_url, "v/(\w+)")  # id del video
    post = "r=&d=videobb.ru"
    data = httptools.downloadpage(page_url, post=post).data
    data = jsontools.load(data)["data"]
    for url in data:
        video_urls.append([url["label"] + "p [videobb]", url["file"]])

    return video_urls
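Judging by the loop above, the videobb API answers the POST with JSON along the lines of {"success": ..., "data": [{"file": ..., "label": ...}]}; the exact shape is inferred from this connector, not documented. Parsing it standalone:

import json

raw = ('{"success": true, "data": ['
       '{"file": "https://example.invalid/v480.mp4", "label": "480"},'
       '{"file": "https://example.invalid/v720.mp4", "label": "720"}]}')

video_urls = [[d["label"] + "p [videobb]", d["file"]]
              for d in json.loads(raw)["data"]]
print(video_urls)  # [['480p [videobb]', ...], ['720p [videobb]', ...]]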
@@ -4,8 +4,8 @@
    "ignore_urls": [],
    "patterns": [
        {
            "pattern": "(?i)(https://vidlox.(?:tv|me)/embed-.*?.html)",
            "url": "\\1"
            "pattern": "(?i)(vidlox.(?:tv|me)/embed-\\w+)",
            "url": "https://\\1.html"
        }
    ]
},
42
plugin.video.alfa/servers/xstreamcdn.json
Normal file
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(https://www.xstreamcdn.com/v/[A-z0-9_-]+)",
                "url": "\\1"
            }
        ]
    },
    "free": true,
    "id": "xstream",
    "name": "xstream",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://library.vodkr.com/media/24364/xstreamlogo.jpg"
}
33
plugin.video.alfa/servers/xstreamcdn.py
Normal file
@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-

import urllib

from core import httptools
from core import scrapertools
from core import jsontools
from platformcode import logger, config


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    if "ile was deleted" in data or "Page Cannot Be Found" in data or "<title>Sorry 404 not found" in data:
        return False, "[xstreamcdn.com] El archivo ha sido eliminado o no existe"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    post = urllib.urlencode({})
    data = httptools.downloadpage("https://xstreamcdn.com/api/source/" + scrapertools.find_single_match(page_url, "/v/([A-z0-9_-]+)"), post=post, add_referer=page_url).data

    json_data = jsontools.load(data)
    check = json_data['success']
    if check:
        for element in json_data['data']:
            media_url = element['file']
            res = element['label']
            tipo = element['type']
            video_urls.append([tipo + " (" + res + ") [xstreamcdn]", media_url])
    return video_urls
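How the connector turns an embed URL into the API endpoint, as a standalone check (regex adapted from the code above; SAMPLE_ID is made up):

import re

page_url = "https://www.xstreamcdn.com/v/SAMPLE_ID"
video_id = re.search(r"/v/([A-Za-z0-9_-]+)", page_url).group(1)
api_url = "https://xstreamcdn.com/api/source/" + video_id
print(api_url)  # https://xstreamcdn.com/api/source/SAMPLE_ID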
@@ -3,7 +3,7 @@
# Service for updating new episodes on library series
# ------------------------------------------------------------

import datetime, imp, math, threading, traceback
import datetime, imp, math, threading, traceback, sys