Merge pull request #333 from Intel11/master

Actualizados
This commit is contained in:
Alfa
2018-07-04 14:14:59 -05:00
committed by GitHub
18 changed files with 474 additions and 249 deletions

View File

@@ -18,6 +18,7 @@ import HTTPAndWSServer
http_port = config.get_setting("server.port")
myip = config.get_local_ip()
version = config.get_addon_version()
def thread_name_wrap(func):
@@ -41,7 +42,7 @@ if sys.version_info < (2, 7, 11):
def show_info():
os.system('cls' if os.name == 'nt' else 'clear')
print ("--------------------------------------------------------------------")
print ("Alfa Iniciado")
print ("Alfa %s Iniciado" %version)
print ("La URL para acceder es http://%s:%s" % (myip, http_port))
print ("--------------------------------------------------------------------")
print ("Runtime Path : " + config.get_runtime_path())
@@ -68,7 +69,7 @@ def start():
# Da por levantado el servicio
logger.info("--------------------------------------------------------------------")
logger.info("Alfa Iniciado")
logger.info("Alfa %s Iniciado" %version)
logger.info("La URL para acceder es http://%s:%s" % (myip, http_port))
logger.info("--------------------------------------------------------------------")
logger.info("Runtime Path : " + config.get_runtime_path())

View File

@@ -14,6 +14,27 @@ settings_dic = {}
adult_setting = {}
def get_addon_version(linea_inicio=0, total_lineas=2):
    '''
    Return the addon version number, read from the addon.xml file.

    @param linea_inicio: first line (0-based) of addon.xml to inspect
    @param total_lineas: how many lines to read starting at linea_inicio
    @return: version string, or "???" if it cannot be found
    '''
    import os
    # Build the path portably; the previous hard-coded "\\" separator
    # only worked on Windows.
    path = os.path.join(get_runtime_path(), "addon.xml")
    data = []
    # "with" guarantees the handle is closed even if reading raises.
    with open(path, "rb") as f:
        for x, line in enumerate(f):
            if x < linea_inicio:
                continue
            if len(data) == total_lineas:
                break
            data.append(line)
    data1 = "".join(data)
    # <addon id="plugin.video.alfa" name="Alfa" version="2.5.21" provider-name="Alfa Addon">
    aux = re.findall('<addon id="plugin.video.alfa" name="Alfa" version="([^"]+)"', data1, re.MULTILINE | re.DOTALL)
    version = "???"
    if len(aux) > 0:
        version = aux[0]
    return version
def get_platform(full_version=False):
# full_version solo es util en xbmc/kodi
ret = {

View File

@@ -15,14 +15,9 @@ from platformcode import config
from core.item import Item
from core.tmdb import Tmdb
from platformcode import launcher, logger
from core import filetools
# <addon id="plugin.video.alfa" name="Alfa" version="2.3.0" provider-name="Alfa Addon">
data = filetools.read(filetools.join(config.get_runtime_path(), "addon.xml"))
aux = re.findall('<addon id="plugin.video.alfa" name="Alfa" version="([^"]+)"', data, re.MULTILINE | re.DOTALL)
version = "???"
if len(aux) > 0:
version = aux[0]
## Obtiene la versión del addon
version = config.get_addon_version()
class html(Controller):
pattern = re.compile("##")
@@ -113,7 +108,7 @@ class platform(Platformtools):
thumbnail=channelselector.get_thumb("back.png", "banner_")))
else:
itemlist.insert(0, Item(title="Atrás", action="go_back",
thumbnail=channelselector.get_thumb("back.png")))
thumbnail=channelselector.get_thumb("back.png", "banner_")))
JsonData = {}
JsonData["action"] = "EndItems"
@@ -127,17 +122,9 @@ class platform(Platformtools):
# Recorremos el itemlist
for item in itemlist:
if not item.thumbnail and item.action == "search": item.thumbnail = channelselector.get_thumb("search.png")
if not item.thumbnail and item.folder == True: item.thumbnail = channelselector.get_thumb("folder.png", "banner")
if not item.thumbnail and item.folder == False: item.thumbnail = channelselector.get_thumb("nofolder.png")
if "http://media.xxxxx/" in item.thumbnail and not item.thumbnail.startswith(
"http://media.xxxxxxxx/thumb_"):
if parent_item.viewmode in ["banner", "channel"]:
item.thumbnail = channelselector.get_thumbnail_path("banner") + os.path.basename(item.thumbnail)
else:
item.thumbnail = channelselector.get_thumbnail_path() + os.path.basename(item.thumbnail)
if not item.thumbnail and item.action == "search": item.thumbnail = channelselector.get_thumb("search.png", "banner_")
#if not item.thumbnail and item.folder == True: item.thumbnail = channelselector.get_thumb("folder.png", "banner_")
if not item.thumbnail and item.folder == False: item.thumbnail = channelselector.get_thumb("nofolder.png", "banner_")
# Estas imagenes no estan en banner, asi que si queremos banner, para que no se vean mal las quitamos
elif parent_item.viewmode in ["banner", "channel"] and item.thumbnail.startswith(
"http://media.xxxxx/thumb_"):

View File

@@ -147,10 +147,12 @@ def findvideos(item):
match = scrapertools.find_multiple_matches(bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
for url in match:
titulo = "Ver en: %s"
text_color = "white"
if "goo.gl" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "youtube" in url:
titulo = "[COLOR = yellow]Ver trailer: %s[/COLOR]"
titulo = "Ver trailer: %s"
text_color = "yellow"
if "ad.js" in url or "script" in url or "jstags.js" in url:
continue
elif "vimeo" in url:
@@ -158,6 +160,7 @@ def findvideos(item):
itemlist.append(
item.clone(channel = item.channel,
action = "play",
text_color = text_color,
title = titulo,
url = url
))

View File

@@ -1,9 +1,7 @@
# -*- coding: utf-8 -*-
import re
import time
import urlparse
import urllib
from channels import renumbertools
from core import httptools
@@ -18,13 +16,10 @@ HOST = "https://animeflv.net/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "browse?order=title"))
itemlist.append(Item(channel=item.channel, title="Buscar por:"))
itemlist.append(Item(channel=item.channel, action="search", title=" Título"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "browse",
@@ -35,9 +30,7 @@ def mainlist(item):
extra="year"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Estado", url=HOST + "browse",
extra="status"))
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
@@ -48,36 +41,29 @@ def search(item, texto):
texto = texto.replace(" ", "+")
post = "value=%s" % texto
data = httptools.downloadpage(item.url, post=post).data
try:
dict_data = jsontools.load(data)
for e in dict_data:
if e["id"] != e["last_id"]:
_id = e["last_id"]
else:
_id = e["id"]
url = "%sanime/%s/%s" % (HOST, _id, e["slug"])
title = e["title"]
thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"])
new_item = item.clone(action="episodios", title=title, url=url, thumbnail=thumbnail)
if e["type"] != "movie":
new_item.show = title
new_item.context = renumbertools.context(item)
else:
new_item.contentType = "movie"
new_item.contentTitle = title
itemlist.append(new_item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
@@ -88,39 +74,30 @@ def search_section(item):
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
patron = 'id="%s_select"[^>]+>(.*?)</select>' % item.extra
data = scrapertools.find_single_match(data, patron)
matches = re.compile('<option value="([^"]+)">(.*?)</option>', re.DOTALL).findall(data)
for _id, title in matches:
url = "%s?%s=%s&order=title" % (item.url, item.extra, _id)
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
context=renumbertools.context(item)))
return itemlist
def newest(categoria):
    # Entry point for the global "newest" feed: this channel only
    # contributes to the 'anime' category.
    if categoria == 'anime':
        return novedades_episodios(Item(url=HOST))
    return []
def novedades_episodios(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, '<h2>Últimos episodios</h2>.+?<ul class="ListEpisodios[^>]+>(.*?)</ul>')
matches = re.compile('<a href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
'<strong class="Title">(.*?)</strong>', re.DOTALL).findall(data)
itemlist = []
for url, thumbnail, str_episode, show in matches:
try:
episode = int(str_episode.replace("Episodio ", ""))
except ValueError:
@@ -135,28 +112,21 @@ def novedades_episodios(item):
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
fulltitle=title)
itemlist.append(new_item)
return itemlist
def novedades_anime(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
matches = re.compile('href="([^"]+)".+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.+?>(.*?)</h3>.+?'
'(?:</p><p>(.*?)</p>.+?)?</article></li>', re.DOTALL).findall(data)
itemlist = []
for url, thumbnail, _type, title, plot in matches:
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
if _type != "Película":
@@ -165,173 +135,75 @@ def novedades_anime(item):
else:
new_item.contentType = "movie"
new_item.contentTitle = title
itemlist.append(new_item)
return itemlist
def listado(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
url_pagination = scrapertools.find_single_match(data, '<li class="active">.*?</li><li><a href="([^"]+)">')
data = scrapertools.find_multiple_matches(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
data = "".join(data)
matches = re.compile('<a href="([^"]+)">.+?<img src="([^"]+)".+?<span class=.+?>(.*?)</span>.+?<h3.*?>(.*?)</h3>'
'.*?</p><p>(.*?)</p>', re.DOTALL).findall(data)
itemlist = []
for url, thumbnail, _type, title, plot in matches:
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
if _type == "Anime":
new_item.show = title
new_item.context = renumbertools.context(item)
else:
new_item.contentType = "movie"
new_item.contentTitle = title
itemlist.append(new_item)
if url_pagination:
url = urlparse.urljoin(HOST, url_pagination)
title = ">> Pagina Siguiente"
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
# fix para renumbertools
item.show = scrapertools.find_single_match(data, '<h1 class="Title">(.*?)</h1>')
if item.plot == "":
item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
matches = re.compile('href="([^"]+)"><figure><img class="[^"]+" data-original="([^"]+)".+?</h3>'
'\s*<p>(.*?)</p>', re.DOTALL).findall(data)
if matches:
for url, thumb, title in matches:
title = title.strip()
url = urlparse.urljoin(item.url, url)
# thumbnail = item.thumbnail
try:
episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$"))
except ValueError:
season = 1
episode = 1
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%sx%s : %s" % (season, str(episode).zfill(2), item.title)
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
fanart=item.thumbnail, contentType="episode"))
else:
# no hay thumbnail
matches = re.compile('<a href="(/ver/[^"]+)"[^>]+>(.*?)<', re.DOTALL).findall(data)
for url, title in matches:
title = title.strip()
url = urlparse.urljoin(item.url, url)
thumb = item.thumbnail
try:
episode = int(scrapertools.find_single_match(title, "^.+?\s(\d+)$"))
except ValueError:
season = 1
episode = 1
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%sx%s : %s" % (season, str(episode).zfill(2), item.title)
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
fanart=item.thumbnail, contentType="episode"))
info = eval(scrapertools.find_single_match(data, 'anime_info = (.*?);'))
episodes = eval(scrapertools.find_single_match(data, 'var episodes = (.*?);'))
for episode in episodes:
url = '%s/ver/%s/%s-%s' % (HOST, episode[1], info[2], episode[0])
title = '1x%s Episodio %s' % (episode[0], episode[0])
itemlist.append(item.clone(title=title, url=url, action='findvideos', show=info[1]))
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca",
action="add_serie_to_library", extra="episodios"))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
download_list = scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"')
for i in download_list:
list_videos.append(urllib.unquote_plus(i))
aux_url = []
cldup = False
for e in list_videos:
url_api = "https://s3.animeflv.com/check.php?server=%s&v=%s"
# izanagi, yourupload, hyperion
if e.startswith("https://s3.animeflv.com/embed"):
server, v = scrapertools.find_single_match(e, 'server=([^&]+)&v=(.*?)$')
data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")
if '{"error": "Por favor intenta de nuevo en unos segundos", "sleep": 3}' in data:
time.sleep(3)
data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")
if server != "hyperion":
url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
if url:
itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
else:
# pattern = '"direct":"([^"]+)"'
# url = scrapertools.find_single_match(data, pattern)
# itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
pattern = '"label":([^,]+),"type":"video/mp4","file":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, pattern)
video_urls = []
for label, url in matches:
video_urls.append([label, "mp4", url])
if video_urls:
video_urls.sort(key=lambda u: int(u[0]))
itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
video_urls=video_urls))
download_list = scrapertools.find_multiple_matches(data, 'video\[\d+\] = \'<iframe .*?src="(.*?)"')
for url in download_list:
data = httptools.downloadpage(url).data
if 'izanagi' in url:
new_url = url.replace('embed', 'check')
new_data = httptools.downloadpage(new_url).data
url = scrapertools.find_single_match(new_data, '"file":"(.*?)"')
else:
if e.startswith("https://cldup.com") and not cldup:
itemlist.append(item.clone(title="Enlace encontrado en Cldup", action="play", url=e))
cldup = True
aux_url.append(e)
itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
for videoitem in itemlist:
videoitem.fulltitle = item.fulltitle
videoitem.channel = item.channel
videoitem.thumbnail = item.thumbnail
url = scrapertools.find_single_match(data, 'var redir = "(.*?)"')
if url != '':
url = url.replace("\\","")
itemlist.append(item.clone(title='%s', url=url, action='play'))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
return itemlist

View File

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import urllib
import re
from core import httptools
from core import scrapertools
@@ -25,7 +26,7 @@ def mainlist(item):
itemlist.append(item.clone(title="Novedades", action="entradas",
url= host + "/resultados-reciente.php?buscar=&genero=",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Destacados", action="entradas",
itemlist.append(item.clone(title="Destacados", action="destacados",
url= host + "/resultados-destacados.php?buscar=&genero=",
fanart="http://i.imgur.com/Q7fsFI6.png"))
itemlist.append(item.clone(title="Categorías", action="cat", url= host + "/index.php",
@@ -37,6 +38,12 @@ def mainlist(item):
return itemlist
def get_source(url):
    # Download a page and strip whitespace/markup noise so the scraping
    # regexes can operate on one flattened string.
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}|"|\(|\)', "", raw)
def configuracion(item):
from platformcode import platformtools
@@ -95,22 +102,19 @@ def indice(item):
def cat(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="menu">(.*?)</nav>')
matches = scrapertools.find_multiple_matches(bloque, "<li>.*?<a href='([^']+)'.*?>(.*?)</a>")
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + "/" + scrapedurl
if not "span" in scrapedtitle:
scrapedtitle = "[COLOR gold] **" + scrapedtitle + "**[/COLOR]"
itemlist.append(item.clone(action="entradas", title=scrapedtitle, url=scrapedurl))
else:
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
itemlist.append(item.clone(action="entradas", title=scrapedtitle, url=scrapedurl))
data = get_source(item.url)
bloques = scrapertools.find_multiple_matches(data, '</li><li class=dropdown>.*?</ul>')
for bloque in bloques:
matches = scrapertools.find_multiple_matches(bloque, "<li><a href=(.*?)>(.*?)<")
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + "/" + scrapedurl
if not "TODO" in scrapedtitle:
itemlist.append(item.clone(action="entradas", title=scrapedtitle, url=scrapedurl))
return itemlist
def entradas(item):
def destacados(item):
logger.info()
itemlist = []
item.text_color = color2
@@ -161,6 +165,37 @@ def entradas(item):
return itemlist
def entradas(item):
    # List the movies found at item.url, adding a "next page" entry when
    # the pagination link is present.
    logger.info()
    itemlist = []
    item.text_color = color2
    data = get_source(item.url)
    patron = ('class=imagen.*?href=(.*?)><img.*?src=(.*?) alt=.*?'
              'title=(.*?)/>.*?</h2>(\d{4}) (.*?)<.*?space>(.*?)<')
    for url, thumb, title, year, genre, plot in scrapertools.find_multiple_matches(data, patron):
        info = {'plot': plot, 'genre': genre}
        # Only digits count as a usable year value.
        if year.strip():
            info['year'] = int(year)
        itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title,
                                   url=host + "/" + url, thumbnail=host + '/' + thumb,
                                   infoLabels=info, contentTitle=title))
    next_page = scrapertools.htmlclean(
        scrapertools.find_single_match(data, '<a class=last>.*?</a></li><li><a href=(.*?)>.*?</a>'))
    if next_page:
        itemlist.append(item.clone(action="entradas", title=">> Página Siguiente",
                                   url=host + next_page, text_color=color3))
    return itemlist
def findvideos(item):
logger.info()
itemlist = []

View File

@@ -0,0 +1,69 @@
{
"id": "cuevana3",
"name": "Cuevana 3",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "http://www.cuevana3.com/wp-content/uploads/2017/08/logo-v10.png",
"banner": "",
"version": 1,
"categories": [
"movies"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,200 @@
# -*- coding: utf-8 -*-
# -*- Channel Cuevana 3 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'http://www.cuevana3.com/'
IDIOMAS = {'Latino': 'LAT', 'Español': 'CAST', 'Subtitulado':'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
def mainlist(item):
    # Build the channel's root menu and register the AutoPlay option.
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    entries = [
        {'title': 'Ultimas', 'action': 'list_all', 'url': host,
         'thumbnail': get_thumb('last', auto=True)},
        {'title': 'Generos', 'action': 'section', 'section': 'genre',
         'thumbnail': get_thumb('genres', auto=True)},
        {'title': 'Castellano', 'action': 'list_all', 'url': host + '?s=Español',
         'thumbnail': get_thumb('audio', auto=True)},
        {'title': 'Latino', 'action': 'list_all', 'url': host + '?s=Latino',
         'thumbnail': get_thumb('audio', auto=True)},
        {'title': 'VOSE', 'action': 'list_all', 'url': host + '?s=Subtitulado',
         'thumbnail': get_thumb('audio', auto=True)},
        {'title': 'Alfabetico', 'action': 'section', 'section': 'alpha',
         'thumbnail': get_thumb('alphabet', auto=True)},
        {'title': 'Buscar', 'action': 'search', 'url': host + '?s=',
         'thumbnail': get_thumb('search', auto=True)},
    ]
    itemlist = [item.clone(**entry) for entry in entries]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def get_source(url):
    # Fetch *url* and flatten the HTML (quotes, newlines, tabs and
    # repeated spaces removed) for easier regex scraping.
    logger.info()
    response = httptools.downloadpage(url).data
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", response)
def list_all(item):
    """
    List every movie found at item.url.

    Scrapes the page with one of two regexes depending on whether we
    arrived from the alphabetical index ('alpha' section) or a regular
    listing, then appends a pagination item when a "next" link exists.

    @param item: menu item carrying .url and optionally .section
    @return: list of Items (possibly partial/empty on a scraping error)
    """
    logger.info()
    itemlist = []
    try:
        # Download once; previously the page was fetched twice.
        data = get_source(item.url)
        if item.section == 'alpha':
            patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?'
            patron += 'src=(.*?) class.*?<strong>(.*?)</strong>.*?<td>(\d{4})</td>'
        else:
            patron = '<article id=post-.*?<a href=(.*?)>.*?src=(.*?) alt=.*?'
            patron += '<h2 class=Title>(.*?)<\/h2>.*?<span class=Year>(.*?)<\/span>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
            url = scrapedurl
            # Titles may carry an alternate name after "|"; keep the first part.
            if "|" in scrapedtitle:
                scrapedtitle = scrapedtitle.split("|")
                contentTitle = scrapedtitle[0].strip()
            else:
                contentTitle = scrapedtitle
            # Drop parenthesised annotations (e.g. an embedded year).
            contentTitle = re.sub('\(.*?\)', '', contentTitle)
            title = '%s [%s]' % (contentTitle, year)
            thumbnail = 'http:' + scrapedthumbnail
            itemlist.append(item.clone(action='findvideos',
                                       title=title,
                                       url=url,
                                       thumbnail=thumbnail,
                                       contentTitle=contentTitle,
                                       infoLabels={'year': year}
                                       ))
        tmdb.set_infoLabels_itemlist(itemlist, True)
        # Pagination
        url_next_page = scrapertools.find_single_match(data, '<a class=next.*?href=(.*?)>')
        if url_next_page:
            itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all', section=item.section))
    except Exception:
        # Best effort: log the failure instead of hiding it, but still
        # return whatever was scraped so the menu keeps working.
        import traceback
        logger.error(traceback.format_exc())
    return itemlist
def section(item):
    # Build a submenu (genres, years, alphabetical index, ...) by
    # scraping the matching group of links from the site's home page.
    logger.info()
    itemlist = []
    data = get_source(host)
    action = 'list_all'
    if item.section == 'quality':
        patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
    elif item.section == 'genre':
        patron = 'category menu-item-\d+><a href=(http:.*?)>(.*?)</a>'
    elif item.section == 'year':
        patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
    elif item.section == 'alpha':
        patron = '<li><a href=(.*?letter.*?)>(.*?)</a>'
    for url, title in re.compile(patron, re.DOTALL).findall(data):
        # "Ver más" is a see-more link, not a real section entry.
        if title != 'Ver más':
            itemlist.append(Item(channel=item.channel, title=title, url=url,
                                 action=action, section=item.section))
    return itemlist
def findvideos(item):
    """
    Extract the playable links from a movie page.

    Scrapes server/language/quality triples, resolves the real server
    names, and appends the "add to videolibrary" entry when supported.

    @param item: item whose .url points at the movie page
    @return: list of playable Items
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'domain=(.*?) class=.*?><span>.*?</span>.*?<span>\d+ - (.*?) - (.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, language, quality in matches:
        if url != '' and 'youtube' not in url:
            # Unknown language labels are kept as-is instead of raising KeyError.
            itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS.get(language, language),
                                       quality=quality, action='play'))
    # The '%s' placeholder is filled with the detected server name.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]' % (i.server.capitalize(),
                                                                                               i.language, i.quality))
    tmdb.set_infoLabels_itemlist(itemlist, True)
    # NOTE(review): removed a dead "itemlist.append(trailer)" wrapped in a
    # bare try/except — "trailer" was never defined, so it always raised a
    # silently-swallowed NameError and never appended anything.
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    # Channel/global search: the query string is appended to the search URL.
    logger.info()
    texto = texto.replace(" ", "+")
    item.url += texto
    if not texto:
        return []
    return list_all(item)
def newest(categoria):
    """
    Feed the global "Novedades" (newest) menu.

    @param categoria: one of the categories enabled in the channel json
    @return: latest items for that category, without the pagination entry;
             empty list on error
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        # The channel json enables 'peliculas', 'infantiles', 'terror' and
        # 'documentales'. 'peliculas' was missing: the front page lists the
        # latest movies (same URL the "Ultimas" menu entry uses).
        if categoria == 'peliculas':
            item.url = host
        elif categoria == 'infantiles':
            item.url = host+'/category/animacion'
        elif categoria == 'terror':
            item.url = host+'/category/terror'
        elif categoria == 'documentales':
            item.url = host+'/category/documental'
        itemlist = list_all(item)
        # Strip the pagination entry; guard against an empty result,
        # which previously raised IndexError on itemlist[-1].
        if itemlist and itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -2,7 +2,7 @@
"id": "descargasmix",
"name": "DescargasMIX",
"language": ["cast", "lat"],
"active": true,
"active": false,
"adult": false,
"thumbnail": "descargasmix.png",
"banner": "descargasmix.png",
@@ -77,4 +77,4 @@
"visible": true
}
]
}
}

View File

@@ -1,7 +1,7 @@
{
"id": "guaridavalencianista",
"name": "La Guarida valencianista",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "guaridavalencianista.png",
@@ -9,4 +9,4 @@
"categories": [
"documentary"
]
}
}

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import os
import xbmc
from core.item import Item
from platformcode import config, logger, platformtools
@@ -41,50 +40,50 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="", title="FAQ:",
thumbnail=get_thumb("help.png"),
folder=False))
if config.is_xbmc():
itemlist.append(Item(channel=item.channel, action="", title="FAQ:",
thumbnail=get_thumb("help.png"),
folder=False))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Cómo reportar un error?",
thumbnail=get_thumb("help.png"),
folder=False, extra="report_error"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Se pueden activar/desactivar los canales?",
thumbnail=get_thumb("help.png"),
folder=False, extra="onoff_canales"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible la sincronización automática con Trakt?",
thumbnail=get_thumb("help.png"),
folder=False, extra="trakt_sync"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible mostrar todos los resultados juntos en el buscador global?",
thumbnail=get_thumb("help.png"),
folder=False, extra="buscador_juntos"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces tardan en aparecer.",
thumbnail=get_thumb("help.png"),
folder=False, extra="tiempo_enlaces"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - La búsqueda de contenido no se hace correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_busquedacont"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Algún canal no funciona correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="canal_fallo"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces Torrent no funcionan.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_torrent"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - No se actualiza correctamente la videoteca.",
thumbnail=get_thumb("help.png"),
folder=True, extra="prob_bib"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Enlaces de interés",
thumbnail=get_thumb("help.png"),
folder=False, extra=""))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Se pueden activar/desactivar los canales?",
thumbnail=get_thumb("help.png"),
folder=False, extra="onoff_canales"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible la sincronización automática con Trakt?",
thumbnail=get_thumb("help.png"),
folder=False, extra="trakt_sync"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible mostrar todos los resultados juntos en el buscador global?",
thumbnail=get_thumb("help.png"),
folder=False, extra="buscador_juntos"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces tardan en aparecer.",
thumbnail=get_thumb("help.png"),
folder=False, extra="tiempo_enlaces"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - La búsqueda de contenido no se hace correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_busquedacont"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Algún canal no funciona correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="canal_fallo"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces Torrent no funcionan.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_torrent"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - No se actualiza correctamente la videoteca.",
thumbnail=get_thumb("help.png"),
folder=True, extra="prob_bib"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Enlaces de interés",
thumbnail=get_thumb("help.png"),
folder=False, extra=""))
return itemlist
@@ -192,6 +191,7 @@ def faq(item):
search.settings("")
elif item.extra == "report_error":
import xbmc
if config.get_platform(True)['num_version'] < 14:
log_name = "xbmc.log"
else:

View File

@@ -256,24 +256,30 @@ def findvideos(item):
for url in urls:
final_url = httptools.downloadpage('https:'+url).data
if 'vip' in url:
if language == 'VOSE':
sub = scrapertools.find_single_match(url, 'sub=(.*?)&')
subs = 'https:%s' % sub
if 'index' in url:
file_id = scrapertools.find_single_match(url, 'file=(.*?)&')
if language=='VOSE':
sub = scrapertools.find_single_match(url, 'sub=(.*?)&')
subs = 'https:%s' % sub
post = {'link':file_id}
post = {'link': file_id}
post = urllib.urlencode(post)
hidden_url = 'https://streamango.poseidonhd.com/repro//plugins/gkpluginsphp.php'
hidden_url = 'https://streamango.poseidonhd.com/repro/plugins/gkpluginsphp.php'
data_url = httptools.downloadpage(hidden_url, post=post).data
dict_vip_url = jsontools.load(data_url)
url = dict_vip_url['link']
else:
url = 'https:%s' % url
new_url = url.replace('embed','stream')
url = httptools.downloadpage(new_url, follow_redirects=False).headers.get('location')
#title = '%s [%s]' % (item.title, language)
itemlist.append(item.clone(title='[%s] [%s]', url=url, action='play', subtitle=subs,
language=language, quality=quality, infoLabels = item.infoLabels))
file_id = scrapertools.find_single_match(url, 'url=(.*?)&')
post = {'url': file_id}
post = urllib.urlencode(post)
hidden_url = 'https://streamango.poseidonhd.com/repro/r.php'
data_url = httptools.downloadpage(hidden_url, post=post, follow_redirects=False)
url = data_url.headers['location']
itemlist.append(item.clone(title = '[%s] [%s]', url=url, action='play', subtitle=subs,
language=language, quality=quality, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
# Requerido para Filtrar enlaces
@@ -289,6 +295,8 @@ def findvideos(item):
autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(

View File

@@ -3,7 +3,6 @@
import os
import re
import sys
import unicodedata
import urllib
import time
@@ -15,10 +14,10 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
host = "http://tv-vip.com"
def mainlist(item):
logger.info()
item.viewmode = "movie"
@@ -511,13 +510,13 @@ def series_library(item):
# Funcion unicamente para añadir/actualizar series a la libreria
lista_episodios = []
show = item.show.strip()
data_serie = anti_cloudflare(item.url, host=host, headers=headers)
data_serie = httptools.downloadpage(item.url).data
data_serie = jsontools.load(data_serie)
# Para series que en la web se listan divididas por temporadas
if data_serie["sortedPlaylistChilds"]:
for season_name in data_serie["sortedPlaylistChilds"]:
url_season = host + "/json/playlist/%s/index.json" % season_name['id']
data = anti_cloudflare(url_season, host=host, headers=headers)
data = httptools.downloadpage(url_season).data
data = jsontools.load(data)
if data["sortedRepoChilds"]:
for child in data["sortedRepoChilds"]:
@@ -612,8 +611,9 @@ def play(item):
uri_request = host + "/video-prod/s/uri?uri=%s&_=%s" % (uri, int(time.time()))
data = httptools.downloadpage(uri_request).data
data = jsontools.load(data)
url = item.url.replace(".tv-vip.com/transcoder/", ".tv-vip.info/c/transcoder/") + "?tt=" + str(data['tt']) + \
url = item.url.replace(".tv-vip.com/transcoder/", ".tv-vip.in/c/transcoder/") + "?tt=" + str(data['tt']) + \
"&mm=" + data['mm'] + "&bb=" + data['bb']
url += "|User-Agent=Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Mobile Safari/537.36"
itemlist.append(item.clone(action="play", server="directo", url=url, folder=False))
return itemlist
@@ -622,7 +622,7 @@ def listas(item):
logger.info()
# Para añadir listas a la videoteca en carpeta CINE
itemlist = []
data = anti_cloudflare(item.url, host=host, headers=headers)
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
head = header_string + get_cookie_value()
for child in data["sortedRepoChilds"]:

View File

@@ -3,6 +3,6 @@
"name": "Tengo una URL",
"active": false,
"adult": false,
"thumbnail": "url.png",
"banner": "url.png"
"thumbnail": "tengourl.png",
"banner": "tengourl.png"
}

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -24,7 +25,7 @@ def mainlist(item):
def search(item, texto):
logger.info("texto=" + texto)
if not texto.startswith("http://"):
if not texto.startswith("http"):
texto = "http://" + texto
itemlist = []
@@ -38,7 +39,7 @@ def search(item, texto):
itemlist.append(
Item(channel=item.channel, action="play", url=texto, server="directo", title="Ver enlace directo"))
else:
data = scrapertools.downloadpage(texto)
data = httptools.downloadpage(texto).data
itemlist = servertools.find_video_items(data=data)
for item in itemlist:
item.channel = "url"

View File

@@ -18,6 +18,9 @@ from core.cloudflare import Cloudflare
from platformcode import config, logger
from platformcode.logger import WebErrorException
## Obtiene la versión del addon
__version = config.get_addon_version()
cookies_lock = Lock()
cj = cookielib.MozillaCookieJar()
@@ -130,7 +133,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if timeout is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None: timeout = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
logger.info("----------------------------------------------")
logger.info("downloadpage")
logger.info("downloadpage Alfa: %s" %__version)
logger.info("----------------------------------------------")
logger.info("Timeout: %s" % timeout)
logger.info("URL: " + url)
@@ -277,3 +280,5 @@ class NoRedirectHandler(urllib2.HTTPRedirectHandler):
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302

View File

@@ -15,6 +15,27 @@ __settings__ = xbmcaddon.Addon(id="plugin.video." + PLUGIN_NAME)
__language__ = __settings__.getLocalizedString
def get_addon_version(linea_inicio=0, total_lineas=2):
    """
    Return the addon version string parsed from the addon.xml file.

    Only a slice of the file is read: lines before ``linea_inicio`` are
    skipped and at most ``total_lineas`` lines are collected — the
    ``<addon ...>`` tag normally sits within the first two lines.

    Returns "???" when the version attribute cannot be found.
    """
    import os  # local import: the module-level import block is outside this view

    # os.path.join instead of the hard-coded "\\" separator, which broke
    # version detection on every non-Windows platform (Linux/Android/macOS).
    path = os.path.join(get_runtime_path(), "addon.xml")

    data = []
    # "with" guarantees the file handle is closed even if reading raises.
    with open(path, "rb") as f:
        for x, line in enumerate(f):
            if x < linea_inicio:
                continue
            if len(data) == total_lineas:
                break
            data.append(line)
    data1 = "".join(data)

    # Example tag being matched:
    # <addon id="plugin.video.alfa" name="Alfa" version="2.5.21" provider-name="Alfa Addon">
    aux = re.findall('<addon id="plugin.video.alfa" name="Alfa" version="([^"]+)"',
                     data1, re.MULTILINE | re.DOTALL)
    if aux:
        return aux[0]
    return "???"
def get_platform(full_version=False):
"""
Devuelve la información la version de xbmc o kodi sobre el que se ejecuta el plugin

View File

@@ -18,6 +18,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = data.replace("\\'", "'")
media_url = scrapertools.find_single_match(data, '{type:"video/mp4",src:"([^"]+)"}')
if not media_url:
media_url = scrapertools.find_single_match(data, '"file":"([^"]+)')
logger.info("media_url=" + media_url)
video_urls = list()