Merge pull request #2 from alfa-addon/master

update
This commit is contained in:
alfa-jor
2017-08-05 20:04:50 +02:00
committed by GitHub
9 changed files with 98 additions and 34 deletions

2
plugin.video.alfa/channels/allpeliculas.py Executable file → Normal file
View File

@@ -402,7 +402,7 @@ def findvideostv(item):
idioma = IDIOMAS.get(idiomas_videos.get(language))
titulo = "%s [" + idioma + "] (" + calidad_videos.get(quality) + ")"
itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode", server=server))
itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode"))
# Enlace Descarga
patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \

3
plugin.video.alfa/channels/anitoonstv.py Executable file → Normal file
View File

@@ -3,7 +3,6 @@
import re
from channels import renumbertools
from channelselector import get_thumb
from core import config
from core import httptools
from core import logger
@@ -17,7 +16,7 @@ host = "http://www.anitoonstv.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("thumb_channels_tvshow.png")
thumb_series = config.get_thumb("thumb_channels_tvshow.png")
itemlist = list()

5
plugin.video.alfa/channels/cartoonlatino.py Executable file → Normal file
View File

@@ -3,7 +3,6 @@
import re
from channels import renumbertools
from channelselector import get_thumb
from core import config
from core import httptools
from core import logger
@@ -18,8 +17,8 @@ host = "http://www.cartoon-latino.com/"
def mainlist(item):
logger.info()
thumb_series = get_thumb("thumb_channels_tvshow.png")
thumb_series_az = get_thumb("thumb_channels_tvshow_az.png")
thumb_series = config.get_thumb("thumb_channels_tvshow.png")
thumb_series_az = config.get_thumb("thumb_channels_tvshow_az.png")
itemlist = list()

View File

@@ -372,13 +372,10 @@ def play(item):
elif "links" in item.url or "www.cinetux.me" in item.url:
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
logger.info("Intel11 %s" %scrapedurl)
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data, '(?i)<frame src="(http[^"]+)')
logger.info("Intel22 %s" %scrapedurl)
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data, 'replace."([^"]+)"')
logger.info("Intel33 %s" %scrapedurl)
elif "goo.gl" in scrapedurl:
scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get("location", "")
item.url = scrapedurl

View File

@@ -174,7 +174,7 @@ def episodiosxtemp(item):
patron = '<td><h3 class=".*?href="([^"]+)".*?">([^<]+).*?td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
for scrapedurl, scrapedtitle in matches[::-1]:
url = scrapedurl
contentEpisodeNumber = re.findall(r'.*?x([^\/]+)\/', url)
title = scrapedtitle

View File

@@ -4,7 +4,6 @@ import re
from core import config
from core import httptools
from core import jsontools as json
from core import logger
from core import scrapertools
from core import tmdb
@@ -124,6 +123,7 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
item.url = "%s/buscar.php?apikey=%s&sid=%s&buscar=%s&modo=[fichas]&start=0" % (host, apikey, sid, texto)
try:
return busqueda(item)
@@ -140,8 +140,13 @@ def busqueda(item):
data = httptools.downloadpage(item.url).data
data = xml2dict(data)
if type(data["Data"]["Fichas"]["Ficha"]) == dict:
searched_data = [data["Data"]["Fichas"]["Ficha"]]
else:
searched_data = data["Data"]["Fichas"]["Ficha"]
for f in data["Data"]["Fichas"]["Ficha"]:
for f in searched_data:
f["Title"] = f["Title"].replace("<![CDATA[", "").replace("]]>", "")
title = "%s (%s)" % (f["Title"], f["Year"])
infolab = {'year': f["Year"]}
thumbnail = f["Poster"]
@@ -157,7 +162,6 @@ def busqueda(item):
else:
tipo = "movie"
show = ""
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, text_color=color2,
contentTitle=f["Title"], show=show, contentType=tipo, infoLabels=infolab,
thumbnail=thumbnail))
@@ -219,7 +223,7 @@ def newest(categoria):
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
logger.error("%s" %line)
return []
return itemlist
@@ -299,7 +303,7 @@ def fichas(item):
# data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
fichas_marca = {'1': 'Siguiendo', '2': 'Pendiente', '3': 'Favorita', '4': 'Vista', '5': 'Abandonada'}
patron = '<div class="c_fichas_image".*?href="\.([^"]+)".*?src-data="\.([^"]+)".*?' \
patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src-data="\.([^"]+)".*?' \
'<div class="c_fichas_data".*?marked="([^"]*)".*?serie="([^"]*)".*?' \
'<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -776,7 +780,7 @@ def acciones_cuenta(item):
for category, contenido in matches:
itemlist.append(item.clone(action="", title=category, text_color=color3))
patron = '<div class="c_fichas_image">.*?href="\.([^"]+)".*?src="\.([^"]+)".*?serie="([^"]*)".*?' \
patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="\.([^"]+)".*?serie="([^"]*)".*?' \
'<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
entradas = scrapertools.find_multiple_matches(contenido, patron)
for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas:
@@ -940,17 +944,9 @@ def xml2dict(xmldata):
Un diccionario construido a partir de los campos del XML.
"""
from core import filetools
import sys
parse = globals().get(sys._getframe().f_code.co_name)
# if xmldata is None and file is None:
# raise Exception("No hay nada que convertir!")
# elif xmldata is None:
# if not filetools.exists(file):
# raise Exception("El archivo no existe!")
# xmldata = open(file, "rb").read()
matches = re.compile("<(?P<tag>[^>]+)>[\n]*[\s]*[\t]*(?P<value>.*?)[\n]*[\s]*[\t]*<\/(?P=tag)\s*>",
re.DOTALL).findall(xmldata)
@@ -983,4 +979,4 @@ def xml2dict(xmldata):
return_dict[tag] = value
return return_dict
return return_dict

View File

@@ -16,8 +16,6 @@ IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'vo': 'VO', 'vos':
list_idiomas = IDIOMAS.values()
CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
CAPITULOS_DE_ESTRENO_STR = "Capítulos de Estreno"
def mainlist(item):
logger.info()
@@ -32,16 +30,10 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Todas las series", action="series",
url=urlparse.urljoin(HOST, "listado/"), thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, title="Capítulos de estreno", action="home_section", extra=CAPITULOS_DE_ESTRENO_STR,
url=HOST, thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, title="Último actualizado", action="home_section", extra="Último Actualizado",
Item(channel=item.channel, title="Capítulos estrenados recientemente", action="home_section", extra="Series Online : Capítulos estrenados recientemente",
url=HOST, thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, title="Series más vistas", action="series", extra="Series Más vistas",
url=urlparse.urljoin(HOST, "listado-visto/"), thumbnail=thumb_series))
itemlist.append(
Item(channel=item.channel, title="Series menos vistas", action="home_section", extra="Series Menos vistas",
url=HOST, thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, title="Últimas fichas creadas", action="series",
url=urlparse.urljoin(HOST, "fichas_creadas/"), thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, title="Series por género", action="generos",

View File

@@ -0,0 +1,49 @@
{
"active": true,
"changes": [
{
"date": "03/08/2017",
"description": "Versión inicial"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://vidlox.tv/embed-.*?.html)",
"url": "\\1"
}
]
},
"free": true,
"id": "vidlox",
"name": "vidlox",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/wathgtvin/logo-vidlox1.png",
"version": 1
}

View File

@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Conector para vidlox
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import logger
from core import scrapertools
def test_video_exists(page_url):
    """Check whether the vidlox page still hosts the video.

    Downloads the page and looks for the Spanish word "borrado"
    ("deleted") in the response body.

    Returns a (exists, message) tuple: (True, "") when the video is
    available, or (False, reason) when it has been removed.
    """
    logger.info("(page_url='%s')" % page_url)
    page_body = httptools.downloadpage(page_url).data
    was_deleted = "borrado" in page_body
    if was_deleted:
        return False, "[vidlox] El fichero ha sido borrado"
    return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
    """Extract the playable stream URLs from a vidlox embed page.

    Parameters other than page_url are unused here but kept for the
    common connector interface (user/password/video_password).

    Returns a list of [label, url] pairs, where the label is the last
    four characters of the stream filename (its extension) tagged with
    the server name, e.g. ".mp4 [vidlox]".
    """
    logger.info("(page_url='%s')" % page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    # The player config embeds its stream list as: sources: ["url1", "url2", ...]
    bloque = scrapertools.find_single_match(data, 'sources:.\[.*?]')
    matches = scrapertools.find_multiple_matches(bloque, '(http.*?)"')
    for videourl in matches:
        # Fixed: original had a duplicated assignment
        # ("extension = extension = ..."), a harmless but clearly
        # unintended typo.
        extension = scrapertools.get_filename_from_url(videourl)[-4:]
        video_urls.append(["%s [vidlox]" % extension, videourl])
    return video_urls