Merge remote-tracking branch 'alfa-addon/master' into Fixes

This commit is contained in:
Unknown
2017-09-28 13:42:56 -03:00
304 changed files with 6339 additions and 7155 deletions

View File

@@ -233,9 +233,12 @@ def get_localized_string(code):
translationsfile = open(TRANSLATION_FILE_PATH, "r")
translations = translationsfile.read()
translationsfile.close()
cadenas = re.findall('<string id="%s">([^<]+)<' % code, translations)
cadenas = re.findall('msgctxt\s*"#%s"\nmsgid\s*"(.*?)"\nmsgstr\s*"(.*?)"' % code, translations)
if len(cadenas) > 0:
dev = cadenas[0]
dev = cadenas[0][1]
if not dev:
dev = cadenas[0][0]
else:
dev = "%d" % code
@@ -366,7 +369,7 @@ configfilepath = os.path.join(get_data_path(), "settings.xml")
if not os.path.exists(get_data_path()):
os.mkdir(get_data_path())
# Literales
TRANSLATION_FILE_PATH = os.path.join(get_runtime_path(), "resources", "language", "Spanish", "strings.xml")
TRANSLATION_FILE_PATH = os.path.join(get_runtime_path(), "resources", "language", "Spanish", "strings.po")
load_settings()
# modo adulto:

View File

@@ -677,16 +677,17 @@ class platform(Platformtools):
if not "label" in c: continue
# Obtenemos el valor
if not c["id"] in dict_values:
if not callback:
c["value"] = config.get_setting(c["id"], **kwargs)
if "id" in c:
if not c["id"] in dict_values:
if not callback:
c["value"] = config.get_setting(c["id"], **kwargs)
else:
c["value"] = c["default"]
dict_values[c["id"]] = c["value"]
else:
c["value"] = c["default"]
dict_values[c["id"]] = c["value"]
else:
c["value"] = dict_values[c["id"]]
c["value"] = dict_values[c["id"]]
# Translation
if c['label'].startswith('@') and unicode(c['label'][1:]).isnumeric():

View File

@@ -285,6 +285,7 @@ function get_response(data) {
else {
keypress = "";
};
if (!data.items[x].value) data.items[x].value = "";
itemlist[data.items[x].category].push(replace_list(html.config.text, {
"item_color": data.items[x].color,
"item_label": data.items[x].label,

View File

@@ -7,7 +7,7 @@
<setting type="sep"/>
<setting id="default_action" type="enum" lvalues="30006|30007|30008" label="30005" default="0"/>
<setting id="thumbnail_type" type="enum" lvalues="30011|30012|30200" label="30010" default="2"/>
<setting id="channel_language" type="labelenum" values="all|es|en|it" label="30019" default="all"/>
<setting id="channel_language" type="labelenum" values="all|cast|lat" label="30019" default="all"/>
<setting id="debug" type="bool" label="30003" default="false"/>
<setting label="Uso de servidores" type="lsep"/>
<setting id="resolve_priority" type="enum" label="Método prioritario" values="Free primero|Premium primero|Debriders primero" default="0"/>

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="1.9.0" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.1.0" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -18,14 +18,12 @@
<screenshot>resources/media/general/ss/4.jpg</screenshot>
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
[I]- serieslan
- streamplay
- descargasmix
- canalpelis - Canal nuevo
- fixes internos[/I]
[COLOR green]Gracias a [COLOR yellow][B]msdos[/B][/COLOR] por su colaboración en esta versión[/COLOR]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» canalpelis » hdfull
» xdvideos » playmax
» cinetux » gnula
» flashx » rapidvideo
¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -3,7 +3,7 @@
"name": "Allcalidad",
"active": true,
"adult": false,
"language": "es",
"language": ["lat"],
"thumbnail": "https://s22.postimg.org/irnlwuizh/allcalidad1.png",
"banner": "https://s22.postimg.org/9y1athlep/allcalidad2.png",
"version": 1,
@@ -15,8 +15,7 @@
],
"categories": [
"movie",
"direct",
"latino"
"direct"
],
"settings": [
{

View File

@@ -111,6 +111,8 @@ def findvideos(item):
match = scrapertools.find_multiple_matches(bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
for url in match:
titulo = "Ver en: %s"
if "goo.gl" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "youtube" in url:
titulo = "[COLOR = yellow]Ver trailer: %s[/COLOR]"
if "ad.js" in url or "script" in url:
@@ -123,7 +125,6 @@ def findvideos(item):
title = titulo,
fulltitle = item.fulltitle,
thumbnail = item.thumbnail,
server = "",
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
@@ -139,3 +140,8 @@ def findvideos(item):
infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
extra="library"))
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -1,7 +1,7 @@
{
"id": "allpeliculas",
"name": "Allpeliculas",
"language": "es",
"language": ["lat"],
"active": true,
"adult": false,
"version": 1,
@@ -31,7 +31,6 @@
"banner": "allpeliculas.png",
"categories": [
"movie",
"latino",
"vos",
"tvshow"
],

View File

@@ -271,7 +271,7 @@ def findvideos(item):
idioma = IDIOMAS.get(idiomas_videos.get(language))
titulo = "%s [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma))
itemlist.append(item.clone(action="play", title=titulo, url=url, language = idioma, extra=idioma))
# Enlace Descarga
patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?id_lang=' \
@@ -280,7 +280,7 @@ def findvideos(item):
for calidad, servidor_num, language, url in matches:
idioma = IDIOMAS.get(idiomas_videos.get(language))
titulo = "[%s] [" + idioma + "] [" + calidad_videos.get(calidad) + "]"
itemlist.append(item.clone(action="play", title=titulo, url=url, extra=idioma))
itemlist.append(item.clone(action="play", title=titulo, url=url, language = idioma, extra=idioma))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.sort(key=lambda item: (item.extra, item.server))
@@ -401,7 +401,7 @@ def findvideostv(item):
idioma = IDIOMAS.get(idiomas_videos.get(language))
titulo = "%s [" + idioma + "] (" + calidad_videos.get(quality) + ")"
itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode"))
itemlist.append(item.clone(action="play", title=titulo, url=url, language = idioma, contentType="episode"))
# Enlace Descarga
patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode="%s' \
@@ -412,7 +412,7 @@ def findvideostv(item):
for quality, servidor_num, episode, language, url in matches:
idioma = IDIOMAS.get(idiomas_videos.get(language))
titulo = "%s [" + idioma + "] (" + calidad_videos.get(quality) + ")"
itemlist.append(item.clone(action="play", title=titulo, url=url, contentType="episode", server=server))
itemlist.append(item.clone(action="play", title=titulo, url=url, language = idioma,contentType="episode", server=server))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.sort(key=lambda item: (int(item.infoLabels['episode']), item.title))

View File

@@ -3,7 +3,7 @@
"name": "Alltorrent",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "http://imgur.com/sLaXHvp.png",
"version": 1,
"changes": [

View File

@@ -3,7 +3,7 @@
"name": "AnimeFLV",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "animeflv.png",
"banner": "animeflv.png",
"version": 1,

View File

@@ -1,14 +1,17 @@
# -*- coding: utf-8 -*-
import re
import time
import urlparse
import urllib
from channels import renumbertools
from core import httptools
from core import jsontools
from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import logger
from platformcode import config, logger
HOST = "https://animeflv.net/"
@@ -239,7 +242,7 @@ def episodios(item):
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
title = "%sx%s : %s" % (season, str(episode).zfill(2), item.title)
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
fanart=item.thumbnail, contentType="episode"))
@@ -260,11 +263,15 @@ def episodios(item):
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
title = "%sx%s : %s" % (season, str(episode).zfill(2), item.title)
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
fanart=item.thumbnail, contentType="episode"))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca",
action="add_serie_to_library", extra="episodios"))
return itemlist
@@ -273,42 +280,52 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
list_videos.extend(scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"'))
# logger.info("data=%s " % list_videos)
download_list = scrapertools.find_multiple_matches(data, 'href="http://ouo.io/s/y0d65LCP\?s=([^"]+)"')
for i in download_list:
list_videos.append(urllib.unquote_plus(i))
aux_url = []
cldup = False
for e in list_videos:
if e.startswith("https://s3.animeflv.com/embed.php?"):
server = scrapertools.find_single_match(e, 'server=(.*?)&')
e = e.replace("embed", "check").replace("https", "http")
data = httptools.downloadpage(e).data.replace("\\", "")
url_api = "https://s3.animeflv.com/check.php?server=%s&v=%s"
# izanagi, yourupload, hyperion
if e.startswith("https://s3.animeflv.com/embed"):
server, v = scrapertools.find_single_match(e, 'server=([^&]+)&v=(.*?)$')
data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")
if '{"error": "Por favor intenta de nuevo en unos segundos", "sleep": 3}' in data:
import time
time.sleep(3)
data = httptools.downloadpage(e).data.replace("\\", "")
data = httptools.downloadpage(url_api % (server, v)).data.replace("\\", "")
video_urls = []
if server == "gdrive":
data = jsontools.load(data)
for s in data.get("sources", []):
video_urls.append([s["label"], s["type"], s["file"]])
if video_urls:
video_urls.sort(key=lambda v: int(v[0]))
itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
video_urls=video_urls))
else:
if server != "hyperion":
url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
if url:
itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
else:
# pattern = '"direct":"([^"]+)"'
# url = scrapertools.find_single_match(data, pattern)
# itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
pattern = '"label":([^,]+),"type":"video/mp4","file":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, pattern)
video_urls = []
for label, url in matches:
video_urls.append([label, "mp4", url])
if video_urls:
video_urls.sort(key=lambda u: int(u[0]))
itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
video_urls=video_urls))
else:
if e.startswith("https://cldup.com") and not cldup:
itemlist.append(item.clone(title="Enlace encontrado en Cldup", action="play", url=e))
cldup = True
aux_url.append(e)
from core import servertools
itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
for videoitem in itemlist:
videoitem.fulltitle = item.fulltitle

View File

@@ -3,7 +3,7 @@
"name": "Animeflv.ME",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/x9AdvBx.png",
"banner": "http://i.imgur.com/dTZwCPq.png",
"version": 1,

View File

@@ -8,6 +8,7 @@ from channels import renumbertools
from core import filetools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
@@ -23,7 +24,7 @@ REGEX_TITLE = r'(?:bigChar_a" href=.+?>)(.+?)(?:</a>)'
REGEX_THUMB = r'src="(http://media.animeflv\.me/uploads/thumbs/[^"]+?)"'
REGEX_PLOT = r'<span class="info">Línea de historia:</span><p><span>(.*?)</span>'
REGEX_URL = r'href="(http://animeflv\.me/Anime/[^"]+)">'
REGEX_SERIE = r'{0}.+?{1}([^<]+?)</a><p>(.+?)</p>'.format(REGEX_THUMB, REGEX_URL)
REGEX_SERIE = r'%s.+?%s([^<]+?)</a><p>(.+?)</p>' % (REGEX_THUMB, REGEX_URL)
REGEX_EPISODE = r'href="(http://animeflv\.me/Ver/[^"]+?)">(?:<span.+?</script>)?(.+?)</a></td><td>(\d+/\d+/\d+)</td></tr>'
REGEX_GENERO = r'<a href="(http://animeflv\.me/genero/[^\/]+/)">([^<]+)</a>'
@@ -157,20 +158,13 @@ def letras(item):
base_url = 'http://animeflv.me/ListadeAnime?c='
itemlist = []
itemlist.append(Item(channel=item.channel, action="series", title="#",
url=base_url + "#", viewmode="movies_with_plot"))
itemlist = list()
itemlist.append(Item(channel=item.channel, action="series", title="#", url=base_url + "#"))
# Itera sobre las posiciones de las letras en la tabla ascii
# 65 = A, 90 = Z
for i in xrange(65, 91):
letter = chr(i)
for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
logger.debug("title=[%s], url=[%s], thumbnail=[]" % (letter, base_url + letter))
logger.debug("title=[{0}], url=[{1}], thumbnail=[]".format(
letter, base_url + letter))
itemlist.append(Item(channel=item.channel, action="series", title=letter,
url=base_url + letter, viewmode="movies_with_plot"))
itemlist.append(Item(channel=item.channel, action="series", title=letter, url=base_url + letter))
return itemlist
@@ -182,14 +176,12 @@ def generos(item):
html = get_url_contents(item.url)
generos = re.findall(REGEX_GENERO, html)
list_genre = re.findall(REGEX_GENERO, html)
for url, genero in generos:
logger.debug(
"title=[{0}], url=[{1}], thumbnail=[]".format(genero, url))
for url, genero in list_genre:
logger.debug("title=[%s], url=[%s], thumbnail=[]" % (genero, url))
itemlist.append(Item(channel=item.channel, action="series", title=genero, url=url,
plot='', viewmode="movies_with_plot"))
itemlist.append(Item(channel=item.channel, action="series", title=genero, url=url))
return itemlist
@@ -198,28 +190,26 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
item.url = "{0}{1}".format(item.url, texto)
item.url = "%s%s" % (item.url, texto)
html = get_url_contents(item.url)
try:
# Se encontro un solo resultado y se redicciono a la página de la serie
if html.find('<title>Ver') >= 0:
series = [__extract_info_from_serie(html)]
show_list = [__extract_info_from_serie(html)]
# Se obtuvo una lista de resultados
else:
series = __find_series(html)
show_list = __find_series(html)
items = []
for serie in series:
title, url, thumbnail, plot = serie
for show in show_list:
title, url, thumbnail, plot = show
logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
title, url, thumbnail))
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail))
items.append(Item(channel=item.channel, action="episodios", title=title,
url=url, thumbnail=thumbnail, plot=plot,
show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
except:
import sys
for line in sys.exc_info():
@@ -234,25 +224,21 @@ def series(item):
page_html = get_url_contents(item.url)
series = __find_series(page_html)
show_list = __find_series(page_html)
items = []
for serie in series:
title, url, thumbnail, plot = serie
for show in show_list:
title, url, thumbnail, plot = show
logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
title, url, thumbnail))
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail))
items.append(Item(channel=item.channel, action="episodios", title=title, url=url,
thumbnail=thumbnail, plot=plot, show=title, viewmode="movies_with_plot",
context=renumbertools.context(item)))
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
url_next_page = __find_next_page(page_html)
if url_next_page:
items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente",
url=url_next_page, thumbnail="", plot="", folder=True,
viewmode="movies_with_plot"))
items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_next_page))
return items
@@ -265,7 +251,10 @@ def episodios(item):
html_serie = get_url_contents(item.url)
info_serie = __extract_info_from_serie(html_serie)
plot = info_serie[3] if info_serie else ''
if info_serie[3]:
plot = info_serie[3]
else:
plot = ''
episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL)
@@ -280,21 +269,17 @@ def episodios(item):
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
title = "{0}x{1:02d} {2} ({3})".format(
season, episode, "Episodio " + str(episode), date)
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# El enlace pertenece a una pelicula
else:
title = "{0} ({1})".format(title, date)
title = "%s (%s)" % (title, date)
item.url = url
es_pelicula = True
logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
title, url, item.thumbnail))
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, item.thumbnail))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
thumbnail=item.thumbnail, plot=plot, show=item.show,
fulltitle="{0} {1}".format(item.show, title),
viewmode="movies_with_plot", folder=True))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title)))
# El sistema soporta la videoteca y se encontro por lo menos un episodio
# o pelicula
@@ -315,7 +300,6 @@ def episodios(item):
itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios",
url=item.url, action="download_all_episodes", extra="episodios",
show=item.show))
return itemlist
@@ -325,22 +309,27 @@ def findvideos(item):
itemlist = []
page_html = get_url_contents(item.url)
regex_api = r'http://player\.animeflv\.me/[^\"]+'
iframe_url = scrapertools.find_single_match(page_html, regex_api)
iframe_html = get_url_contents(iframe_url)
itemlist.extend(servertools.find_video_items(data=iframe_html))
qualities = ["360", "480", "720", "1080"]
for videoitem in itemlist:
videoitem.fulltitle = item.fulltitle
videoitem.title = "%s en calidad [%s]" % (videoitem.server, qualities[1])
videoitem.channel = item.channel
videoitem.thumbnail = item.thumbnail
regex_video_list = r'var part = \[([^\]]+)'
videos_html = scrapertools.find_single_match(iframe_html, regex_video_list)
videos = re.findall('"([^"]+)"', videos_html, re.DOTALL)
qualities = ["360", "480", "720", "1080"]
for quality_id, video_url in enumerate(videos):
itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show),
title="Ver en calidad [{0}]".format(qualities[quality_id]), plot=item.plot,
folder=True, fulltitle=item.title, viewmode="movies_with_plot"))
title="Ver en calidad [%s]" % (qualities[quality_id]), plot=item.plot,
fulltitle=item.title))
return __sort_by_quality(itemlist)

View File

@@ -3,13 +3,12 @@
"name": "AnimeFLV.RU",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/5nRR9qq.png",
"banner": "animeflv_ru.png",
"version": 1,
"compatible": {
"python": "2.7.9",
"addon_version": "4.2.1"
"python": "2.7.9"
},
"changes": {
"change": [

View File

@@ -3,7 +3,7 @@
"name": "Animeid",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "animeid.png",
"banner": "animeid.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "AnimesHD",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "https://s21.postimg.org/b43i3ljav/animeshd.png",
"banner": "https://s4.postimg.org/lulxulmql/animeshd-banner.png",
"version": 1,
@@ -22,7 +22,6 @@
}
],
"categories": [
"latino",
"anime"
]
}

4
plugin.video.alfa/channels/animeshd.py Executable file → Normal file
View File

@@ -162,7 +162,7 @@ def episodios(item):
for scrapedurl, scrapedlang, scrapedtitle in matches:
language = scrapedlang
title = scrapedtitle + ' (%s)' % language
title = scrapedtitle
url = scrapedurl
itemlist.append(item.clone(title=title, url=url, action='findvideos', language=language))
return itemlist
@@ -176,7 +176,7 @@ def findvideos(item):
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
title = item.title+' (%s)'%videoitem.server
title = item.title
videoitem.channel = item.channel
videoitem.title = title
videoitem.action = 'play'

View File

@@ -3,7 +3,7 @@
"name": "AniToons TV",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/9Zu5NBc.png",
"banner": "http://i.imgur.com/JQSXCaB.png",
"version": 1,
@@ -19,6 +19,6 @@
],
"categories": [
"tvshow",
"latino"
"anime"
]
}

View File

@@ -10,6 +10,17 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'okru',
'netutv',
'rapidvideo'
]
list_quality = ['default']
host = "http://www.anitoonstv.com"
@@ -17,6 +28,7 @@ host = "http://www.anitoonstv.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
@@ -29,6 +41,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="lista", title="Pokemon", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -73,10 +86,10 @@ def lista(item):
if "&" in show:
cad = title.split("xy")
show = cad[0]
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=show, action="episodios", show=show,
context=renumbertools.context(item)))
context=context1))
tmdb.set_infoLabels(itemlist)
return itemlist
@@ -106,19 +119,15 @@ def episodios(item):
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
date = name
title = "{0}x{1:02d} {2} ({3})".format(
season, episode, "Episodio " + str(episode), date)
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# title = str(temp)+"x"+cap+" "+name
url = host + "/" + link
if "NO DISPONIBLE" in name:
name = name
else:
if "NO DISPONIBLE" not in name:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, thumbnail=scrapedthumbnail,
plot=scrapedplot, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
@@ -132,6 +141,7 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_vid = scrapertools.find_single_match(data1, '<div class="videos">(.+?)<\/div><div .+?>')
# name = scrapertools.find_single_match(data,'<span>Titulo.+?<\/span>([^<]+)<br>')
scrapedplot = scrapertools.find_single_match(data, '<br><span>Descrip.+?<\/span>([^<]+)<br>')
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
@@ -139,13 +149,16 @@ def findvideos(item):
for server, quality, url in itemla:
if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ")
server = server.lower()
server = server.strip()
if "ok" in server:
server = server.lower().strip()
if "ok" == server:
server = 'okru'
itemlist.append(
item.clone(url=url, action="play", server=server, contentQuality=quality, thumbnail=scrapedthumbnail,
plot=scrapedplot, title="Enlace encontrado en %s: [%s ]" % (server.capitalize(), quality)))
if "netu" == server:
continue
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item)
return itemlist
@@ -155,18 +168,15 @@ def play(item):
itemlist = []
# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
url=devuelve[0][1], thumbnail=item.thumbnail))
return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "areadocumental",
"name": "Area-Documental",
"language": "es",
"language": ["cast", "lat"],
"adult": false,
"active": true,
"version": 1,

15
plugin.video.alfa/channels/areadocumental.py Executable file → Normal file
View File

@@ -135,18 +135,19 @@ def entradas(item):
scrapedthumbnail = host + urllib.quote(scrapedthumbnail)
title = scrapedtitle
if "full_hd" in extra:
scrapedtitle += " [COLOR gold][3D][/COLOR]"
quality = "3D"
elif "720" in extra:
scrapedtitle += " [COLOR gold][720p][/COLOR]"
quality ='720'
else:
scrapedtitle += " [COLOR gold][SD][/COLOR]"
quality = 'SD'
year = year.replace("\xc2\xa0", "").replace(" ", "")
if not year.isspace() and year != "":
infolab['year'] = int(year)
scrapedtitle += " (" + year + ")"
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=title,
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels=infolab))
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title,
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels=infolab, contentTitle =
title, quality = quality))
next_page = scrapertools.find_single_match(data2, '<a href="([^"]+)"> ></a>')
if next_page:
@@ -171,7 +172,7 @@ def findvideos(item):
url_sub = host + urllib.quote(url_sub)
title = "Ver video en [[COLOR %s]%s[/COLOR]] Sub %s" % (color3, quality, label)
itemlist.append(item.clone(action="play", server="directo", title=title,
url=url, subtitle=url_sub, extra=item.url, calidad=quality))
url=url, subtitle=url_sub, extra=item.url, quality=quality, language = label))
return itemlist

View File

@@ -43,6 +43,10 @@ def show_option(channel, itemlist, text_color='yellow', thumbnail=None, fanart=N
:return:
'''
logger.info()
if not config.is_xbmc():
return itemlist
if thumbnail == None:
thumbnail = 'https://s7.postimg.org/65ooga04b/Auto_Play.png'
if fanart == None:
@@ -74,228 +78,231 @@ def start(itemlist, item):
:return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio
'''
logger.info()
global autoplay_node
if not config.is_xbmc():
platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
return itemlist
else:
if not autoplay_node:
# Obtiene el nodo AUTOPLAY desde el json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
# Agrega servidores y calidades que no estaban listados a autoplay_node
new_options = check_value(item.channel, itemlist)
global autoplay_node
if not autoplay_node:
# Obtiene el nodo AUTOPLAY desde el json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
# Obtiene el nodo del canal desde autoplay_node
channel_node = autoplay_node.get(item.channel, {})
# Obtiene los ajustes des autoplay para este canal
settings_node = channel_node.get('settings', {})
if not item.channel in autoplay_node:
return itemlist
if settings_node['active']:
url_list_valid = []
autoplay_list = []
favorite_servers = []
favorite_quality = []
# Agrega servidores y calidades que no estaban listados a autoplay_node
new_options = check_value(item.channel, itemlist)
# Guarda el valor actual de "Accion y Player Mode" en preferencias
user_config_setting_action = config.get_setting("default_action")
user_config_setting_player = config.get_setting("player_mode")
# Habilita la accion "Ver en calidad alta" (si el servidor devuelve más de una calidad p.e. gdrive)
if user_config_setting_action != 2:
config.set_setting("default_action", 2)
if user_config_setting_player != 0:
config.set_setting("player_mode", 0)
# Obtiene el nodo del canal desde autoplay_node
channel_node = autoplay_node.get(item.channel, {})
# Obtiene los ajustes des autoplay para este canal
settings_node = channel_node.get('settings', {})
# Informa que AutoPlay esta activo
platformtools.dialog_notification('AutoPlay Activo', '', sound=False)
if settings_node['active']:
url_list_valid = []
autoplay_list = []
favorite_servers = []
favorite_quality = []
# Prioridades a la hora de ordenar itemlist:
# 0: Servidores y calidades
# 1: Calidades y servidores
# 2: Solo servidores
# 3: Solo calidades
# 4: No ordenar
if settings_node['custom_servers'] and settings_node['custom_quality']:
priority = settings_node['priority'] # 0: Servidores y calidades o 1: Calidades y servidores
elif settings_node['custom_servers']:
priority = 2 # Solo servidores
elif settings_node['custom_quality']:
priority = 3 # Solo calidades
else:
priority = 4 # No ordenar
# Guarda el valor actual de "Accion y Player Mode" en preferencias
user_config_setting_action = config.get_setting("default_action")
user_config_setting_player = config.get_setting("player_mode")
# Habilita la accion "Ver en calidad alta" (si el servidor devuelve más de una calidad p.e. gdrive)
if user_config_setting_action != 2:
config.set_setting("default_action", 2)
if user_config_setting_player != 0:
config.set_setting("player_mode", 0)
# Obtiene las listas servidores, calidades disponibles desde el nodo del json de AutoPlay
server_list = channel_node.get('servers', [])
quality_list = channel_node.get('quality', [])
# Informa que AutoPlay esta activo
platformtools.dialog_notification('AutoPlay Activo', '', sound=False)
# Se guardan los textos de cada servidor y calidad en listas p.e. favorite_servers = ['openload',
# 'streamcloud']
for num in range(1, 4):
favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]])
favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]])
# Prioridades a la hora de ordenar itemlist:
# 0: Servidores y calidades
# 1: Calidades y servidores
# 2: Solo servidores
# 3: Solo calidades
# 4: No ordenar
if settings_node['custom_servers'] and settings_node['custom_quality']:
priority = settings_node['priority'] # 0: Servidores y calidades o 1: Calidades y servidores
elif settings_node['custom_servers']:
priority = 2 # Solo servidores
elif settings_node['custom_quality']:
priority = 3 # Solo calidades
else:
priority = 4 # No ordenar
# Se filtran los enlaces de itemlist y que se correspondan con los valores de autoplay
for item in itemlist:
autoplay_elem = dict()
# Obtiene las listas servidores, calidades disponibles desde el nodo del json de AutoPlay
server_list = channel_node.get('servers', [])
quality_list = channel_node.get('quality', [])
# Comprobamos q se trata de un item de video
if 'server' not in item:
# Se guardan los textos de cada servidor y calidad en listas p.e. favorite_servers = ['openload',
# 'streamcloud']
for num in range(1, 4):
favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]])
favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]])
# Se filtran los enlaces de itemlist y que se correspondan con los valores de autoplay
for item in itemlist:
autoplay_elem = dict()
# Comprobamos q se trata de un item de video
if 'server' not in item:
continue
# Agrega la opcion configurar AutoPlay al menu contextual
if 'context' not in item:
item.context = list()
if not filter(lambda x: x['action'] == 'autoplay_config', context):
item.context.append({"title": "Configurar AutoPlay",
"action": "autoplay_config",
"channel": "autoplay",
"from_channel": item.channel})
# Si no tiene calidad definida le asigna calidad 'default'
if item.quality == '':
item.quality = 'default'
# Se crea la lista para configuracion personalizada
if priority < 2: # 0: Servidores y calidades o 1: Calidades y servidores
# si el servidor y la calidad no se encuentran en las listas de favoritos o la url esta repetida,
# descartamos el item
if item.server not in favorite_servers or item.quality not in favorite_quality \
or item.url in url_list_valid:
continue
# Agrega la opcion configurar AutoPlay al menu contextual
if 'context' not in item:
item.context = list()
if not filter(lambda x: x['action'] == 'autoplay_config', context):
item.context.append({"title": "Configurar AutoPlay",
"action": "autoplay_config",
"channel": "autoplay",
"from_channel": item.channel})
# Si no tiene calidad definida le asigna calidad 'default'
if item.quality == '':
item.quality = 'default'
# Se crea la lista para configuracion personalizada
if priority < 2: # 0: Servidores y calidades o 1: Calidades y servidores
# si el servidor y la calidad no se encuentran en las listas de favoritos o la url esta repetida,
# descartamos el item
if item.server not in favorite_servers or item.quality not in favorite_quality \
or item.url in url_list_valid:
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server)
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
elif priority == 2: # Solo servidores
# si el servidor no se encuentra en la lista de favoritos o la url esta repetida,
# descartamos el item
if item.server not in favorite_servers or item.url in url_list_valid:
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server)
elif priority == 3: # Solo calidades
# si la calidad no se encuentra en la lista de favoritos o la url esta repetida,
# descartamos el item
if item.quality not in favorite_quality or item.url in url_list_valid:
continue
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
else: # No ordenar
# si la url esta repetida, descartamos el item
if item.url in url_list_valid:
continue
# Si el item llega hasta aqui lo añadimos al listado de urls validas y a autoplay_list
url_list_valid.append(item.url)
autoplay_elem['videoitem'] = item
# autoplay_elem['server'] = item.server
# autoplay_elem['quality'] = item.quality
autoplay_list.append(autoplay_elem)
# Ordenamos segun la prioridad
if priority == 0: # Servidores y calidades
autoplay_list.sort(key=lambda orden: (orden['indice_server'], orden['indice_quality']))
elif priority == 1: # Calidades y servidores
autoplay_list.sort(key=lambda orden: (orden['indice_quality'], orden['indice_server']))
autoplay_elem["indice_server"] = favorite_servers.index(item.server)
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
elif priority == 2: # Solo servidores
autoplay_list.sort(key=lambda orden: orden['indice_server'])
# si el servidor no se encuentra en la lista de favoritos o la url esta repetida,
# descartamos el item
if item.server not in favorite_servers or item.url in url_list_valid:
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server)
elif priority == 3: # Solo calidades
autoplay_list.sort(key=lambda orden: orden['indice_quality'])
# Si hay elementos en la lista de autoplay se intenta reproducir cada elemento, hasta encontrar uno
# funcional o fallen todos
if autoplay_list:
played = False
max_intentos = 5
max_intentos_servers = {}
# si la calidad no se encuentra en la lista de favoritos o la url esta repetida,
# descartamos el item
if item.quality not in favorite_quality or item.url in url_list_valid:
continue
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
# Si se esta reproduciendo algo detiene la reproduccion
if platformtools.is_playing():
platformtools.stop_video()
else: # No ordenar
for autoplay_elem in autoplay_list:
if not platformtools.is_playing() and not played:
videoitem = autoplay_elem['videoitem']
# si la url esta repetida, descartamos el item
if item.url in url_list_valid:
continue
if videoitem.server not in max_intentos_servers:
# Si el item llega hasta aqui lo añadimos al listado de urls validas y a autoplay_list
url_list_valid.append(item.url)
autoplay_elem['videoitem'] = item
# autoplay_elem['server'] = item.server
# autoplay_elem['quality'] = item.quality
autoplay_list.append(autoplay_elem)
# Ordenamos segun la prioridad
if priority == 0: # Servidores y calidades
autoplay_list.sort(key=lambda orden: (orden['indice_server'], orden['indice_quality']))
elif priority == 1: # Calidades y servidores
autoplay_list.sort(key=lambda orden: (orden['indice_quality'], orden['indice_server']))
elif priority == 2: # Solo servidores
autoplay_list.sort(key=lambda orden: orden['indice_server'])
elif priority == 3: # Solo calidades
autoplay_list.sort(key=lambda orden: orden['indice_quality'])
# Si hay elementos en la lista de autoplay se intenta reproducir cada elemento, hasta encontrar uno
# funcional o fallen todos
if autoplay_list:
played = False
max_intentos = 5
max_intentos_servers = {}
# Si se esta reproduciendo algo detiene la reproduccion
if platformtools.is_playing():
platformtools.stop_video()
for autoplay_elem in autoplay_list:
if not platformtools.is_playing() and not played:
videoitem = autoplay_elem['videoitem']
if videoitem.server not in max_intentos_servers:
max_intentos_servers[videoitem.server] = max_intentos
# Si se han alcanzado el numero maximo de intentos de este servidor saltamos al siguiente
if max_intentos_servers[videoitem.server] == 0:
continue
lang = " "
if hasattr(videoitem, 'language') and videoitem.language != "":
lang = " '%s' " % videoitem.language
platformtools.dialog_notification("AutoPlay", "%s%s%s" % (
videoitem.server.upper(), lang, videoitem.quality.upper()), sound=False)
# TODO videoitem.server es el id del server, pero podria no ser el nombre!!!
# Intenta reproducir los enlaces
# Si el canal tiene metodo play propio lo utiliza
channel = __import__('channels.%s' % item.channel, None, None, ["channels.%s" % item.channel])
if hasattr(channel, 'play'):
resolved_item = getattr(channel, 'play')(videoitem)
if len(resolved_item) > 0:
if isinstance(resolved_item[0], list):
videoitem.video_urls = resolved_item
else:
videoitem = resolved_item[0]
# si no directamente reproduce
platformtools.play_video(videoitem)
try:
if platformtools.is_playing():
played = True
break
except: # TODO evitar el informe de que el conector fallo o el video no se encuentra
logger.debug(str(len(autoplay_list)))
# Si hemos llegado hasta aqui es por q no se ha podido reproducir
max_intentos_servers[videoitem.server] -= 1
# Si se han alcanzado el numero maximo de intentos de este servidor
# preguntar si queremos seguir probando o lo ignoramos
if max_intentos_servers[videoitem.server] == 0:
text = "Parece que los enlaces de %s no estan funcionando." % videoitem.server.upper()
if not platformtools.dialog_yesno("AutoPlay", text,
"¿Desea ignorar todos los enlaces de este servidor?"):
max_intentos_servers[videoitem.server] = max_intentos
# Si se han alcanzado el numero maximo de intentos de este servidor saltamos al siguiente
if max_intentos_servers[videoitem.server] == 0:
continue
else:
platformtools.dialog_notification('AutoPlay No Fue Posible', 'No Hubo Coincidencias')
if new_options:
platformtools.dialog_notification("AutoPlay", "Nueva Calidad/Servidor disponible en la "
"configuracion", sound=False)
lang = " "
if hasattr(videoitem, 'language') and videoitem.language != "":
lang = " '%s' " % videoitem.language
# Restaura si es necesario el valor previo de "Accion y Player Mode" en preferencias
if user_config_setting_action != 2:
config.set_setting("default_action", user_config_setting_action)
if user_config_setting_player != 0:
config.set_setting("player_mode", user_config_setting_player)
platformtools.dialog_notification("AutoPlay", "%s%s%s" % (
videoitem.server.upper(), lang, videoitem.quality.upper()), sound=False)
# TODO videoitem.server es el id del server, pero podria no ser el nombre!!!
# Intenta reproducir los enlaces
# Si el canal tiene metodo play propio lo utiliza
channel = __import__('channels.%s' % item.channel, None, None, ["channels.%s" % item.channel])
if hasattr(channel, 'play'):
resolved_item = getattr(channel, 'play')(videoitem)
if len(resolved_item) > 0:
if isinstance(resolved_item[0], list):
videoitem.video_urls = resolved_item
else:
videoitem = resolved_item[0]
# si no directamente reproduce
platformtools.play_video(videoitem)
try:
if platformtools.is_playing():
played = True
break
except: # TODO evitar el informe de que el conector fallo o el video no se encuentra
logger.debug(str(len(autoplay_list)))
# Si hemos llegado hasta aqui es por q no se ha podido reproducir
max_intentos_servers[videoitem.server] -= 1
# Si se han alcanzado el numero maximo de intentos de este servidor
# preguntar si queremos seguir probando o lo ignoramos
if max_intentos_servers[videoitem.server] == 0:
text = "Parece que los enlaces de %s no estan funcionando." % videoitem.server.upper()
if not platformtools.dialog_yesno("AutoPlay", text,
"¿Desea ignorar todos los enlaces de este servidor?"):
max_intentos_servers[videoitem.server] = max_intentos
else:
platformtools.dialog_notification('AutoPlay No Fue Posible', 'No Hubo Coincidencias')
if new_options:
platformtools.dialog_notification("AutoPlay", "Nueva Calidad/Servidor disponible en la "
"configuracion", sound=False)
# Restaura si es necesario el valor previo de "Accion y Player Mode" en preferencias
if user_config_setting_action != 2:
config.set_setting("default_action", user_config_setting_action)
if user_config_setting_player != 0:
config.set_setting("player_mode", user_config_setting_player)
# devuelve la lista de enlaces para la eleccion manual
return itemlist
# devuelve la lista de enlaces para la eleccion manual
return itemlist
def init(channel, list_servers, list_quality):
'''
Comprueba la existencia de canal en el archivo de configuracion de Autoplay y si no existe lo añade.
Comprueba la existencia de canal en el archivo de configuracion de Autoplay y si no existe lo añade.
Es necesario llamar a esta funcion al entrar a cualquier canal que incluya la funcion Autoplay.
:param channel: (str) id del canal
:param list_servers: (list) lista inicial de servidores validos para el canal. No es necesario incluirlos todos,
:param list_servers: (list) lista inicial de servidores validos para el canal. No es necesario incluirlos todos,
ya que la lista de servidores validos se ira actualizando dinamicamente.
:param list_quality: (list) lista inicial de calidades validas para el canal. No es necesario incluirlas todas,
:param list_quality: (list) lista inicial de calidades validas para el canal. No es necesario incluirlas todas,
ya que la lista de calidades validas se ira actualizando dinamicamente.
:return: (bool) True si la inicializacion ha sido correcta.
'''
@@ -304,7 +311,7 @@ def init(channel, list_servers, list_quality):
result = True
if not config.is_xbmc():
platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
# platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
result = False
else:
autoplay_path = os.path.join(config.get_data_path(), "settings_channels", 'autoplay_data.json')
@@ -344,16 +351,12 @@ def init(channel, list_servers, list_quality):
if change:
result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY')
if result:
heading = "AutoPlay Disponible"
msj = "Seleccione '<Configurar AutoPlay>' para activarlo."
icon = 0
else:
if not result:
heading = "Error al iniciar AutoPlay"
msj = "Consulte su log para obtener mas información."
icon = 1
platformtools.dialog_notification(heading, msj, icon, sound=False)
platformtools.dialog_notification(heading, msj, icon, sound=False)
return result
@@ -517,9 +520,9 @@ def autoplay_config(item):
def save(item, dict_data_saved):
'''
Guarda los datos de la ventana de configuracion
:param item: item
:param dict_data_saved: dict
:param dict_data_saved: dict
:return:
'''
logger.info()

View File

@@ -3,7 +3,7 @@
"name": "Bajui2",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "bajui.png",
"banner": "bajui.png",
"fanart": "bajui.png",

View File

@@ -3,7 +3,7 @@
"name": "Beeg",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "beeg.png",
"banner": "beeg.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Bityouth",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "http://s6.postimg.org/6ash180up/bityoulogo.png",
"banner": "bityouth.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "BorrachodeTorrent",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "http://imgur.com/BePrYmy.png",
"version": 1,
"changes": [

View File

@@ -3,7 +3,7 @@
"name": "Bricocine",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "http://s6.postimg.org/9u8m1ep8x/bricocine.jpg",
"banner": "bricocine.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "CanalPelis",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/canalpelisbg.jpg",
"thumbnail": "http://www.canalpelis.com/wp-content/uploads/2016/11/logo_web.gif",
"banner": "",
@@ -15,7 +15,6 @@
}
],
"categories": [
"latino",
"movie",
"tvshow",
"vos"
@@ -38,6 +37,8 @@
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"

7
plugin.video.alfa/channels/canalpelis.py Executable file → Normal file
View File

@@ -127,7 +127,7 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
# logger.info(data)
logger.info(data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title.strip()
patron += '<span class="icon-star2"></span>(.*?)/div>.*?' # rating
@@ -138,7 +138,8 @@ def peliculas(item):
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, rating, calidad, scrapedurl, year in matches[item.page:item.page + 20]:
if 'Próximamente' not in calidad:
if 'Próximamente' not in calidad and '-XXX.jpg' not in scrapedthumbnail:
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
@@ -337,7 +338,7 @@ def episodios(item):
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,

View File

@@ -3,7 +3,7 @@
"name": "Canalporno",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "http://i.imgur.com/gAbPcvT.png?1",
"banner": "canalporno.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Cartoon-Latino",
"active": true,
"adult": false,
"language": "es",
"language": ["lat"],
"thumbnail": "http://i.imgur.com/wk6fRDZ.png",
"banner": "http://i.imgur.com/115c59F.png",
"version": 1,
@@ -14,7 +14,6 @@
}
],
"categories": [
"tvshow",
"latino"
"tvshow"
]
}

View File

@@ -12,18 +12,29 @@ from core.item import Item
from platformcode import config, logger
host = "http://www.cartoon-latino.com/"
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'vimple',
'gvideo',
'rapidvideo'
]
list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -87,9 +98,10 @@ def lista(item):
for link, name in matches:
title = name + " [Latino]"
url = link
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=title, action="episodios", show=title,
context=renumbertools.context(item)))
context=context1))
tmdb.set_infoLabels(itemlist)
return itemlist
@@ -171,11 +183,13 @@ def findvideos(item):
if server in link:
url = link.replace('" + ID' + server + ' + "', str(id))
if "drive" in server:
server1 = 'googlevideo'
server1 = 'Gvideo'
else:
server1 = server
itemlist.append(item.clone(url=url, action="play", server=server1,
title="Enlace encontrado en %s " % (server1.capitalize())))
autoplay.start(itemlist, item)
return itemlist

View File

@@ -3,7 +3,7 @@
"name": "CiberDocumentales",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "https://s9.postimg.org/secdb5s8v/ciberdocumentales.png",
"banner": "https://s1.postimg.org/sa486z0of/ciberdocumentales_banner.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "CineAsiaEnLinea",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/5KOU8uy.png?3",
"banner": "cineasiaenlinea.png",
"version": 1,

View File

@@ -1,12 +1,9 @@
{
"id": "cinecalidad",
"name": "CineCalidad",
"compatible": {
"addon_version": "4.3"
},
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "https://s31.postimg.org/puxmvsi7v/cinecalidad.png",
"banner": "https://s32.postimg.org/kihkdpx1x/banner_cinecalidad.png",
"version": 1,
@@ -33,7 +30,6 @@
}
],
"categories": [
"latino",
"movie"
],
"settings": [

View File

@@ -298,7 +298,7 @@ def findvideos(item):
if server_id in server_url:
server = server_id.lower()
thumbnail = servertools.guess_server_thumbnail(server_id)
thumbnail = item.contentThumbnail
if server_id == 'TVM':
server = 'thevideo.me'
url = server_url[server_id] + video_id + '.html'
@@ -367,7 +367,7 @@ def play(item):
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.extra
videoitem.thumbnail = item.contentThumbnail
videoitem.channel = item.channel
else:
itemlist.append(item)
@@ -463,3 +463,4 @@ def search(item, texto):
for line in sys.exc_info():
logger.error("%s" % line)
return []

View File

@@ -3,7 +3,7 @@
"name": "Cinefox",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"version": 1,
"thumbnail": "cinefox.png",
"banner": "cinefox.png",
@@ -28,7 +28,6 @@
"categories": [
"movie",
"tvshow",
"latino",
"vos"
],
"settings": [

0
plugin.video.alfa/channels/cinefox.py Executable file → Normal file
View File

View File

@@ -3,7 +3,7 @@
"name": "CineFoxTV",
"active": true,
"adult": false,
"language": "es",
"language": ["lat"],
"thumbnail": "https://s28.postimg.org/lytn2q1tp/cinefoxtv.png",
"banner": "cinefoxtv.png",
"version": 1,
@@ -22,7 +22,6 @@
}
],
"categories": [
"latino",
"movie"
],
"settings": [

2
plugin.video.alfa/channels/cinefoxtv.py Executable file → Normal file
View File

@@ -164,7 +164,7 @@ def findvideos(item):
itemlist.extend(servertools.find_video_items(data=scrapedurl))
for videoitem in itemlist:
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
videoitem.title = item.contentTitle
videoitem.channel = item.channel
videoitem.plot = info
videoitem.action = "play"

View File

@@ -3,7 +3,7 @@
"name": "CineHindi",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "cinehindi.png",
"banner": "http://i.imgur.com/cau9TVe.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Cinetemagay",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "cinetemagay.png",
"banner": "cinetemagay.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Cinetux",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "cinetux.png",
"banner": "cinetux.png",
"fanart": "cinetux.jpg",
@@ -27,7 +27,6 @@
}
],
"categories": [
"latino",
"direct",
"movie"
],

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
@@ -48,13 +46,11 @@ def mainlist(item):
"/0/Genre.png",
text_color=color1))
url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/")
itemlist.append(item.clone(title="Documentales", text_bold=True, text_color=color2, action=""))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=url, text_color=color1,
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "genero/documental/", text_color=color1,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
"/0/Documentaries.png"))
url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/?orderby=title&order=asc&gdsr_order=asc")
itemlist.append(item.clone(action="peliculas", title=" Por orden alfabético", text_color=color1, url=url,
itemlist.append(item.clone(action="peliculas", title=" Por orden alfabético", text_color=color1, url=CHANNEL_HOST + "genero/documental/?orderby=title&order=asc&gdsr_order=asc",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
"/0/A-Z.png"))
itemlist.append(item.clone(title="", action=""))
@@ -100,7 +96,7 @@ def newest(categoria):
itemlist.pop()
elif categoria == 'documentales':
item.url = urlparse.urljoin(CHANNEL_HOST, "genero/documental/")
item.url = CHANNEL_HOST + "genero/documental/"
item.action = "peliculas"
itemlist = peliculas(item)
@@ -108,7 +104,7 @@ def newest(categoria):
itemlist.pop()
elif categoria == 'infantiles':
item.url = urlparse.urljoin(CHANNEL_HOST, "genero/infantil/")
item.url = CHANNEL_HOST + "genero/infantil/"
item.action = "peliculas"
itemlist = peliculas(item)
@@ -130,7 +126,6 @@ def peliculas(item):
itemlist = []
item.text_color = color2
# Descarga la página
data = httptools.downloadpage(item.url).data
patron = '(?s)class="(?:result-item|item movies)">.*?<img src="([^"]+)'
patron += '.*?alt="([^"]+)"'
@@ -156,11 +151,6 @@ def peliculas(item):
if year:
new_item.infoLabels['year'] = int(year)
itemlist.append(new_item)
try:
# tmdb.set_infoLabels(itemlist, __modo_grafico__)
a = 1
except:
pass
# Extrae el paginador
next_page_link = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)')
@@ -218,7 +208,6 @@ def generos(item):
scrapedtitle = unicode(scrapedtitle, "utf8").capitalize().encode("utf8")
if scrapedtitle == "Erotico" and config.get_setting("adult_mode") == 0:
continue
itemlist.append(item.clone(action="peliculas", title=scrapedtitle, url=scrapedurl))
return itemlist
@@ -228,9 +217,9 @@ def idioma(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="peliculas", title="Español", url="http://www.cinetux.net/idioma/espanol/"))
itemlist.append(item.clone(action="peliculas", title="Latino", url="http://www.cinetux.net/idioma/latino/"))
itemlist.append(item.clone(action="peliculas", title="VOSE", url="http://www.cinetux.net/idioma/subtitulado/"))
itemlist.append(item.clone(action="peliculas", title="Español", url= CHANNEL_HOST + "idioma/espanol/"))
itemlist.append(item.clone(action="peliculas", title="Latino", url= CHANNEL_HOST + "idioma/latino/"))
itemlist.append(item.clone(action="peliculas", title="VOSE", url= CHANNEL_HOST + "idioma/subtitulado/"))
return itemlist
@@ -290,7 +279,6 @@ def findvideos(item):
else:
itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3))
return itemlist
@@ -307,6 +295,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
match = scrapertools.find_multiple_matches(data, patron)
for scrapedoption, language in match:
scrapedserver = ""
lazy = ""
if "lazy" in bloque1:
lazy = "lazy-"
@@ -314,15 +303,13 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
url = scrapertools.find_single_match(bloque1, patron)
if "goo.gl" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "www.cinetux.me" in url:
server = scrapertools.find_single_match(url, "player/(.*?)\.")
else:
server = servertools.get_server_from_url(url)
matches.append([url, server, "", language.strip(), t_tipo])
if "player" in url:
scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
matches.append([url, scrapedserver, "", language.strip(), t_tipo])
bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
bloque2 = bloque2.replace("\t", "").replace("\r", "")
patron = '(?s)optn" href="([^"]+)'
patron += '.*?title="([^"]+)'
patron += '.*?title="([^\.]+)'
patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
patron += '.*?src="[^>]+"?/>([^<]+)'
patron += '.*?/span>([^<]+)'
@@ -336,19 +323,19 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
scrapedtipo = match[4]
if t_tipo.upper() not in scrapedtipo.upper():
continue
title = " Mirror en " + scrapedserver.split(".")[0] + " (" + scrapedlanguage + ")"
title = " Mirror en %s (" + scrapedlanguage + ")"
if len(scrapedcalidad.strip()) > 0:
title += " (Calidad " + scrapedcalidad.strip() + ")"
if filtro_idioma == 3 or item.filtro:
lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
url=scrapedurl, server=scrapedserver, idioma=scrapedlanguage,
extra=item.url))
extra=item.url, contentThumbnail = item.thumbnail))
else:
idioma = dict_idiomas[language]
if idioma == filtro_idioma:
lista_enlaces.append(item.clone(title=title, text_color=color2, action="play", url=scrapedurl,
extra=item.url))
extra=item.url, contentThumbnail = item.thumbnail))
else:
if language not in filtrados:
filtrados.append(language)
@@ -357,30 +344,28 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
filtro=True))
lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server.capitalize())
return lista_enlaces
def play(item):
logger.info()
itemlist = []
video_urls = []
if "api.cinetux" in item.url:
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
itemlist = servertools.find_video_items(data=item.url)
elif "links" in item.url or "www.cinetux.me" in item.url:
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data, '(?i)<frame src="(http[^"]+)')
scrapedurl = scrapertools.find_single_match(data, '(?i)frame.*?src="(http[^"]+)')
if scrapedurl == "":
scrapedurl = scrapertools.find_single_match(data, 'replace."([^"]+)"')
elif "goo.gl" in scrapedurl:
scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
"location", "")
item.url = scrapedurl
itemlist = servertools.find_video_items(data=item.url)
else:
return [item]
return itemlist
item.thumbnail = item.contentThumbnail
item.server = servertools.get_server_from_url(item.url)
return [item]

View File

@@ -3,7 +3,7 @@
"name": "ClasicoFilm",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "http://i.imgur.com/F7sevVu.jpg?1",
"banner": "clasicofilm.png",
"version": 1,

View File

@@ -1,7 +1,7 @@
{
"id": "copiapop",
"name": "Copiapop/Diskokosmiko",
"language": "es",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Crimenes Imperfectos",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"banner": "crimenes.png",
"thumbnail": "crimenes.png",
"version": 1,

View File

@@ -1,7 +1,7 @@
{
"id": "crunchyroll",
"name": "Crunchyroll",
"language": "es",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Cuelgame",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"version": 1,
"changes": [
{

View File

@@ -3,7 +3,7 @@
"name": "Cumlouder",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "cumlouder.png",
"banner": "cumlouder.png",
"version": 1,

View File

@@ -1,7 +1,7 @@
{
"id": "datoporn",
"name": "DatoPorn",
"language": "es",
"language": ["*"],
"active": true,
"adult": true,
"changes": [

View File

@@ -1,7 +1,7 @@
{
"id": "descargacineclasico",
"name": "descargacineclasico",
"language": "es",
"language": ["cast"],
"active": true,
"adult": false,
"version": 1,

View File

@@ -1,7 +1,7 @@
{
"id": "descargasmix",
"name": "DescargasMIX",
"language": "es",
"language": ["cast", "lat"],
"active": true,
"version": 1,
"adult": false,
@@ -31,7 +31,6 @@
"banner": "descargasmix.png",
"categories": [
"movie",
"latino",
"vos",
"torrent",
"documentary",

3
plugin.video.alfa/channels/descargasmix.py Executable file → Normal file
View File

@@ -220,7 +220,7 @@ def episodios(item):
itemlist = []
data = get_data(item.url)
patron = '(<ul class="menu" id="seasons-list">.*?<div class="section-box related-posts">)'
patron = '(<ul class="menu ses" id="seasons-list">.*?<div class="section-box related-posts">)'
bloque = scrapertools.find_single_match(data, patron)
matches = scrapertools.find_multiple_matches(bloque, '<div class="polo".*?>(.*?)</div>')
for scrapedtitle in matches:
@@ -261,6 +261,7 @@ def epienlaces(item):
data = get_data(item.url)
data = data.replace("\n", "").replace("\t", "")
# Bloque de enlaces
patron = '<div class="polo".*?>%s(.*?)(?:<div class="polo"|</li>)' % item.extra.strip()
bloque = scrapertools.find_single_match(data, patron)

View File

@@ -3,7 +3,7 @@
"name": "Discoverymx",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "discoverymx.png",
"banner": "discoverymx.png",
"version": 1,
@@ -18,7 +18,6 @@
}
],
"categories": [
"latino",
"documentary"
]
}

View File

@@ -3,7 +3,7 @@
"name": "Divxatope",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "divxatope.png",
"banner": "divxatope.png",
"version": 1,

21
plugin.video.alfa/channels/divxatope.py Executable file → Normal file
View File

@@ -258,12 +258,11 @@ def findvideos(item):
# Descarga la pagina
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
link = scrapertools.find_single_match(data, 'href="http://(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
link = scrapertools.find_single_match(data, 'href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
if link != "":
link = "http://www.divxatope1.com/" + link
logger.info("torrent=" + link)
@@ -272,12 +271,16 @@ def findvideos(item):
url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
parentContent=item))
patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
patron += '<div class="box2">([^<]+)</div[^<]+'
patron += '<div class="box3">([^<]+)</div[^<]+'
patron += '<div class="box4">([^<]+)</div[^<]+'
patron += '<div class="box5">(.*?)</div[^<]+'
patron += '<div class="box6">([^<]+)<'
patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
patron += '<\/div[^<]+<div class="box6">([^<]+)<'
#patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
#patron += '<div class="box2">([^<]+)</div[^<]+'
#patron += '<div class="box3">([^<]+)</div[^<]+'
#patron += '<div class="box4">([^<]+)</div[^<]+'
#patron += '<div class="box5">(.*?)</div[^<]+'
#patron += '<div class="box6">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -299,7 +302,7 @@ def findvideos(item):
if "partes" in title:
action = "extract_url"
new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, parentContent=item)
thumbnail=thumbnail, plot=plot, parentContent=item, server = servername)
if comentarios.startswith("Ver en"):
itemlist_ver.append(new_item)
else:

View File

@@ -3,7 +3,7 @@
"name": "Divxtotal",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "http://imgur.com/Madj03A.jpg",
"version": 1,
"changes": [

6
plugin.video.alfa/channels/divxtotal.py Executable file → Normal file
View File

@@ -232,7 +232,6 @@ def findtemporadas(item):
th.start()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if len(item.extra.split("|")):
if len(item.extra.split("|")) >= 4:
fanart = item.extra.split("|")[2]
@@ -266,7 +265,7 @@ def findtemporadas(item):
fanart_extra = item.fanart
fanart_info = item.fanart
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) </a>(.*?)</table>')
bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada.*?(\d+).*?<\/a>(.*?)<\/table>')
for temporada, bloque_epis in bloque_episodios:
item.infoLabels = item.InfoLabels
item.infoLabels['season'] = temporada
@@ -299,9 +298,8 @@ def epis(item):
itemlist = []
if item.extra == "serie_add":
item.url = item.datalibrary
patron = scrapertools.find_multiple_matches(item.url,
'<td><img src=".*?images/(.*?)\.png.*?<a href="([^"]+)" title="">.*?(\d+x\d+).*?td>')
'<td><img src=".*?images\/(.*?)\.png".*?href="([^"]+)" title="">.*?(\d+x\d+).*?td>')
for idioma, url, epi in patron:
episodio = scrapertools.find_single_match(epi, '\d+x(\d+)')
item.infoLabels['episode'] = episodio

View File

@@ -3,7 +3,7 @@
"name": "DocumaniaTV",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/qMR9sg9.png",
"banner": "documaniatv.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Documentales Online",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/fsrnC4m.jpg",
"version": 1,
"changes": [

View File

@@ -1,40 +1,15 @@
{
"id": "doomtv",
"name": "doomtv",
"compatible": {
"addon_version": "4.3"
},
"active": true,
"adult": false,
"language": "es",
"language": ["lat"],
"thumbnail": "https://s2.postimg.org/jivgi4ak9/doomtv.png",
"banner": "https://s32.postimg.org/6gxyripvp/doomtv_banner.png",
"version": 1,
"changes": [
{
"date": "24/06/2017",
"description": "Cambios para autoplay"
},
{
"date": "06/06/2017",
"description": "COmpatibilida con AutoPlay"
},
{
"date": "12/05/2017",
"description": "Fix generos y enlaces"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/02/2017",
"description": "Release."
}
],
"categories": [
"latino",
"movie"
"movie",
"direct"
],
"settings": [
{

303
plugin.video.alfa/channels/doomtv.py Executable file → Normal file
View File

@@ -18,7 +18,7 @@ list_language = IDIOMAS.values()
CALIDADES = {'1080p': '1080p', '720p': '720p', '480p': '480p', '360p': '360p'}
list_quality = CALIDADES.values()
list_servers = ['directo']
list_servers = ['directo', 'openload']
host = 'http://doomtv.net/'
headers = {
@@ -32,10 +32,10 @@ tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Animación": "https://s13.postimg.org/5on877l87/animacion.png",
"Ciencia Ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Documentales": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Musical": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Bélico Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
@@ -56,7 +56,6 @@ tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
@@ -64,7 +63,7 @@ def mainlist(item):
action="lista",
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
url=host
url='%s%s'%(host,'peliculas/page/1')
))
itemlist.append(
@@ -72,34 +71,15 @@ def mainlist(item):
action="seccion",
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
url=host,
extra='generos'
url='%s%s' % (host, 'peliculas/page/1'),
))
itemlist.append(
item.clone(title="Mas vistas",
action="seccion",
item.clone(title="Mas Vistas",
action="lista",
thumbnail='https://s9.postimg.org/wmhzu9d7z/vistas.png',
fanart='https://s9.postimg.org/wmhzu9d7z/vistas.png',
url=host,
extra='masvistas'
))
itemlist.append(
item.clone(title="Recomendadas",
action="lista",
thumbnail='https://s12.postimg.org/s881laywd/recomendadas.png',
fanart='https://s12.postimg.org/s881laywd/recomendadas.png',
url=host,
extra='recomendadas'
))
itemlist.append(
item.clone(title="Por año",
action="seccion",
thumbnail='https://s8.postimg.org/7eoedwfg5/pora_o.png',
fanart='https://s8.postimg.org/7eoedwfg5/pora_o.png',
url=host, extra='poraño'
url='%s%s'%(host,'top-imdb/page/1'),
))
itemlist.append(
@@ -110,8 +90,6 @@ def mainlist(item):
fanart='https://s30.postimg.org/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -123,23 +101,11 @@ def lista(item):
next_page_url = ''
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'movie-id=.*?href=(.*?) data-url.*?quality>(.*?)'
patron += '<img data-original=(.*?) class.*?<h2>(.*?)<\/h2>.*?<p>(.*?)<\/p>'
if item.extra == 'recomendadas':
patron = '<a href="(.*?)">.*?'
patron += '<div class="imgss">.*?'
patron += '<img src="(.*?)" alt="(.*?)(?:.*?|\(.*?|&#8211;|").*?'
patron += '<div class="imdb">.*?'
patron += '<\/a>.*?'
patron += '<span class="ttps">.*?<\/span>.*?'
patron += '<span class="ytps">(.*?)<\/span><\/div>'
elif item.extra in ['generos', 'poraño', 'buscar']:
patron = '<div class=movie>.*?<img src=(.*?) alt=(.*?)(?:\s|\/)><a href=(.*?)>.*?'
patron += '<h2>.*?<\/h2>.*?(?:<span class=year>(.*?)<\/span>)?.*?<\/div>'
else:
patron = '<div class="imagen">.*?'
patron += '<img src="(.*?)" alt="(.*?)(?:.*?|\(.*?|&#8211;|").*?'
patron += '<a href="([^"]+)"><(?:span) class="player"><\/span><\/a>.*?'
patron += 'h2>\s*.*?(?:year)">(.*?)<\/span>.*?<\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if item.next_page != 'b':
@@ -150,39 +116,36 @@ def lista(item):
else:
matches = matches[max_items:]
next_page = 'a'
patron_next_page = '<div class="siguiente"><a href="(.*?)"|\/\?'
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
next_page_url = urlparse.urljoin(item.url, matches_next_page[0])
next_page_str = scrapertools.find_single_match(data,"<li class='active'><a class=''>(\d+)</a>")
next_page_num = int(next_page_str)+1
page_base = re.sub(r'(page\/\d+)','', item.url)
next_page_url = '%s%s%s'%(page_base,'page/',next_page_num)
for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedyear in matches:
if item.extra == 'recomendadas':
url = scrapedthumbnail
title = scrapedurl
thumbnail = scrapedtitle
else:
url = scrapedurl
thumbnail = scrapedthumbnail
title = scrapedtitle
year = scrapedyear
if next_page_url:
next_page_url = next_page_url
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb.strip()}
filtro_list = filtro_list.items()
title = scrapedtitle
fanart = ''
plot = ''
if 'serie' not in url:
itemlist.append(
Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentTitle=title,
infoLabels={'year': year},
context=autoplay.context
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
plot = plot
itemlist.append(
Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
infoLabels={'filtro': filtro_list},
fanart=fanart,
contentTitle=title
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
# Paginacion
if next_page_url != '':
itemlist.append(
@@ -203,17 +166,8 @@ def seccion(item):
itemlist = []
duplicado = []
data = httptools.downloadpage(item.url).data
if item.extra == 'generos':
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
accion = 'lista'
if item.extra == 'masvistas':
patron = '<b>\d*<\/b>\s*<a href="(.*?)">(.*?<\/a>\s*<span>.*?<\/span>\s*<i>.*?<\/i><\/li>)'
accion = 'findvideos'
elif item.extra == 'poraño':
patron = '<li><a class="ito" HREF="(.*?)">(.*?)<\/a><\/li>'
else:
patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?)<\/i>'
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'menu-item-object-category menu-item-\d+><a href=(.*?)>(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -221,61 +175,19 @@ def seccion(item):
url = scrapedurl
title = scrapedtitle
thumbnail = ''
fanart = ''
plot = ''
year = ''
contentTitle = ''
if item.extra == 'masvistas':
year = re.findall(r'\b\d{4}\b', scrapedtitle)
title = re.sub(r'<\/a>\s*<span>.*?<\/span>\s*<i>.*?<\/i><\/li>', '', scrapedtitle)
contentTitle = title
title = title + ' (' + year[0] + ')'
elif item.extra == 'generos':
title = re.sub(r'<\/a> <i>\d+', '', scrapedtitle)
cantidad = re.findall(r'.*?<\/a> <i>(\d+)', scrapedtitle)
th_title = title
title = title + ' (' + cantidad[0] + ')'
thumbnail = tgenero[th_title]
fanart = thumbnail
if title in tgenero:
thumbnail = tgenero[title]
if url not in duplicado:
itemlist.append(
Item(channel=item.channel,
action=accion,
action='lista',
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
fanart=fanart,
contentTitle=contentTitle,
infoLabels={'year': year}
thumbnail = thumbnail
))
duplicado.append(url)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def unpack(packed):
    """Unpack a Dean Edwards "p,a,c,k,e,d" JavaScript blob.

    packed -- the full ``eval(function(p,a,c,k...`` wrapper as a string.
    Returns the de-obfuscated JavaScript source.

    NOTE(review): ``p.decode('string_escape')`` is Python 2 only; this
    helper will raise on Python 3 — confirm the addon's runtime.
    """
    # Capture the payload template (p), the keyword count (c) and the
    # '|'-separated keyword table (k) from the eval wrapper.
    p, c, k = re.search("}\('(.*)', *\d+, *(\d+), *'(.*)'\.", packed, re.DOTALL).groups()
    # Substitute placeholders highest-first so e.g. '12' is replaced
    # before '1' and '2' could clobber it.  (The loop variable reuses the
    # name ``c`` on purpose: the original count is consumed by range().)
    for c in reversed(range(int(c))):
        if k.split('|')[c]: p = re.sub(r'(\b%s\b)' % c, k.split('|')[c], p)
    # Strip escaping added by the packer, then decode escape sequences.
    p = p.replace('\\', '')
    p = p.decode('string_escape')
    return p
def getinfo(page_url):
    """Fetch a movie page and return a ``(plot, thumbnail)`` tuple.

    Either element may be an empty string when the page does not carry
    the corresponding markup.
    """
    logger.info()
    page_html = httptools.downloadpage(page_url).data
    # Cover image URL embedded in the inline background-image style.
    cover = scrapertools.find_single_match(
        page_html, '<div class="cover" style="background-image: url\((.*?)\);')
    # Synopsis paragraph that follows the "Synopsis" heading.
    synopsis = scrapertools.find_single_match(
        page_html, '<h2>Synopsis<\/h2>\s*<p>(.*?)<\/p>')
    return (synopsis, cover)
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
@@ -305,98 +217,47 @@ def newest(categoria):
return itemlist
def get_url(item):
logger.info()
itemlist = []
duplicado = []
patrones = ["{'label':(.*?),.*?'file':'(.*?)'}", "{file:'(.*?redirector.*?),label:'(.*?)'}"]
data = httptools.downloadpage(item.url, headers=headers, cookies=False).data
patron = 'class="player-content"><iframe src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option in matches:
if 'allplayer' in option:
url = 'http:/' + option.replace('//', '/')
data = httptools.downloadpage(url, headers=headers, cookies=False).data
packed = scrapertools.find_single_match(data, "<div id='allplayer'>.*?(eval\(function\(p,a,c,k.*?\)\)\))")
if packed:
unpacked = unpack(packed)
video_urls = []
if "vimeocdn" in unpacked:
streams = scrapertools.find_multiple_matches(unpacked,
"{file:'(.*?)',type:'video/.*?',label:'(.*?)'")
for video_url, quality in streams:
video_urls.append([video_url, quality])
else:
doc_id = scrapertools.find_single_match(unpacked, 'driveid=(.*?)&')
doc_url = "http://docs.google.com/get_video_info?docid=%s" % doc_id
response = httptools.downloadpage(doc_url, cookies=False)
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '59': '480p'}
for itag, video_url in streams:
video_url += headers_string
video_urls.append([video_url, itags[itag]])
for video_item in video_urls:
calidad = video_item[1]
title = '%s [%s]' % (item.contentTitle, calidad)
url = video_item[0]
if url not in duplicado:
itemlist.append(
Item(channel=item.channel,
action='play',
title=title,
url=url,
thumbnail=item.thumbnail,
plot=item.plot,
fanart=item.fanart,
contentTitle=item.contentTitle,
language=IDIOMAS['latino'],
server='directo',
quality=CALIDADES[calidad],
context=item.context
))
duplicado.append(url)
else:
itemlist.extend(servertools.find_video_items(data=option))
for videoitem in itemlist:
if 'Enlace' in videoitem.title:
videoitem.channel = item.channel
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
videoitem.language = 'latino'
videoitem.quality = 'default'
return itemlist
def findvideos(item):
logger.info()
itemlist = []
itemlist = get_url(item)
#itemlist = get_url(item)
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
matches = re.compile(patron, re.DOTALL).findall(data)
# Requerido para FilterTools
for option, urls in matches:
quality = scrapertools.find_single_match(data, '<div class=les-content><a href=#%s>(.*?)<\/a><\/div>'%option)
title = '%s (%s)' % (item.title, quality)
if 'content' in urls:
urls = '%s%s'%('http:',urls)
hidden_data = httptools.downloadpage(urls).data
hidden_data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", hidden_data)
patron = 'sources: \[{file: (.*?),'
matches = re.compile(patron, re.DOTALL).findall(hidden_data)
itemlist = filtertools.get_links(itemlist, item, list_language)
for videoitem in matches:
# Requerido para AutoPlay
autoplay.start(itemlist, item)
new_item = Item(
channel = item.channel,
url = videoitem,
title = title,
contentTitle = item.title,
action = 'play',
quality = quality
)
itemlist.append(new_item)
else:
new_item = Item(
channel=item.channel,
url=urls,
title=title,
contentTitle=item.title,
action='play',
quality = quality
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(

View File

@@ -3,7 +3,7 @@
"name": "DoramasTV",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "doramastv.png",
"banner": "doramastv.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Descargas",
"active": false,
"adult": false,
"language": "es",
"language": ["*"],
"version": 1,
"changes": [
{

View File

@@ -3,7 +3,7 @@
"name": "Trailers ecartelera",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "ecarteleratrailers.png",
"banner": "ecarteleratrailers.png",
"version": 1,

View File

@@ -41,7 +41,7 @@ def mainlist(item):
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
plot=plot, server="directo", folder=False))
plot=plot,folder=False))
# ------------------------------------------------------
# Extrae la página siguiente

View File

@@ -3,7 +3,7 @@
"name": "Elite Torrent",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "elitetorrent.png",
"banner": "elitetorrent.png",
"version": 2,

View File

@@ -3,7 +3,7 @@
"name": "El señor del anillo",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "elsenordelanillo.png",
"banner": "elsenordelanillo.png",
"version": 1,
@@ -18,7 +18,6 @@
}
],
"categories": [
"latino",
"movie"
]
}

View File

@@ -3,7 +3,7 @@
"name": "Eporner",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "eporner.png",
"banner": "eporner.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Erotik",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "http://www.youfreeporntube.com/uploads/custom-logo.png",
"banner": "http://www.youfreeporntube.com/uploads/custom-logo.png",
"version": 1,

View File

@@ -1,12 +1,9 @@
{
"id": "estadepelis",
"name": "Estadepelis",
"compatible": {
"addon_version": "4.3"
},
"active": true,
"adult": false,
"language": "es",
"language": ["lat"],
"thumbnail": "https://s24.postimg.org/nsgit7fhh/estadepelis.png",
"banner": "https://s28.postimg.org/ud0l032ul/estadepelis_banner.png",
"version": 1,
@@ -33,7 +30,6 @@
}
],
"categories": [
"latino",
"movie"
],
"settings": [

View File

@@ -3,7 +3,7 @@
"name": "EstrenosGo",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/estrenosgo.png",
"thumbnail": "https://github.com/master-1970/resources/raw/master/images/squares/estrenosgo.png",
"banner": "estrenosgo.png",
@@ -24,6 +24,7 @@
],
"categories": [
"movie",
"tvshow"
"tvshow",
"torrent"
]
}

8
plugin.video.alfa/channels/estrenosgo.py Executable file → Normal file
View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import channeltools
from core import httptools
from core import scrapertools
@@ -166,7 +166,8 @@ def findvideos(item):
title="Ver %s en %s%s" % (
capitulo.strip(), s[0][2].capitalize(), idioma),
thumbnail2=item.thumbnail,
thumbnail=config.get_thumb("server_" + s[0][2] + ".png")))
thumbnail=get_thumb("server_" + s[0][2] + ".png"),
language = idioma))
else:
import os
for s in servertools.findvideos(data):
@@ -174,7 +175,8 @@ def findvideos(item):
title="Ver en %s%s" % (s[2].capitalize(), idioma),
thumbnail2=item.thumbnail,
thumbnail=os.path.join(config.get_runtime_path(), "resources", "media",
"servers", "server_" + s[2] + ".png")))
"servers", "server_" + s[2] + ".png"),
language = idioma))
# Insertar items "Buscar trailer" y "Añadir a la videoteca"
if itemlist and item.extra == "movie":

View File

@@ -3,7 +3,7 @@
"name": "Filesmonster Catalogue",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "filesmonster_catalogue.png",
"banner": "filesmonster_catalogue.png",
"version": 1,

View File

@@ -1,7 +1,7 @@
{
"id": "freecambay",
"name": "FreeCamBay",
"language": "es",
"language": ["*"],
"active": true,
"adult": true,
"version": 1,

View File

@@ -0,0 +1,30 @@
{
"id": "gmobi",
"name": "gmobi",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"version": 1,
"thumbnail": "http://gnula.mobi/wp-content/uploads/2016/08/Untitled-6.png",
"banner": "",
"changes": [
{
"date": "25/08/2017",
"description": "Nuevo canal"
}
],
"categories": [
"movie",
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa
# ------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import httptools
from core import tmdb
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
    """Build the channel's root menu.

    Returns the fixed list of entries: latest additions, the three
    language filters (tag pages) and the search entry.
    """
    logger.info()
    itemlist = []
    itemlist.append(item.clone(title="Novedades", action="peliculas", url="http://gnula.mobi/"))
    # Fixed: the tag slug contained a stray ')' ("tag/esp)anol/") which
    # produced a broken URL for the Castellano listing.
    itemlist.append(item.clone(title="Castellano", action="peliculas",
                               url="http://www.gnula.mobi/tag/espanol/"))
    itemlist.append(item.clone(title="Latino", action="peliculas", url="http://gnula.mobi/tag/latino/"))
    itemlist.append(item.clone(title="VOSE", action="peliculas", url="http://gnula.mobi/tag/subtitulada/"))
    itemlist.append(item.clone(title="Buscar", action="search"))
    return itemlist
def search(item, texto):
    """Global-search entry point.

    Builds the site's query URL from *texto* and delegates to
    ``sub_search``; any failure is logged and an empty list is returned
    so the global search never aborts on one channel.
    """
    logger.info()
    query = texto.replace(" ", "+")
    item.url = "http://gnula.mobi/?s=%s" % query
    try:
        return sub_search(item)
    except:
        import sys
        for trace_part in sys.exc_info():
            logger.error("%s" % trace_part)
        return []
def sub_search(item):
    """Scrape one page of search results.

    Parses the result grid into playable items and, when the page shows
    a "next" chevron, appends a pagination item that re-enters this
    function.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="row">.*?<a href="([^"]+)" title="([^"]+)">.*?<img src="(.*?)" title'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, name, img in matches:
        itemlist.append(item.clone(title=name, url=url, action="findvideos", show=name, thumbnail=img))
    paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)" ><i class="glyphicon '
                                                      'glyphicon-chevron-right" aria-hidden="true"></i>')
    if paginacion:
        # Fixed: list.append() was being called with keyword arguments
        # (channel=..., action=...), which raises TypeError as soon as a
        # next page exists.  The pagination entry must be an Item clone.
        itemlist.append(item.clone(action="sub_search", title="Next page >>", url=paginacion))
    return itemlist
def peliculas(item):
    """List the movies of one catalogue page, plus a "next page" entry.

    Each movie becomes a ``findvideos`` item; when a release year can be
    extracted, TMDB metadata is attached before the item is queued.
    """
    logger.info()
    itemlist = []

    html = httptools.downloadpage(item.url).data
    html = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", html)

    card_re = re.compile(
        '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"',
        re.DOTALL)

    for link, raw_year, name, poster in card_re.findall(html):
        # The year travels inside the anchor's title attribute, e.g. "... (2016)".
        release_year = scrapertools.find_single_match(raw_year, r'.*?\((\d{4})\)')
        movie = Item(channel=item.channel, action="findvideos", title=name,
                     contentTitle=name, url=link, thumbnail=poster,
                     infoLabels={'year': release_year})
        # Only query TMDB when a year was actually extracted.
        if release_year:
            tmdb.set_infoLabels_item(movie)
        itemlist.append(movie)

    next_url = scrapertools.find_single_match(html, '<link rel="next" href="(.*?)"\/>')
    if next_url != "":
        itemlist.append(item.clone(action="peliculas", title="Siguiente >>",
                                   text_color="yellow",
                                   url=urlparse.urljoin(item.url, next_url)))
    return itemlist

View File

@@ -3,7 +3,7 @@
"name": "Gnula",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "gnula.png",
"banner": "gnula.png",
"version": 1,
@@ -18,7 +18,6 @@
}
],
"categories": [
"latino",
"movie"
]
}

View File

@@ -1,24 +1,24 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
host = "http://gnula.nu/"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Estrenos", action="peliculas",
url="http://gnula.nu/peliculas-online/lista-de-peliculas-online-parte-1/", viewmode="movie"))
url= host +"peliculas-online/lista-de-peliculas-online-parte-1/", viewmode="movie"))
itemlist.append(
Item(channel=item.channel, title="Generos", action="generos", url="http://gnula.nu/generos/lista-de-generos/"))
Item(channel=item.channel, title="Generos", action="generos", url= host + "generos/lista-de-generos/"))
itemlist.append(Item(channel=item.channel, title="Recomendadas", action="peliculas",
url="http://gnula.nu/peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie"))
# itemlist.append( Item(channel=item.channel, title="Portada" , action="portada" , url="http://gnula.nu/"))
url= host + "peliculas-online/lista-de-peliculas-recomendadas/", viewmode="movie"))
return itemlist
@@ -26,23 +26,23 @@ def generos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
# <span style="font-weight: bold;">Lista de géneros</span><br/>
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<spa[^>]+>Lista de g(.*?)/table')
# <strong>Historia antigua</strong> [<a href="http://gnula.nu/generos/lista-de-peliculas-del-genero-historia-antigua/"
patron = '<strong>([^<]+)</strong> .<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for genero, scrapedurl in matches:
title = scrapertools.htmlclean(genero)
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
url = item.url + scrapedurl
thumbnail = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action='peliculas', title=title, url=url, thumbnail=thumbnail, plot=plot,
extra=title, viewmode="movie"))
itemlist.append(Item(channel = item.channel,
action = 'peliculas',
title = title,
url = url,
thumbnail = thumbnail,
plot = plot,
viewmode = "movie"))
itemlist = sorted(itemlist, key=lambda item: item.title)
@@ -52,17 +52,9 @@ def generos(item):
def peliculas(item):
logger.info()
'''
<a class="Ntooltip" href="http://gnula.nu/comedia-romantica/ver-with-this-ring-2015-online/">With This Ring<span><br/>
<img src="http://gnula.nu/wp-content/uploads/2015/06/With_This_Ring2.gif"></span></a> [<span style="color: #33ccff;">18/07/15</span> <span style="color: #33ff33;">(VS)</span><span style="color: red;">(VC)</span><span style="color: #cc66cc;">(VL)</span>] [<span style="color: #ffcc99;">HD-R</span>]&#8212;&#8211;<strong>Comedia, Romántica</strong><br/>
'''
'''
<a class="Ntooltip" href="http://gnula.nu/aventuras/ver-las-aventuras-de-tintin-el-secreto-del-unicornio-2011-online/">The Adventures of Tintin<span><br />
<img src="http://gnula.nu/wp-content/uploads/2015/07/The_Adventures_of_Tintin_Secret_of_the_Unicorn2.gif"></span></a> (2011) [<span style="color: #33ccff;">10/07/15</span> <span style="color: #33ff33;">(VS)</span><span style="color: red;">(VC)</span><span style="color: #cc66cc;">(VL)</span>] [<span style="color: #ffcc99;">DVD-R</span>]&#8212;&#8211;<strong>Animación, Infantil, Aventuras</strong><br />
'''
# Descarga la página
data = scrapertools.cachePage(item.url)
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -70,34 +62,57 @@ def peliculas(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
plot = scrapertools.htmlclean(resto).strip()
title = scrapedtitle + " " + plot
fulltitle = title
contentTitle = scrapedtitle
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, fulltitle=fulltitle, url=url,
thumbnail=thumbnail, plot=plot, extra=title, hasContentDetails=True,
contentTitle=contentTitle, contentThumbnail=thumbnail,
contentType="movie", context=["buscar_trailer"]))
url = item.url + scrapedurl
itemlist.append(Item(channel = item.channel,
action = 'findvideos',
title = title,
url = url,
thumbnail = scrapedthumbnail,
plot = plot,
hasContentDetails = True,
contentTitle = contentTitle,
contentType = "movie",
context = ["buscar_trailer"]
))
return itemlist
def findvideos(item):
logger.info("item=" + item.tostring())
itemlist = []
# Descarga la página para obtener el argumento
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
patron = 'Ver película online.*?>.*?>([^<]+)'
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
bloque = scrapertools.find_multiple_matches(data, 'contenedor_tab.*?/table')
cuenta = 0
for datos in bloque:
cuenta = cuenta + 1
patron = '<em>(opción %s.*?)</em>' %cuenta
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcion = "(" + scrapertools.find_single_match(scrapedopcion, "op.*?, (.*)").upper() + ")"
if "TRAILER" in titulo_opcion or titulo_opcion == "()":
titulo_opcion = "(" + titulo_opcional + ")"
urls = scrapertools.find_multiple_matches(datos, '(?:src|href)="([^"]+)')
titulo = "Ver en %s " + titulo_opcion
for url in urls:
itemlist.append(Item(channel = item.channel,
action = "play",
contentThumbnail = item.thumbnail,
fulltitle = item.contentTitle,
title = titulo,
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
newthumbnail = scrapertools.find_single_match(data,
'<div class="entry"[^<]+<p align="center"><img alt="[^"]+" src="([^"]+)"')
if newthumbnail != "":
item.thumbnail = newthumbnail
item.contentThumbnail = newthumbnail
logger.info("plot=" + item.plot)
return servertools.find_video_items(item=item, data=data)
def play(item):
    """Return the single-element play list, restoring the item's thumbnail.

    findvideos() may have replaced the thumbnail; put the full content
    thumbnail back before the item is handed to the player.
    """
    item.thumbnail = item.contentThumbnail
    playlist = [item]
    return playlist

View File

@@ -3,7 +3,7 @@
"name": "La Guarida valencianista",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "guaridavalencianista.png",
"banner": "guaridavalencianista.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "HDFull",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "hdfull.png",
"banner": "hdfull.png",
"version": 1,

22
plugin.video.alfa/channels/hdfull.py Executable file → Normal file
View File

@@ -273,13 +273,13 @@ def listado_series(item):
def fichas(item):
logger.info()
itemlist = []
textoidiomas=''
infoLabels=dict()
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
if item.title == "Buscar...":
data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
'<h3 class="section-title">')
@@ -320,10 +320,12 @@ def fichas(item):
if scrapedlangs != ">":
textoidiomas = extrae_idiomas(scrapedlangs)
#Todo Quitar el idioma
title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas + "[/B][/COLOR])")
if scrapedrating != ">":
valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>', r'\1,\2', scrapedrating)
infoLabels['rating']=valoracion
title += bbcode_kodi2html(" ([COLOR orange]" + valoracion + "[/COLOR])")
url = urlparse.urljoin(item.url, scrapedurl)
@@ -348,7 +350,8 @@ def fichas(item):
itemlist.append(
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
show=show, folder=True, contentType=contentType, contentTitle=contentTitle))
show=show, folder=True, contentType=contentType, contentTitle=contentTitle,
language =textoidiomas, infoLabels=infoLabels))
## Paginación
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">.raquo;</a>')
@@ -424,7 +427,7 @@ def episodios(item):
for episode in episodes:
thumbnail = host + "/thumbs/" + episode['thumbnail']
language = episode['languages']
temporada = episode['season']
episodio = episode['episode']
if len(episodio) == 1: episodio = '0' + episodio
@@ -465,7 +468,8 @@ def episodios(item):
'id'] + ";3"
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, show=item.show, folder=True, contentType="episode"))
thumbnail=thumbnail, show=item.show, folder=True, contentType="episode",
language=language))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=url_targets,
@@ -610,7 +614,6 @@ def findvideos(item):
itemlist = []
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
url_targets = item.url
## Vídeos
@@ -794,11 +797,14 @@ def agrupa_datos(data):
def extrae_idiomas(bloqueidiomas):
logger.info("idiomas=" + bloqueidiomas)
# Todo cambiar por lista
#textoidiomas=[]
textoidiomas = ''
patronidiomas = '([a-z0-9]+).png"'
idiomas = re.compile(patronidiomas, re.DOTALL).findall(bloqueidiomas)
textoidiomas = ""
for idioma in idiomas:
textoidiomas = textoidiomas + idioma.upper() + " "
textoidiomas = textoidiomas + idioma +" "
#textoidiomas.append(idioma.upper())
return textoidiomas

View File

@@ -3,7 +3,7 @@
"name": "HentaiEnEspañol",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "https://s11.postimg.org/cmuwcvvpf/hentaienespanol.png",
"banner": "https://s3.postimg.org/j3qkfut8z/hentaienespanol_banner.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Hentai ID",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "https://dl.dropboxusercontent.com/u/30248079/hentai_id.png",
"banner": "https://dl.dropboxusercontent.com/u/30248079/hentai_id2.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Idocumentales",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "https://s27.postimg.org/pjq3y552b/idocumentales.png",
"banner": "https://s16.postimg.org/6d8bh1z1x/idocumentales_banner.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "Inkapelis",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"changes": [
{
"date": "12/03/2017",
@@ -23,8 +23,7 @@
"banner": "inkapelis.png",
"categories": [
"movie",
"vos",
"latino"
"vos"
],
"settings": [
{

6
plugin.video.alfa/channels/inkapelis.py Executable file → Normal file
View File

@@ -352,7 +352,8 @@ def findvideos(item):
if server == "Ul":
server = "Uploaded"
title = "%s [%s][%s]" % (server, idioma, calidad)
itemlist.append(item.clone(action="play", title=title, url=url))
itemlist.append(item.clone(action="play", title=title, url=url, language = idioma, quality = calidad,
server = server))
patron = 'id="(embed[0-9]*)".*?<div class="calishow">(.*?)<.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -362,7 +363,8 @@ def findvideos(item):
title = "Directo"
idioma = scrapertools.find_single_match(data, 'href="#%s".*?>([^<]+)<' % id_embed)
title = "%s [%s][%s]" % (title.capitalize(), idioma, calidad)
itemlist.append(item.clone(action="play", title=title, url=url))
itemlist.append(item.clone(action="play", title=title, url=url, language = idioma, quality = calidad,
server = server))
if itemlist:
if not config.get_setting('menu_trailer', item.channel):

View File

@@ -1,7 +1,7 @@
{
"id": "javtasty",
"name": "JavTasty",
"language": "es",
"language": ["*"],
"active": true,
"adult": true,
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "javus.net",
"active": true,
"adult": true,
"language": "es",
"language": ["*"],
"thumbnail": "https://s15.postimg.org/pzd3h4vy3/javus.png",
"banner": "https://s21.postimg.org/5pqzedp2f/javus_banner.png",
"version": 1,

View File

@@ -3,7 +3,7 @@
"name": "JKanime",
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "jkanime.png",
"banner": "jkanime.png",
"version": 1,

View File

@@ -3,7 +3,9 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
@@ -11,7 +13,7 @@ from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="ultimos_capitulos", title="Últimos Capitulos", url="http://jkanime.net/"))
itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos", url="http://jkanime.net/"))
@@ -25,7 +27,7 @@ def mainlist(item):
def ultimos_capitulos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul class="ratedul">.+?</ul>')
data = data.replace('\t', '')
@@ -43,7 +45,8 @@ def ultimos_capitulos(item):
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot))
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
show=scrapedtitle.strip(), fulltitle=title))
return itemlist
@@ -67,7 +70,7 @@ def search(item, texto):
def ultimos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul class="latestul">(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)<'
@@ -90,7 +93,7 @@ def generos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<div class="genres">(.*?)</div>')
patron = '<a href="([^"]+)">([^<]+)</a>'
@@ -114,7 +117,7 @@ def letras(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul class="animelet">(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
@@ -138,24 +141,9 @@ def series(item):
logger.info()
# Descarga la pagina
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
# Extrae las entradas
'''
<table class="search">
<tr>
<td rowspan="2">
<a href="http://jkanime.net/basilisk-kouga-ninpou-chou/"><img src="http://jkanime.net/assets/images/animes/thumbnail/basilisk-kouga-ninpou-chou.jpg" width="50" /></a>
</td>
<td><a class="titl" href="http://jkanime.net/basilisk-kouga-ninpou-chou/">Basilisk: Kouga Ninpou Chou</a></td>
<td rowspan="2" style="width:50px; text-align:center;">Serie</td>
<td rowspan="2" style="width:50px; text-align:center;" >24 Eps</td>
</tr>
<tr>
<td><p>Basilisk, considerada una de las mejores series del genero ninja, nos narra la historia de dos clanes ninja separados por el odio entre dos familias. Los actuales representantes, Kouga Danjo del clan Kouga y Ogen del clan&#8230; <a class="next" href="http://jkanime.net/basilisk-kouga-ninpou-chou/">seguir leyendo</a></p></td>
</tr>
</table>
'''
patron = '<table class="search[^<]+'
patron += '<tr[^<]+'
patron += '<td[^<]+'
@@ -181,7 +169,7 @@ def series(item):
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
plot=plot, extra=extra))
plot=plot, extra=extra, show=scrapedtitle.strip()))
try:
siguiente = scrapertools.get_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
@@ -198,7 +186,7 @@ def series(item):
return itemlist
def getPagesAndEpisodes(data):
def get_pages_and_episodes(data):
results = re.findall('href="#pag([0-9]+)">[0-9]+ - ([0-9]+)', data)
if results:
return int(results[-1][0]), int(results[-1][1])
@@ -210,37 +198,30 @@ def episodios(item):
itemlist = []
# Descarga la pagina
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
scrapedplot = scrapertools.get_match(data, '<meta name="description" content="([^"]+)"/>')
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="separedescrip">.*?src="([^"]+)"')
idserie = scrapertools.get_match(data, "ajax/pagination_episodes/(\d+)/")
logger.info("idserie=" + idserie)
if " Eps" in item.extra and not "Desc" in item.extra:
if " Eps" in item.extra and "Desc" not in item.extra:
caps_x = item.extra
caps_x = caps_x.replace(" Eps", "")
capitulos = int(caps_x)
paginas = capitulos / 10 + (capitulos % 10 > 0)
else:
paginas, capitulos = getPagesAndEpisodes(data)
paginas, capitulos = get_pages_and_episodes(data)
logger.info("idserie=" + idserie)
for numero in range(1, paginas + 1):
for num_pag in range(1, paginas + 1):
numero_pagina = str(numero)
headers = []
headers.append(
["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:16.0) Gecko/20100101 Firefox/16.0"])
headers.append(["Referer", item.url])
data2 = scrapertools.cache_page(
"http://jkanime.net/ajax/pagination_episodes/" + idserie + "/" + numero_pagina + "/")
logger.info("data2=" + data2)
numero_pagina = str(num_pag)
headers = {"Referer": item.url}
data2 = scrapertools.cache_page("http://jkanime.net/ajax/pagination_episodes/%s/%s/" % (idserie, numero_pagina),
headers=headers)
# logger.info("data2=" + data2)
'''
[{"number":"1","title":"Rose of Versailles - 1"},{"number":"2","title":"Rose of Versailles - 2"},{"number":"3","title":"Rose of Versailles - 3"},{"number":"4","title":"Rose of Versailles - 4"},{"number":"5","title":"Rose of Versailles - 5"},{"number":"6","title":"Rose of Versailles - 6"},{"number":"7","title":"Rose of Versailles - 7"},{"number":"8","title":"Rose of Versailles - 8"},{"number":"9","title":"Rose of Versailles - 9"},{"number":"10","title":"Rose of Versailles - 10"}]
[{"id":"14199","title":"GetBackers - 1","number":"1","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14200","title":"GetBackers - 2","number":"2","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14201","title":"GetBackers - 3","number":"3","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14202","title":"GetBackers - 4","number":"4","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14203","title":"GetBackers - 5","number":"5","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14204","title":"GetBackers - 6","number":"6","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14205","title":"GetBackers - 7","number":"7","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14206","title":"GetBackers - 8","number":"8","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14207","title":"GetBackers - 9","number":"9","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14208","title":"GetBackers - 10","number":"10","animes_id":"122","timestamp":"2012-01-04 16:59:30"}]
'''
patron = '"number"\:"(\d+)","title"\:"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data2)
@@ -253,12 +234,12 @@ def episodios(item):
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
fanart=thumbnail, plot=plot))
fanart=thumbnail, plot=plot, fulltitle=title))
if len(itemlist) == 0:
try:
porestrenar = scrapertools.get_match(data,
'<div[^<]+<span class="labl">Estad[^<]+</span[^<]+<span[^>]+>Por estrenar</span>')
# porestrenar = scrapertools.get_match(data,
# '<div[^<]+<span class="labl">Estad[^<]+</span[^<]+<span[^>]+>Por estrenar</span>')
itemlist.append(Item(channel=item.channel, action="findvideos", title="Serie por estrenar", url="",
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot,
server="directo", folder=False))
@@ -266,3 +247,34 @@ def episodios(item):
pass
return itemlist
def findvideos(item):
    """Collect playable video links for an episode page.

    Direct jkanime-hosted embeds (jk.php) are resolved here; every other
    iframe URL is handed to servertools so the generic server detection
    can identify it.
    """
    logger.info()
    itemlist = []
    # Flatten the page so the iframe regex is not broken by line wraps.
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
    list_videos = scrapertools.find_multiple_matches(data, '<iframe class="player_conte" src="([^"]+)"')
    aux_url = []
    index = 1
    for e in list_videos:
        if e.startswith("https://jkanime.net/jk.php?"):
            # Self-hosted player: fetch the embed page (Referer seems to be
            # required by the site -- TODO confirm) and pull the media URL
            # out of the <embed> tag's "file" parameter.
            headers = {"Referer": item.url}
            data = httptools.downloadpage(e, headers=headers).data
            url = scrapertools.find_single_match(data, '<embed class="player_conte".*?&file=([^\"]+)\"')
            if url:
                itemlist.append(item.clone(title="Enlace encontrado en server #%s" % index, url=url, action="play"))
                index += 1
        else:
            # External hoster: defer identification to the generic resolver.
            aux_url.append(e)
    itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
    # Propagate the episode's metadata onto every resolved video entry.
    for videoitem in itemlist:
        videoitem.fulltitle = item.fulltitle
        videoitem.channel = item.channel
        videoitem.thumbnail = item.thumbnail
    return itemlist

View File

@@ -1,7 +1,7 @@
{
"id": "lacajita",
"name": "LaCajita",
"language": "es",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,
@@ -14,7 +14,6 @@
"thumbnail": "http://i.imgur.com/LVdupxc.png",
"categories": [
"movie",
"latino",
"vos"
],
"settings": [

6
plugin.video.alfa/channels/lacajita.py Executable file → Normal file
View File

@@ -103,7 +103,8 @@ def entradas(item):
filtro = {"poster_path": filtro_thumb}.items()
itemlist.append(Item(channel=item.channel, action="findvideos", url=scrapedurl, title=titulo,
contentTitle=scrapedtitle, infoLabels={'filtro': filtro}, text_color=color2,
thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle))
thumbnail=scrapedthumbnail, contentType="movie", fulltitle=scrapedtitle, language =
idiomas))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if len(matches) > item.page + 20:
@@ -233,7 +234,8 @@ def findvideos(item):
if detalle:
title += " (%s)" % detalle
itemlist.append(item.clone(action="play", url=url, title=title, server=servidor, text_color=color3))
itemlist.append(item.clone(action="play", url=url, title=title, server=servidor, text_color=color3,
language = idioma, quality = calidad))
if item.extra != "findvideos" and config.get_videolibrary_support():
itemlist.append(item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library",

View File

@@ -1,12 +1,9 @@
{
"id": "locopelis",
"name": "LOCOPELIS",
"compatible": {
"addon_version": "4.3"
},
"active": true,
"adult": false,
"language": "es",
"language": ["cast", "lat"],
"thumbnail": "https://s31.postimg.org/5worjw2nv/locopelis.png",
"banner": "https://s31.postimg.org/ng87bb9jv/locopelis_banner.png",
"version": 1,
@@ -33,7 +30,6 @@
}
],
"categories": [
"latino",
"movie"
],
"settings": [

View File

@@ -0,0 +1,30 @@
{
"id": "maxipelis",
"name": "Maxipelis",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"version": 1,
"thumbnail": "http://www.maxipelis.net/wp-content/uploads/2016/12/applogo.png",
"banner": "",
"changes": [
{
"date": "25/08/2017",
"description": "Nuevo canal"
}
],
"categories": [
"movie",
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,152 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa
# ------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.maxipelis.net'
def mainlist(item):
    """Build the channel's root menu: latest movies, categories and search."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Novedades", action="peliculas", url=host + "/pelicula"),
        Item(channel=item.channel, title="Categorias", action="categorias", url=host),
        Item(channel=item.channel, title="Buscar", action="search"),
    ]
    return menu
def search(item, texto):
    """Global/channel search entry point.

    Builds the site search URL from *texto* and delegates the scraping to
    sub_search(). Returns an empty list on any error so the global search
    aggregator is never interrupted by one broken channel.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return sub_search(item)
    except Exception:
        # `sys` is already imported at module level; the original re-imported
        # it here. Log the failure but keep the global search flow alive.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def sub_search(item):
    """Scrape one page of search results, appending a pager entry if present."""
    logger.info()
    results = []
    html = httptools.downloadpage(item.url).data
    html = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", html)
    patron = ('<div class="thumbnail animation-2"> <a href="([^"]+)"> <img src="([^"]+)" alt="(.*?)" />.*?'
              '<div class="contenido"><p>(.*?)</p>')
    for scrapedurl, scrapedthumb, scrapedtitle, scrapedplot in scrapertools.find_multiple_matches(html, patron):
        results.append(item.clone(channel=item.channel, action="findvideos", title=scrapedtitle,
                                  url=scrapedurl, plot=scrapedplot, thumbnail=scrapedthumb))
    next_url = scrapertools.find_single_match(
        html, '<div class=\'resppages\'><a href="([^"]+)" ><span class="icon-chevron-right"></span>')
    if next_url:
        results.append(Item(channel=item.channel, action="sub_search", title="Next page >>", url=next_url))
    return results
def categorias(item):
    """Build one menu entry per category link found on the page."""
    logger.info()
    entries = []
    page = httptools.downloadpage(item.url).data
    links = re.findall('<li class="cat-item"><a href="([^"]+)".*?>(.*?)</a>', page, re.DOTALL)
    scrapertools.printMatches(links)
    for link, label in links:
        entries.append(Item(channel=item.channel, action="peliculas", title=label,
                            url=host + link, thumbnail="", plot=""))
    return entries
def peliculas(item):
    """List the movies on the current page, with year/quality in the title.

    Queries TMDB for extra infoLabels when a year was scraped, and appends
    a "Siguiente >>" entry when the page carries a next-page link.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = r'<div class="poster">.*?src="(.*?)" alt="(.*?)">.*?'
    patron += r'"quality">(.*?)<.*?href="(.*?)".*?<span>(\d{4}).*?"texto">(.*?)<.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for thumbnail, contentTitle, quality, url, year, plot in matches:
        if not quality or not year:
            title = contentTitle
        else:
            title = contentTitle + " (" + year + ") " + "[COLOR red]" + quality + "[/COLOR]"
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url,
                        thumbnail=thumbnail, plot=plot, contentTitle=contentTitle,
                        infoLabels={'year': year})
        if year:
            # Enrich with TMDB data only when a year is available to disambiguate.
            tmdb.set_infoLabels_item(new_item)
        itemlist.append(new_item)
    # Pagination is best-effort: a missing "next" link is normal on the last
    # page, so test for it explicitly instead of the original bare
    # `except: pass`, which silently hid every error in this section.
    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)" ><span class="icon-chevron-right"></span></a></div>')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Siguiente >>",
                             text_color="yellow", url=next_page))
    return itemlist
def findvideos(item):
    """List the playable links of a movie page, one entry per mirror row."""
    logger.info()
    links = []
    page = httptools.downloadpage(item.url).data
    row_re = '<tr><td> <a class="link_a" href="([^"]+)".*?<td> (.*?)</td><td> (.*?)</td><td> (.*?)</td>'
    for link, _scraped_server, calidad, idioma in scrapertools.find_multiple_matches(page, row_re):
        # The server is resolved from the URL itself; the scraped server
        # column is not used (same as the original, where it was shadowed).
        links.append(item.clone(action="play", title=item.contentTitle, fulltitle=item.title,
                                url=link, language=idioma, contentTitle=item.contentTitle,
                                quality=calidad, server=servertools.get_server_from_url(link)))
    if config.get_videolibrary_support() and links and item.extra != 'findvideos':
        links.append(Item(channel=item.channel,
                          title='[COLOR yellow]Agregar esta pelicula a la Videoteca[/COLOR]',
                          url=item.url, action="add_pelicula_to_library", extra="findvideos",
                          contentTitle=item.contentTitle))
    return links
# def play(item):
# logger.info()
# itemlist = servertools.find_video_items(data=item.url)
#
# for videoitem in itemlist:
# videoitem.title = item.title
# videoitem.fulltitle = item.fulltitle
# videoitem.thumbnail = item.thumbnail
# videoitem.channel = item.channel
# videoitem.
# return itemlist

View File

@@ -3,7 +3,7 @@
"name": "Mejor Torrent",
"active": true,
"adult": false,
"language": "es",
"language": ["cast"],
"thumbnail": "mejortorrent.png",
"banner": "mejortorrent.png",
"version": 1,

View File

@@ -1,12 +1,9 @@
{
"id": "metaserie",
"name": "MetaSerie (Latino)",
"compatible": {
"addon_version": "4.3"
},
"active": true,
"adult": false,
"language": "es",
"language": ["lat"],
"thumbnail": "https://s32.postimg.org/7g50yo39h/metaserie.png",
"banner": "https://s31.postimg.org/u6yddil8r/metaserie_banner.png",
"version": 1,
@@ -33,7 +30,6 @@
}
],
"categories": [
"latino",
"tvshow"
],
"settings": [

Some files were not shown because too many files have changed in this diff Show More