folder reorganization

cttynul
2019-04-23 14:32:53 +02:00
parent 659751b2f4
commit 8e7ee78a87
1195 changed files with 267003 additions and 2 deletions

.dev Normal file

@@ -0,0 +1 @@

.gitignore vendored

@@ -1,5 +1,3 @@
*
!plugin.video.alfa
*.pyo
*.pyc
.DS_Store

__init__.py Normal file

@@ -0,0 +1 @@
# -*- coding: utf-8 -*-

addon.xml Normal file

@@ -0,0 +1,48 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.8.3" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
    </requires>
    <extension point="xbmc.python.pluginsource" library="default.py">
        <provides>video</provides>
    </extension>
    <extension point="xbmc.addon.metadata">
        <summary lang="es">Navega con Kodi por páginas web.</summary>
        <assets>
            <icon>logo-cumple.png</icon>
            <fanart>fanart1.jpg</fanart>
            <screenshot>resources/media/themes/ss/1.jpg</screenshot>
            <screenshot>resources/media/themes/ss/2.jpg</screenshot>
            <screenshot>resources/media/themes/ss/3.jpg</screenshot>
            <screenshot>resources/media/themes/ss/4.jpg</screenshot>
        </assets>
        <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ animeshd ¤ gamovideo ¤ elitetorrent
¤ newpct1 ¤ cinetux ¤ asialiveaction
¤ gnula ¤ fembed ¤ hdfilmologia
¤ gvideo ¤ vidlox ¤ javtasty
¤ qwertyy
[COLOR green][B]Novedades[/B][/COLOR]
¤ watchseries ¤ xstreamcdn ¤ videobb
¤ animespace ¤ tvanime
Agradecimientos a @shlibidon por colaborar con esta versión
</news>
        <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
        <summary lang="en">Browse web pages using Kodi</summary>
        <description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>
        <disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]</disclaimer>
        <platform>all</platform>
        <license>GNU GPL v3</license>
        <forum>foro</forum>
        <website>web</website>
        <email>my@email.com</email>
        <source>https://github.com/alfa-addon/addon</source>
    </extension>
    <extension point="xbmc.service" library="videolibrary_service.py" start="login|startup">
    </extension>
</addon>

channels/LIKUOO.json Normal file

@@ -0,0 +1,16 @@
{
    "id": "LIKUOO",
    "name": "LIKUOO",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://likuoo.video/files_static/images/logo.jpg",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

channels/LIKUOO.py Normal file

@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'http://www.likuoo.video'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host))
    itemlist.append(Item(channel=item.channel, title="Pornstar", action="categorias", url=host + "/pornstars/"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/all-channels/"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?s=%s" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedthumbnail = "https:" + scrapedthumbnail
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, '...<a href="([^"]+)" class="next">&#187;</a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="item">.*?'
    patron += '<a href="([^"]+)" title="(.*?)">.*?'
    patron += 'src="(.*?)".*?'
    patron += '<div class="runtime">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedtime in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        scrapedtime = scrapedtime.replace("m", ":").replace("s", " ")
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        contentTitle = title
        thumbnail = "https:" + scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                             fanart=thumbnail, plot=plot, contentTitle=contentTitle))
    next_page = scrapertools.find_single_match(data, '...<a href="([^"]+)" class="next">&#187;</a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist


def play(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel  # fixed: original assigned a dead local (videochannel = item.channel)
    return itemlist

channels/TXXX.json Normal file

@@ -0,0 +1,16 @@
{
    "id": "TXXX",
    "name": "TXXX",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.txxx.com/images/desktop-logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

channels/TXXX.py Normal file

@@ -0,0 +1,149 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools

host = 'http://www.txxx.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host + "/latest-updates/"))
    itemlist.append(Item(channel=item.channel, title="Mejor valoradas", action="lista", url=host + "/top-rated/"))
    itemlist.append(Item(channel=item.channel, title="Mas popular", action="lista", url=host + "/most-popular/"))
    itemlist.append(Item(channel=item.channel, title="Canal", action="catalogo", url=host + "/channels-list/most-popular/"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/s=%s" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="channel-thumb">.*?'
    patron += '<a href="([^"]+)" title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<span>(.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, num in matches:
        scrapedplot = ""
        scrapedurl = host + scrapedurl
        title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
    next_page = scrapertools.find_single_match(data, '<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="catalogo", title="Página Siguiente >>",
                             text_color="blue", url=next_page))
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a class="categories-list__link" href="([^"]+)">.*?'
    patron += '<span class="categories-list__name cat-icon" data-title="([^"]+)">.*?'
    patron += '<span class="categories-list__badge">(.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, num in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        scrapedthumbnail = ""
        scrapedplot = ""
        title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
        itemlist.append(Item(channel=item.channel, action="lista", title=title, url=url,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)".*?'
    patron += '</div>(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtime in matches:
        contentTitle = scrapedtitle
        scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="thumb__hd">(.*?)</span>')
        duration = scrapertools.find_single_match(scrapedtime, '<span class="thumb__duration">(.*?)</span>')
        if scrapedhd != '':
            title = "[COLOR yellow]" + duration + "[/COLOR] " + "[COLOR red]" + scrapedhd + "[/COLOR] " + scrapedtitle
        else:
            title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
                             plot=plot, contentTitle=title))
    next_page = scrapertools.find_single_match(data, '<a class=" btn btn--size--l btn--next.*?" href="([^"]+)" title="Next Page"')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
    partes = video_url.split('||')
    video_url = decode_url(partes[0])
    video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
    video_url += '&' if '?' in video_url else '?'
    video_url += 'lip=' + partes[2] + '&lt=' + partes[3]
    itemlist.append(item.clone(action="play", title=item.title, url=video_url))
    return itemlist
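
# Illustrative note (not in the original file): the scraped video_url packs four
# '||'-separated fields, along the lines of (made-up values)
#   '<obfuscated url>||/get_file/7/0123456789abcdef0123456789abcdef/||<lip value>||<lt value>'
# play() decodes field 0 with decode_url() below, substitutes field 1 for the
# /get_file/<digits>/<32 chars>/ segment, then appends lip= and lt= as query parameters.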
def decode_url(txt):
    _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    reto = ''
    n = 0
    # In the next two lines, the Cyrillic homoglyphs АВСЕМ take 2 bytes per letter; the
    # replace() calls map them to their 1-byte Latin look-alikes: АВСЕМ (10 bytes) -> ABCEM (5 bytes)
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
    while n < len(txt):
        a = _0x52f6x15.index(txt[n])
        n += 1
        b = _0x52f6x15.index(txt[n])
        n += 1
        c = _0x52f6x15.index(txt[n])
        n += 1
        d = _0x52f6x15.index(txt[n])
        n += 1
        a = a << 2 | b >> 4
        b = (b & 15) << 4 | c >> 2
        e = (c & 3) << 6 | d
        reto += chr(a)
        if c != 64:
            reto += chr(b)
        if d != 64:
            reto += chr(e)
    return urllib.unquote(reto)
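
# Quick illustrative check (not in the original file): decode_url() is base64 with
# '.' and ',' as the 62nd/63rd alphabet symbols and '~' as padding, followed by a
# urllib.unquote(), so for example:
#   decode_url('aGVsbG8~')  ->  'hello'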

channels/__init__.py Normal file

@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
import os
import sys

# Appends the main plugin dir to the PYTHONPATH if an internal package cannot be imported.
# Examples: in Plex Media Server all modules are under the "Code.*" package, and in Enigma2 under "Plugins.Extensions.*"
try:
    # from core import logger
    import core
except:
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))

channels/absoluporn.json Normal file

@@ -0,0 +1,15 @@
{
    "id": "absoluporn",
    "name": "absoluporn",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.absoluporn.es/image/deco/logo.gif",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

channels/absoluporn.py Normal file

@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'http://www.absoluporn.es'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Nuevos", action="lista", url=host + "/wall-date-1.html"))
    itemlist.append(Item(channel=item.channel, title="Mas valorados", action="lista", url=host + "/wall-note-1.html"))
    itemlist.append(Item(channel=item.channel, title="Mas vistos", action="lista", url=host + "/wall-main-1.html"))
    itemlist.append(Item(channel=item.channel, title="Mas largos", action="lista", url=host + "/wall-time-1.html"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search-%s-1.html" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '&nbsp;<a href="([^"]+)" class="link1">([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedurl = scrapedurl.replace(".html", "_date.html")
        scrapedurl = host + "/" + scrapedurl
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    patron = '<div class="thumb-main-titre"><a href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += '<div class="time">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedtime in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
                             fanart=thumbnail, contentTitle=scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
    if next_page:
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
                             url=next_page))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'servervideo = \'([^\']+)\'.*?'
    patron += 'path = \'([^\']+)\'.*?'
    patron += 'filee = \'([^\']+)\''
    matches = scrapertools.find_multiple_matches(data, patron)
    for servervideo, path, filee in matches:
        scrapedurl = servervideo + path + "56ea912c4df934c216c352fa8d623af3" + filee
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist

channels/alfavorites.py Normal file

@@ -0,0 +1,921 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa favorites
# ==============
# - Links saved as favorites, only within Alfa, not Kodi.
# - Links are organized into (virtual) folders that the user can define.
# - A single file holds all folders and links: alfavorites-default.json
# - alfavorites-default.json can be copied to other devices, since the only local dependency is the thumbnail
#   associated with each link, and that is detected in code and adjusted to the current device.
# - Several alfavoritos files can coexist and be switched between, but only one of them is the "active list".
# - The files must live in config.get_data_path(), start with alfavorites- and end in .json
# Requirements in other modules to run this channel:
# - Add a link to this channel in channelselector.py
# - Modify platformtools.py to handle the context menu and add "Guardar enlace" in set_context_commands
# ------------------------------------------------------------
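# Illustrative sketch (not in the original file) of the layout that save() below writes
# to alfavorites-default.json; folder titles are localized strings and each item entry
# is an Item().tourl() string:
#
#   {
#       "info_lista": {"created": "2019-04-23 14:32", "updated": "2019-04-23 14:32"},
#       "user_favorites": [
#           {"title": "Movies", "items": ["<item.tourl() string>", "..."]},
#           {"title": "TV", "items": []}
#       ]
#   }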
import os, re
from datetime import datetime

from core.item import Item
from platformcode import config, logger, platformtools
from core import filetools, jsontools


def fechahora_actual():
    return datetime.now().strftime('%Y-%m-%d %H:%M')


# List helpers
# -------------------
PREFIJO_LISTA = 'alfavorites-'

# Returns the name of the active list (e.g. alfavorites-default.json)
def get_lista_activa():
    return config.get_setting('lista_activa', default=PREFIJO_LISTA + 'default.json')

# Extracts the list name from the filename, dropping prefix and suffix (e.g. alfavorites-Prueba.json => Prueba)
def get_name_from_filename(filename):
    return filename.replace(PREFIJO_LISTA, '').replace('.json', '')

# Builds the list filename from a name, adding prefix and suffix (e.g. Prueba => alfavorites-Prueba.json)
def get_filename_from_name(name):
    return PREFIJO_LISTA + name + '.json'

# Records in a log file the codes of the lists that have been shared
def save_log_lista_shared(msg):
    msg = fechahora_actual() + ': ' + msg + os.linesep
    fullfilename = os.path.join(config.get_data_path(), 'alfavorites_shared.log')
    with open(fullfilename, 'a') as f:
        f.write(msg)  # the with-block closes the file; the original's explicit f.close() was redundant

# Cleans text for use as a filename
def text_clean(txt, disallowed_chars='[^a-zA-Z0-9\-_()\[\]. ]+', blank_char=' '):
    import unicodedata
    try:
        txt = unicode(txt, 'utf-8')
    except NameError:  # no unicode builtin on Python 3, where str is already unicode
        pass
    txt = unicodedata.normalize('NFKD', txt).encode('ascii', 'ignore')
    txt = txt.decode('utf-8').strip()
    if blank_char != ' ':
        txt = txt.replace(' ', blank_char)
    txt = re.sub(disallowed_chars, '', txt)
    return str(txt)
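
# Illustrative example (not in the original file): accents are stripped via the NFKD
# normalization, blanks are replaced, and anything outside [a-zA-Z0-9-_()[]. ] is dropped:
#   text_clean(u'Mi Lista: ñoño!', blank_char='_')  ->  'Mi_Lista_nono'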
# Class to load and save the Alfavoritos file
# --------------------------------------------------------
class AlfavoritesData:
    def __init__(self, filename=None):
        # If no file is specified, the lista_activa is used (and created if it does not exist)
        if filename is None:
            filename = get_lista_activa()
        self.user_favorites_file = os.path.join(config.get_data_path(), filename)

        if not os.path.exists(self.user_favorites_file):
            fichero_anterior = os.path.join(config.get_data_path(), 'user_favorites.json')
            if os.path.exists(fichero_anterior):  # old format, convert (to be removed after a few versions)
                jsondata = jsontools.load(filetools.read(fichero_anterior))
                self.user_favorites = jsondata
                self.info_lista = {}
                self.save()
                filetools.remove(fichero_anterior)
            else:
                self.user_favorites = []
        else:
            jsondata = jsontools.load(filetools.read(self.user_favorites_file))
            if not 'user_favorites' in jsondata or not 'info_lista' in jsondata:  # wrong format
                self.user_favorites = []
            else:
                self.user_favorites = jsondata['user_favorites']
                self.info_lista = jsondata['info_lista']

        if len(self.user_favorites) == 0:
            self.info_lista = {}
            # Create a few default folders
            self.user_favorites.append({'title': config.get_localized_string(30122), 'items': []})
            self.user_favorites.append({'title': config.get_localized_string(30123), 'items': []})
            self.user_favorites.append({'title': config.get_localized_string(70149), 'items': []})
            self.save()

    def save(self):
        if 'created' not in self.info_lista:
            self.info_lista['created'] = fechahora_actual()
        self.info_lista['updated'] = fechahora_actual()
        jsondata = {}
        jsondata['user_favorites'] = self.user_favorites
        jsondata['info_lista'] = self.info_lista
        if not filetools.write(self.user_favorites_file, jsontools.dump(jsondata)):
            platformtools.dialog_ok('Alfa', config.get_localized_string(70614), os.path.basename(self.user_favorites_file))
# ============================
# Add from the context menu
# ============================
def addFavourite(item):
    logger.info()
    alfav = AlfavoritesData()

    # If we get here via the context menu, the action and channel parameters must be restored
    if item.from_action:
        item.__dict__['action'] = item.__dict__.pop('from_action')
    if item.from_channel:
        item.__dict__['channel'] = item.__dict__.pop('from_channel')

    # Clean the title
    item.title = re.sub(r'\[COLOR [^\]]*\]', '', item.title.replace('[/COLOR]', '')).strip()
    if item.text_color:
        item.__dict__.pop('text_color')

    # Dialog to choose/create a folder
    i_perfil = _selecciona_perfil(alfav, config.get_localized_string(70546))
    if i_perfil == -1:
        return False

    # Make sure the same link does not already exist in the folder
    campos = ['channel', 'action', 'url', 'extra', 'list_type']  # if all these fields match, the link is considered a duplicate
    for enlace in alfav.user_favorites[i_perfil]['items']:
        it = Item().fromurl(enlace)
        repe = True
        for prop in campos:
            if prop in it.__dict__ and prop in item.__dict__ and it.__dict__[prop] != item.__dict__[prop]:
                repe = False
                break
        if repe:
            platformtools.dialog_notification(config.get_localized_string(70615), config.get_localized_string(70616))
            return False

    # For a movie/show, complete tmdb info unless tmdb_plus_info is enabled (not needed for season/episode, the "second pass" has already run)
    if (item.contentType == 'movie' or item.contentType == 'tvshow') and not config.get_setting('tmdb_plus_info', default=False):
        from core import tmdb
        tmdb.set_infoLabels(item, True)  # fetch more data in a "second pass" (actors, duration, ...)

    # Record the date it is saved
    item.date_added = fechahora_actual()

    # Save
    alfav.user_favorites[i_perfil]['items'].append(item.tourl())
    alfav.save()
    platformtools.dialog_notification(config.get_localized_string(70531), config.get_localized_string(70532) % alfav.user_favorites[i_perfil]['title'])
    return True
# ====================
# NAVIGATION
# ====================
def mainlist(item):
    logger.info()
    alfav = AlfavoritesData()
    item.category = get_name_from_filename(os.path.basename(alfav.user_favorites_file))
    itemlist = []

    last_i = len(alfav.user_favorites) - 1
    for i_perfil, perfil in enumerate(alfav.user_favorites):
        context = []
        context.append({'title': config.get_localized_string(70533), 'channel': item.channel, 'action': 'editar_perfil_titulo',
                        'i_perfil': i_perfil})
        context.append({'title': config.get_localized_string(70534), 'channel': item.channel, 'action': 'eliminar_perfil',
                        'i_perfil': i_perfil})
        if i_perfil > 0:
            context.append({'title': config.get_localized_string(70535), 'channel': item.channel, 'action': 'mover_perfil',
                            'i_perfil': i_perfil, 'direccion': 'top'})
            context.append({'title': config.get_localized_string(70536), 'channel': item.channel, 'action': 'mover_perfil',
                            'i_perfil': i_perfil, 'direccion': 'arriba'})
        if i_perfil < last_i:
            context.append({'title': config.get_localized_string(70537), 'channel': item.channel, 'action': 'mover_perfil',
                            'i_perfil': i_perfil, 'direccion': 'abajo'})
            context.append({'title': config.get_localized_string(70538), 'channel': item.channel, 'action': 'mover_perfil',
                            'i_perfil': i_perfil, 'direccion': 'bottom'})
        plot = '%d enlaces en la carpeta' % len(perfil['items'])
        itemlist.append(Item(channel=item.channel, action='mostrar_perfil', title=perfil['title'], plot=plot, i_perfil=i_perfil, context=context))

    itemlist.append(item.clone(action='crear_perfil', title=config.get_localized_string(70542), folder=False))
    itemlist.append(item.clone(action='mainlist_listas', title=config.get_localized_string(70603)))
    return itemlist


def mostrar_perfil(item):
    logger.info()
    alfav = AlfavoritesData()
    itemlist = []
    i_perfil = item.i_perfil
    if not alfav.user_favorites[i_perfil]:
        return itemlist

    last_i = len(alfav.user_favorites[i_perfil]['items']) - 1
    ruta_runtime = config.get_runtime_path()

    for i_enlace, enlace in enumerate(alfav.user_favorites[i_perfil]['items']):
        it = Item().fromurl(enlace)
        it.context = [{'title': '[COLOR blue]' + config.get_localized_string(70617) + '[/COLOR]', 'channel': item.channel, 'action': 'acciones_enlace',
                       'i_enlace': i_enlace, 'i_perfil': i_perfil}]
        it.plot += '[CR][CR][COLOR blue]Canal:[/COLOR] ' + it.channel + ' [COLOR blue]Action:[/COLOR] ' + it.action
        if it.extra != '':
            it.plot += ' [COLOR blue]Extra:[/COLOR] ' + it.extra
        it.plot += '[CR][COLOR blue]Url:[/COLOR] ' + it.url if isinstance(it.url, str) else '...'
        if it.date_added != '':
            it.plot += '[CR][COLOR blue]Added:[/COLOR] ' + it.date_added

        # If it is not a url and does not carry the system path, convert the path, since it will have been copied from another device.
        # It would be cleaner to do the conversion in an import menu, but for now it is handled at run-time.
        if it.thumbnail and '://' not in it.thumbnail and not it.thumbnail.startswith(ruta_runtime):
            ruta, fichero = filetools.split(it.thumbnail)
            if ruta == '' and fichero == it.thumbnail:  # on linux, split on a windows path does not separate correctly
                ruta, fichero = filetools.split(it.thumbnail.replace('\\', '/'))
            if 'channels' in ruta and 'thumb' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources', 'media', 'channels', 'thumb', fichero)
            elif 'themes' in ruta and 'default' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources', 'media', 'themes', 'default', fichero)

        itemlist.append(it)
    return itemlist
# Shared internal routines
# ----------------------------

# Dialog to select/create a folder. Returns the folder's index in user_favorites (-1 on cancel)
def _selecciona_perfil(alfav, titulo='Seleccionar carpeta', i_actual=-1):
    acciones = [(perfil['title'] if i_p != i_actual else '[I][COLOR pink]%s[/COLOR][/I]' % perfil['title']) for i_p, perfil in enumerate(alfav.user_favorites)]
    acciones.append('Crear nueva carpeta')

    i_perfil = -1
    while i_perfil == -1:  # repeat until a folder is selected or the dialog is cancelled
        ret = platformtools.dialog_select(titulo, acciones)
        if ret == -1:
            return -1  # cancel requested
        if ret < len(alfav.user_favorites):
            i_perfil = ret
        else:  # create a new folder
            if _crea_perfil(alfav):
                i_perfil = len(alfav.user_favorites) - 1
    return i_perfil


# Dialog to create a folder
def _crea_perfil(alfav):
    titulo = platformtools.dialog_input(default='', heading=config.get_localized_string(70551))
    if titulo is None or titulo == '':
        return False
    alfav.user_favorites.append({'title': titulo, 'items': []})
    alfav.save()
    return True
# Folder and link management
# -----------------------------
def crear_perfil(item):
    logger.info()
    alfav = AlfavoritesData()
    if not _crea_perfil(alfav):
        return False
    platformtools.itemlist_refresh()
    return True


def editar_perfil_titulo(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    titulo = platformtools.dialog_input(default=alfav.user_favorites[item.i_perfil]['title'], heading=config.get_localized_string(70533))
    if titulo is None or titulo == '' or titulo == alfav.user_favorites[item.i_perfil]['title']:
        return False
    alfav.user_favorites[item.i_perfil]['title'] = titulo
    alfav.save()
    platformtools.itemlist_refresh()
    return True


def eliminar_perfil(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    # Ask for confirmation
    if not platformtools.dialog_yesno(config.get_localized_string(70618), config.get_localized_string(70619)):
        return False
    del alfav.user_favorites[item.i_perfil]
    alfav.save()
    platformtools.itemlist_refresh()
    return True


def acciones_enlace(item):
    logger.info()
    acciones = [config.get_localized_string(70620), config.get_localized_string(70621), config.get_localized_string(70622), config.get_localized_string(70623),
                config.get_localized_string(70624), config.get_localized_string(70548), config.get_localized_string(70625),
                config.get_localized_string(70626), config.get_localized_string(70627), config.get_localized_string(70628)]
    ret = platformtools.dialog_select('Acción a ejecutar', acciones)
    if ret == -1:
        return False  # cancel requested
    elif ret == 0:
        return editar_enlace_titulo(item)
    elif ret == 1:
        return editar_enlace_color(item)
    elif ret == 2:
        return editar_enlace_thumbnail(item)
    elif ret == 3:
        return editar_enlace_carpeta(item)
    elif ret == 4:
        return editar_enlace_lista(item)
    elif ret == 5:
        return eliminar_enlace(item)
    elif ret == 6:
        return mover_enlace(item.clone(direccion='top'))
    elif ret == 7:
        return mover_enlace(item.clone(direccion='arriba'))
    elif ret == 8:
        return mover_enlace(item.clone(direccion='abajo'))
    elif ret == 9:
        return mover_enlace(item.clone(direccion='bottom'))
def editar_enlace_titulo(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]:
        return False
    it = Item().fromurl(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace])
    titulo = platformtools.dialog_input(default=it.title, heading='Cambiar título del enlace')
    if titulo is None or titulo == '' or titulo == it.title:
        return False
    it.title = titulo
    alfav.user_favorites[item.i_perfil]['items'][item.i_enlace] = it.tourl()
    alfav.save()
    platformtools.itemlist_refresh()
    return True


def editar_enlace_color(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]:
        return False
    it = Item().fromurl(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace])
    colores = ['green', 'yellow', 'red', 'blue', 'white', 'orange', 'lime', 'aqua', 'pink', 'violet', 'purple', 'tomato', 'olive', 'antiquewhite', 'gold']
    opciones = ['[COLOR %s]%s[/COLOR]' % (col, col) for col in colores]
    ret = platformtools.dialog_select('Seleccionar color:', opciones)
    if ret == -1:
        return False  # cancel requested
    it.text_color = colores[ret]
    alfav.user_favorites[item.i_perfil]['items'][item.i_enlace] = it.tourl()
    alfav.save()
    platformtools.itemlist_refresh()
    return True


def editar_enlace_thumbnail(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]:
        return False
    it = Item().fromurl(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace])

    # From Kodi 17 on, xbmcgui.Dialog().select supports thumbnails (ListItem & useDetails=True)
    is_kodi17 = (config.get_platform(True)['num_version'] >= 17.0)
    if is_kodi17:
        import xbmcgui

    # Dialog to pick a thumbnail (the channel's own or one of the predefined icons)
    opciones = []
    ids = []
    try:
        from core import channeltools
        channel_parameters = channeltools.get_channel_parameters(it.channel)
        if channel_parameters['thumbnail'] != '':
            nombre = 'Canal %s' % it.channel
            if is_kodi17:
                it_thumb = xbmcgui.ListItem(nombre)
                it_thumb.setArt({'thumb': channel_parameters['thumbnail']})
                opciones.append(it_thumb)
            else:
                opciones.append(nombre)
            ids.append(channel_parameters['thumbnail'])
    except:
        pass

    resource_path = os.path.join(config.get_runtime_path(), 'resources', 'media', 'themes', 'default')
    for f in sorted(os.listdir(resource_path)):
        if f.startswith('thumb_') and not f.startswith('thumb_intervenido') and f != 'thumb_back.png':
            nombre = f.replace('thumb_', '').replace('_', ' ').replace('.png', '')
            if is_kodi17:
                it_thumb = xbmcgui.ListItem(nombre)
                it_thumb.setArt({'thumb': os.path.join(resource_path, f)})
                opciones.append(it_thumb)
            else:
                opciones.append(nombre)
            ids.append(os.path.join(resource_path, f))

    if is_kodi17:
        ret = xbmcgui.Dialog().select('Seleccionar thumbnail:', opciones, useDetails=True)
    else:
        ret = platformtools.dialog_select('Seleccionar thumbnail:', opciones)
    if ret == -1:
        return False  # cancel requested

    it.thumbnail = ids[ret]
    alfav.user_favorites[item.i_perfil]['items'][item.i_enlace] = it.tourl()
    alfav.save()
    platformtools.itemlist_refresh()
    return True


def editar_enlace_carpeta(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]:
        return False

    # Dialog to choose/create a folder
    i_perfil = _selecciona_perfil(alfav, 'Mover enlace a:', item.i_perfil)
    if i_perfil == -1 or i_perfil == item.i_perfil:
        return False

    alfav.user_favorites[i_perfil]['items'].append(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace])
    del alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]
    alfav.save()
    platformtools.itemlist_refresh()
    return True


def editar_enlace_lista(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]:
        return False

    # Dialog to choose the destination list
    opciones = []
    itemlist_listas = mainlist_listas(item)
    for it in itemlist_listas:
        if it.lista != '' and '[<---]' not in it.title:  # skips the create item and the active list
            opciones.append(it.lista)
    if len(opciones) == 0:
        platformtools.dialog_ok('Alfa', 'No hay otras listas dónde mover el enlace.', 'Puedes crearlas desde el menú Gestionar listas de enlaces')
        return False
    ret = platformtools.dialog_select('Seleccionar lista destino', opciones)
    if ret == -1:
        return False  # cancel requested
    alfav_destino = AlfavoritesData(opciones[ret])

    # Dialog to choose/create a folder in the destination list
    i_perfil = _selecciona_perfil(alfav_destino, 'Seleccionar carpeta destino', -1)
    if i_perfil == -1:
        return False

    alfav_destino.user_favorites[i_perfil]['items'].append(alfav.user_favorites[item.i_perfil]['items'][item.i_enlace])
    del alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]
    alfav_destino.save()
    alfav.save()
    platformtools.itemlist_refresh()
    return True


def eliminar_enlace(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    if not alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]:
        return False
    del alfav.user_favorites[item.i_perfil]['items'][item.i_enlace]
    alfav.save()
    platformtools.itemlist_refresh()
    return True


# Moving folders and links (arriba, abajo, top, bottom)
# ------------------------
def mover_perfil(item):
    logger.info()
    alfav = AlfavoritesData()
    alfav.user_favorites = _mover_item(alfav.user_favorites, item.i_perfil, item.direccion)
    alfav.save()
    platformtools.itemlist_refresh()
    return True


def mover_enlace(item):
    logger.info()
    alfav = AlfavoritesData()
    if not alfav.user_favorites[item.i_perfil]:
        return False
    alfav.user_favorites[item.i_perfil]['items'] = _mover_item(alfav.user_favorites[item.i_perfil]['items'], item.i_enlace, item.direccion)
    alfav.save()
    platformtools.itemlist_refresh()
    return True


# Moves a given (numeric) item of a list (arriba, abajo, top, bottom) and returns the modified list
def _mover_item(lista, i_selected, direccion):
    last_i = len(lista) - 1
    if i_selected > last_i or i_selected < 0:
        return lista  # index not present in the list
    if direccion == 'arriba':
        if i_selected == 0:  # already at the very top
            return lista
        lista.insert(i_selected - 1, lista.pop(i_selected))
    elif direccion == 'abajo':
        if i_selected == last_i:  # already at the very bottom
            return lista
        lista.insert(i_selected + 1, lista.pop(i_selected))
    elif direccion == 'top':
        if i_selected == 0:  # already at the very top
            return lista
        lista.insert(0, lista.pop(i_selected))
    elif direccion == 'bottom':
        if i_selected == last_i:  # already at the very bottom
            return lista
        lista.insert(last_i, lista.pop(i_selected))
    return lista
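
# Illustrative example (not in the original file) of _mover_item semantics:
#   _mover_item(['a', 'b', 'c'], 2, 'top')    ->  ['c', 'a', 'b']
#   _mover_item(['a', 'b', 'c'], 0, 'abajo')  ->  ['b', 'a', 'c']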
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Managing different alfavoritos lists
# ------------------------------------------
def mainlist_listas(item):
    logger.info()
    itemlist = []
    item.category = 'Listas'
    lista_activa = get_lista_activa()

    import glob
    path = os.path.join(config.get_data_path(), PREFIJO_LISTA + '*.json')
    for fichero in glob.glob(path):
        lista = os.path.basename(fichero)
        nombre = get_name_from_filename(lista)
        titulo = nombre if lista != lista_activa else '[COLOR gold]%s[/COLOR] [<---]' % nombre
        itemlist.append(item.clone(action='acciones_lista', lista=lista, title=titulo, folder=False))

    itemlist.append(item.clone(action='acciones_nueva_lista', title=config.get_localized_string(70642), folder=False))
    return itemlist
def acciones_lista(item):
    logger.info()
    acciones = [config.get_localized_string(70604), config.get_localized_string(70629),
                config.get_localized_string(70605), config.get_localized_string(70606), config.get_localized_string(70607)]
    ret = platformtools.dialog_select(item.lista, acciones)
    if ret == -1:
        return False  # cancel requested
    elif ret == 0:
        return activar_lista(item)
    elif ret == 1:
        return renombrar_lista(item)
    elif ret == 2:
        return compartir_lista(item)
    elif ret == 3:
        return eliminar_lista(item)
    elif ret == 4:
        return informacion_lista(item)


def activar_lista(item):
    logger.info()
    fullfilename = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70630), item.lista)
        return False
    config.set_setting('lista_activa', item.lista)
    from channelselector import get_thumb
    item_inicio = Item(title=config.get_localized_string(70527), channel="alfavorites", action="mainlist",
                       thumbnail=get_thumb("mylink.png"),
                       category=config.get_localized_string(70527), viewmode="thumbnails")
    platformtools.itemlist_update(item_inicio, replace=True)
    return True


def renombrar_lista(item):
    logger.info()
    fullfilename_current = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename_current):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70630), fullfilename_current)
        return False
    nombre = get_name_from_filename(item.lista)
    titulo = platformtools.dialog_input(default=nombre, heading=config.get_localized_string(70612))
    if titulo is None or titulo == '' or titulo == nombre:
        return False
    titulo = text_clean(titulo, blank_char='_')
    filename = get_filename_from_name(titulo)
    fullfilename = os.path.join(config.get_data_path(), filename)

    # Make sure the new name does not exist yet
    if os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70613), fullfilename)
        return False

    # Rename the file
    if not filetools.rename(fullfilename_current, filename):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70631), fullfilename)
        return False

    # Update settings if it is the active list
    if item.lista == get_lista_activa():
        config.set_setting('lista_activa', filename)
    platformtools.itemlist_refresh()
    return True


def eliminar_lista(item):
    logger.info()
    fullfilename = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70630), item.lista)
        return False
    if item.lista == get_lista_activa():
        platformtools.dialog_ok('Alfa', config.get_localized_string(70632), item.lista)
        return False
    if not platformtools.dialog_yesno(config.get_localized_string(70606), config.get_localized_string(70633) + ' %s ?' % item.lista):
        return False
    filetools.remove(fullfilename)
    platformtools.itemlist_refresh()
    return True


def informacion_lista(item):
    logger.info()
    fullfilename = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70630), item.lista)
        return False
    alfav = AlfavoritesData(item.lista)
    txt = 'Lista: [COLOR gold]%s[/COLOR]' % item.lista
    txt += '[CR]' + config.get_localized_string(70634) + ' ' + alfav.info_lista['created'] + ' ' + config.get_localized_string(70635) + ' ' + alfav.info_lista['updated']
    if 'downloaded_date' in alfav.info_lista:
        txt += '[CR]' + config.get_localized_string(70636) + ' ' + alfav.info_lista['downloaded_date'] + ' ' + alfav.info_lista['downloaded_from'] + ' ' + config.get_localized_string(70637)
    if 'tinyupload_date' in alfav.info_lista:
        txt += '[CR]' + config.get_localized_string(70638) + ' ' + alfav.info_lista['tinyupload_date'] + ' ' + config.get_localized_string(70639) + ' [COLOR blue]' + alfav.info_lista['tinyupload_code'] + '[/COLOR]'
    txt += '[CR]' + config.get_localized_string(70640) + ' ' + str(len(alfav.user_favorites))  # str() added: len() returns an int and cannot be concatenated to a string
    for perfil in alfav.user_favorites:
        txt += '[CR]- %s (%d %s)' % (perfil['title'], len(perfil['items']), config.get_localized_string(70641))
    platformtools.dialog_textviewer(config.get_localized_string(70607), txt)
    return True


def compartir_lista(item):
    logger.info()
    fullfilename = os.path.join(config.get_data_path(), item.lista)
    if not os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70630), fullfilename)
        return False
    try:
        progreso = platformtools.dialog_progress_bg(config.get_localized_string(70643), config.get_localized_string(70644))

        # Fetch tinyupload's main page to obtain the required form data
        from core import httptools, scrapertools
        data = httptools.downloadpage('http://s000.tinyupload.com/index.php').data
        upload_url = scrapertools.find_single_match(data, 'form action="([^"]+)')
        sessionid = scrapertools.find_single_match(upload_url, 'sid=(.+)')
        progreso.update(10, config.get_localized_string(70645), config.get_localized_string(70646))

        # Send the file to tinyupload via multipart/form-data
        from lib import MultipartPostHandler
        import urllib2
        opener = urllib2.build_opener(MultipartPostHandler.MultipartPostHandler)
        params = {'MAX_FILE_SIZE': '52428800', 'file_description': '', 'sessionid': sessionid, 'uploaded_file': open(fullfilename, 'rb')}
        handle = opener.open(upload_url, params)
        data = handle.read()
        progreso.close()

        if not 'File was uploaded successfuly' in data:  # sic: matches the site's own misspelling
            logger.debug(data)
            platformtools.dialog_ok('Alfa', config.get_localized_string(70647))
            return False
        codigo = scrapertools.find_single_match(data, 'href="index\.php\?file_id=([^"]+)')
    except:
        platformtools.dialog_ok('Alfa', config.get_localized_string(70647), item.lista)
        return False

    # Record the code in the log file and inside the list itself
    save_log_lista_shared(config.get_localized_string(70648) + ' ' + item.lista + ' ' + codigo + ' ' + config.get_localized_string(70649))
    alfav = AlfavoritesData(item.lista)
    alfav.info_lista['tinyupload_date'] = fechahora_actual()
    alfav.info_lista['tinyupload_code'] = codigo
    alfav.save()
    platformtools.dialog_ok('Alfa', config.get_localized_string(70650), codigo)
    return True
def acciones_nueva_lista(item):
    logger.info()
    acciones = [config.get_localized_string(70651),
                config.get_localized_string(70652),
                config.get_localized_string(70653),
                config.get_localized_string(70654)]
    ret = platformtools.dialog_select(config.get_localized_string(70608), acciones)
    if ret == -1:
        return False  # cancel requested
    elif ret == 0:
        return crear_lista(item)
    elif ret == 1:
        codigo = platformtools.dialog_input(default='', heading=config.get_localized_string(70609))  # e.g. 05370382084539519168
        if codigo is None or codigo == '':
            return False
        return descargar_lista(item, 'http://s000.tinyupload.com/?file_id=' + codigo)
    elif ret == 2:
        url = platformtools.dialog_input(default='https://', heading=config.get_localized_string(70610))
        if url is None or url == '':
            return False
        return descargar_lista(item, url)
    elif ret == 3:
        txt = config.get_localized_string(70611)
        platformtools.dialog_textviewer(config.get_localized_string(70607), txt)
        return False


def crear_lista(item):
    logger.info()
    titulo = platformtools.dialog_input(default='', heading=config.get_localized_string(70612))
    if titulo is None or titulo == '':
        return False
    titulo = text_clean(titulo, blank_char='_')
    filename = get_filename_from_name(titulo)
    fullfilename = os.path.join(config.get_data_path(), filename)

    # Make sure the file does not exist yet
    if os.path.exists(fullfilename):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70613), fullfilename)
        return False

    # Instantiating the class saves the file with the default empty folders
    alfav = AlfavoritesData(filename)
    platformtools.itemlist_refresh()
    return True


def descargar_lista(item, url):
    logger.info()
    from core import httptools, scrapertools

    if 'tinyupload.com/' in url:
        try:
            from urlparse import urlparse
            data = httptools.downloadpage(url).data
            logger.debug(data)
            down_url, url_name = scrapertools.find_single_match(data, ' href="(download\.php[^"]*)"><b>([^<]*)')
            url_json = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(url)) + down_url
        except:
            platformtools.dialog_ok('Alfa', config.get_localized_string(70655), url)
            return False
    elif 'zippyshare.com/' in url:
        from core import servertools
        video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing('zippyshare', url)
        if not puedes:
            platformtools.dialog_ok('Alfa', config.get_localized_string(70655), motivo)
            return False
        url_json = video_urls[0][1]  # e.g. https://www58.zippyshare.com/d/qPzzQ0UM/25460/alfavorites-testeanding.json
        url_name = url_json[url_json.rfind('/') + 1:]
    elif 'friendpaste.com/' in url:
        url_json = url if url.endswith('/raw') else url + '/raw'
        url_name = 'friendpaste'
    else:
        url_json = url
        url_name = url[url.rfind('/') + 1:]

    # Download the json
    data = httptools.downloadpage(url_json).data

    # Verify the alfavorites json format and record the download info
    jsondata = jsontools.load(data)
    if 'user_favorites' not in jsondata or 'info_lista' not in jsondata:
        logger.debug(data)
        platformtools.dialog_ok('Alfa', config.get_localized_string(70656))
        return False
    jsondata['info_lista']['downloaded_date'] = fechahora_actual()
    jsondata['info_lista']['downloaded_from'] = url
    data = jsontools.dump(jsondata)

    # Ask for a name for the downloaded list
    nombre = get_name_from_filename(url_name)
    titulo = platformtools.dialog_input(default=nombre, heading=config.get_localized_string(70657))
    if titulo is None or titulo == '':
        return False
    titulo = text_clean(titulo, blank_char='_')
    filename = get_filename_from_name(titulo)
    fullfilename = os.path.join(config.get_data_path(), filename)

    # If the new name already exists, ask for confirmation before overwriting
    if os.path.exists(fullfilename):
        if not platformtools.dialog_yesno('Alfa', config.get_localized_string(70613), config.get_localized_string(70658), filename):
            return False

    if not filetools.write(fullfilename, data):
        platformtools.dialog_ok('Alfa', config.get_localized_string(70659), filename)
    platformtools.dialog_ok('Alfa', config.get_localized_string(70660), filename)
    platformtools.itemlist_refresh()
    return True

channels/alsoporn.json Normal file

@@ -0,0 +1,14 @@
{
    "id": "alsoporn",
    "name": "alsoporn",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://alsoporn.com/images/alsoporn.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

channels/alsoporn.py Normal file

@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
import urlparse, urllib2, urllib, re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools

host = 'http://www.alsoporn.com'


def mainlist(item):
    logger.info()
    itemlist = []
    # itemlist.append(Item(channel=item.channel, title="Nuevos", action="lista", url=host + "/en/g/All/new/1"))
    itemlist.append(Item(channel=item.channel, title="Top", action="lista", url=host + "/g/All/top/1"))
    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/=%s/" % texto
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)" />'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                             fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot))
    return sorted(itemlist, key=lambda i: i.title)


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="alsoporn_prev">.*?'
    patron += '<a href="([^"]+)">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
    patron += '<span>([^"]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtime in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                             fanart=thumbnail, plot=plot, contentTitle=scrapedtitle))
    next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    # The page is downloaded once; the original fetched the same url three times.
    data = httptools.downloadpage(item.url).data
    scrapedurl = scrapertools.find_single_match(data, '<iframe frameborder=0 scrolling="no" src=\'([^\']+)\'')
    scrapedurl1 = scrapertools.find_single_match(data, '<iframe src="(.*?)"')
    scrapedurl1 = scrapedurl1.replace("//www.playercdn.com/ec/i2.php?", "https://www.trinitytube.xyz/ec/i2.php?")
    # scrapedurl and scrapedurl1 are extracted but unused (as in the original); only the <source> url is played
    scrapedurl2 = scrapertools.find_single_match(data, '<source src="(.*?)"')
    itemlist.append(item.clone(action="play", title=item.title, fulltitle=item.title, url=scrapedurl2))
    return itemlist

channels/altadefinizione01.json Normal file

@@ -0,0 +1,62 @@
{
    "id": "altadefinizione01",
    "name": "Altadefinizione01",
    "language": ["ita"],
    "active": true,
    "adult": false,
    "thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",
    "banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",
    "categories": ["movie"],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Includi in Ricerca Globale",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Includi in Novità - Film",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_italiano",
            "type": "bool",
            "label": "Includi in Novità - Italiano",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces",
            "type": "bool",
            "label": "Verifica se i link esistono",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces_num",
            "type": "list",
            "label": "Numero di link da verificare",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": ["5", "10", "15", "20"]
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostra link in lingua...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": ["Non filtrare", "IT"]
        }
    ]
}

channels/altadefinizione01.py Normal file

@@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for altadefinizione01
# ------------------------------------------------------------
import re
import urlparse

from channels import filtertools, autoplay, support
from core import servertools, httptools, tmdb, scrapertoolsV2
from core.item import Item
from platformcode import logger, config

# URL that always redirects to the current domain
host = "https://altadefinizione01.team"

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'rapidvideo', 'streamcherry', 'megadrive']
list_quality = ['default']

__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'altadefinizione01')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'altadefinizione01')

headers = [['Referer', host]]
blacklist_categorie = ['Altadefinizione01', 'Altadefinizione.to']


def mainlist(item):
    support.log()
    itemlist = []
    support.menu(itemlist, 'Al Cinema', 'peliculas', host + '/cinema/')
    support.menu(itemlist, 'Ultimi Film Inseriti', 'peliculas', host)
    support.menu(itemlist, 'Film Sub-ITA', 'peliculas', host + '/sub-ita/')
    support.menu(itemlist, 'Film Ordine Alfabetico', 'AZlist', host + '/catalog/')
    support.menu(itemlist, 'Categorie Film', 'categories', host)
    support.menu(itemlist, 'Cerca...', 'search')
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def categories(item):
    support.log(item)
    itemlist = support.scrape(item, '<li><a href="([^"]+)">(.*?)</a></li>', ['url', 'title'], headers, 'Altadefinizione01',
                              patron_block='<ul class="kategori_list">(.*?)</ul>', action='peliculas', url_host=host)
    return support.thumb(itemlist)


def AZlist(item):
    support.log()
    return support.scrape(item, r'<a title="([^"]+)" href="([^"]+)"', ['title', 'url'], headers,
                          patron_block=r'<div class="movies-letter">(.*?)<\/div>', action='peliculas_list', url_host=host)


def newest(categoria):
    # import web_pdb; web_pdb.set_trace()
    support.log(categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "peliculas":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)
            if itemlist[-1].action == "peliculas":
                itemlist.pop()
    # Keep the search going in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist


def search(item, texto):
    support.log(texto)
    item.url = "%s/index.php?do=search&story=%s&subaction=search" % (host, texto)
    try:
        if item.extra == "movie":
            return subIta(item)
        if item.extra == "tvshow":
            return peliculas_tv(item)
    # Keep the search going in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def peliculas(item):
    support.log()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
        info = scrapertoolsV2.find_multiple_matches(data, r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">')
        infoLabels = {}
        for infoLabels['year'], duration, scrapedplot, checkUrl in info:
            if checkUrl == scrapedurl:
                break
        infoLabels['duration'] = int(duration.replace(' min', '')) * 60  # duration in seconds
        scrapedthumbnail = host + scrapedthumbnail
        scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
        fulltitle = scrapedtitle
        if subDiv:
            fulltitle += support.typo(subText + ' _ () color limegreen')
        fulltitle += support.typo(scrapedquality.strip() + ' _ [] color blue')
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType=item.contentType,  # fixed typo: was item.contenType
                 contentTitle=scrapedtitle,
                 contentQuality=scrapedquality.strip(),
                 plot=scrapedplot,
                 title=fulltitle,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    support.nextPage(itemlist, item, data, '<span>[^<]+</span>[^<]+<a href="(.*?)">')
    return itemlist


def peliculas_list(item):
    support.log()
    block = r'<tbody>(.*)<\/tbody>'
    patron = r'<td class="mlnh-thumb"><a href="([^"]+)" title="([^"]+)".*?> <img.*?src="([^"]+)".*?<td class="mlnh-3">([0-9]+)<\/td><td class="mlnh-4">(.*?)<\/td>'
    return support.scrape(item, patron, ['url', 'title', 'year', 'quality'], patron_block=block)


def findvideos(item):
    support.log()
    itemlist = support.server(item, headers=headers)
    # Required for link checking
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)
    support.videolibrary(itemlist, item, 'color blue')
    return itemlist


@@ -0,0 +1,70 @@
{
"id": "altadefinizioneclick",
"name": "AltadefinizioneClick",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneclick.png",
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneciclk.png",
"categories": ["tvshow","movie","vosi"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}


@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per altadefinizioneclick
# ----------------------------------------------------------
import re
from channels import autoplay, filtertools, support
from core import servertools
from core.item import Item
from platformcode import logger, config
host = "https://altadefinizione.center" ### <- cambio Host da .fm a .center
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', "vidoza", "thevideo", "okru", 'youtube']
list_quality = ['1080p']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'altadefinizioneclick')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'altadefinizioneclick')
headers = [['Referer', host]]
def mainlist(item):
support.log()
itemlist = []
support.menu(itemlist, 'Film', 'peliculas', host + "/nuove-uscite/")
support.menu(itemlist, 'Per Genere submenu', 'menu', host, args='Film')
support.menu(itemlist, 'Per Anno submenu', 'menu', host, args='Anno')
support.menu(itemlist, 'Sub-ITA', 'peliculas', host + "/sub-ita/")
support.menu(itemlist, 'Cerca...', 'search', host, 'movie')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
support.log("search ", texto)
item.extra = 'search'
item.url = host + "/?s=" + texto
try:
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
support.log(categoria)
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host + "/nuove-uscite/"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def menu(item):
support.log()
itemlist = support.scrape(item, '<li><a href="(.*?)">(.*?)</a></li>', ['url', 'title'], headers, patron_block='<ul class="listSubCat" id="'+ str(item.args) + '">(.*?)</ul>', action='peliculas')
return support.thumb(itemlist)
def peliculas(item):
support.log()
if item.extra == 'search':
itemlist = support.scrape(item, r'<a href="([^"]+)">\s*<div[^=]+=[^=]+=[^=]+=[^=]+=[^=]+="(.*?)"[^>]+>[^<]+<[^>]+>\s*<h[^=]+="titleFilm">(.*?)<', ['url', 'thumb', 'title'], headers, patronNext='<a class="next page-numbers" href="([^"]+)">')
else:
itemlist = support.scrape(item, r'<img width[^s]+src="([^"]+)[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)<\/a>[^>]+>[^>]+>[^>]+>(?:[^>]+>|)[^I]+IMDB\:\s*([^<]+)<', ['thumb', 'url', 'title', 'rating'], headers, patronNext='<a class="next page-numbers" href="([^"]+)">')
for itm in itemlist:
itm.title = re.sub(r'.\(.*?\)', '', itm.title)
return itemlist
def findvideos(item):
support.log()
itemlist = support.hdpass_get_servers(item)
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item ,'color blue bold')
return itemlist


@@ -0,0 +1,70 @@
{
"id": "altadefinizionehd",
"name": "AltadefinizioneHD",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https://altadefinizione.doctor/wp-content/uploads/2019/02/logo.png",
"bannermenu": "https://altadefinizione.doctor/wp-content/uploads/2019/02/logo.png",
"categories": ["tvshow","movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}


@@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Altadefinizione HD
# ----------------------------------------------------------
import re
from core import httptools, scrapertools, servertools, tmdb
from platformcode import logger, config
from core.item import Item
from channels import autoplay
from channelselector import thumb
host = "https://altadefinizione.doctor"
headers = [['Referer', host]]
list_servers = ['openload']
list_quality = ['default']
def mainlist(item):
logger.info("[altadefinizionehd.py] mainlist")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=item.channel,
action="video",
title="[B]Film[/B]",
url=host + '/movies/',
thumbnail=NovitaThumbnail,
fanart=FilmFanart),
Item(channel=item.channel,
action="menu",
title="[B] > Film per Genere[/B]",
url=host,
extra='GENERE',
thumbnail=NovitaThumbnail,
fanart=FilmFanart),
Item(channel=item.channel,
action="menu",
title="[B] > Film per Anno[/B]",
url=host,
extra='ANNO',
thumbnail=NovitaThumbnail,
fanart=FilmFanart),
Item(channel=item.channel,
action="video",
title="Film Sub-Ita",
url=host + "/genre/sub-ita/",
thumbnail=NovitaThumbnail,
fanart=FilmFanart),
Item(channel=item.channel,
action="video",
title="Film Rip",
url=host + "/genre/dvdrip-bdrip-brrip/",
thumbnail=NovitaThumbnail,
fanart=FilmFanart),
Item(channel=item.channel,
action="video",
title="Film al Cinema",
url=host + "/genre/cinema/",
thumbnail=NovitaThumbnail,
fanart=FilmFanart),
Item(channel=item.channel,
action="search",
extra="movie",
title="[COLOR blue]Cerca Film...[/COLOR]",
thumbnail=CercaThumbnail,
fanart=FilmFanart)]
autoplay.show_option(item.channel, itemlist)
itemlist = thumb(itemlist)
return itemlist
def menu(item):
logger.info("[altadefinizionehd.py] menu")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
logger.info("[altadefinizionehd.py] DATA"+data)
patron = r'<li id="menu.*?><a href="#">FILM PER ' + item.extra + r'<\/a><ul class="sub-menu">(.*?)<\/ul>'
logger.info("[altadefinizionehd.py] BLOCK"+patron)
block = scrapertools.find_single_match(data, patron)
logger.info("[altadefinizionehd.py] BLOCK"+block)
patron = r'<li id=[^>]+><a href="(.*?)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(block)
for url, title in matches:
itemlist.append(
Item(channel=item.channel,
action='video',
title=title,
url=url))
return itemlist
def newest(categoria):
logger.info("[altadefinizionehd.py] newest" + categoria)
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host
item.action = "video"
itemlist = video(item)
if itemlist[-1].action == "video":
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def video(item):
logger.info("[altadefinizionehd.py] video")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
logger.info("[altadefinizionehd.py] Data" +data)
if 'archive-content' in data:
regex = r'<div id="archive-content".*?>(.*?)<div class="pagination'
else:
regex = r'<div class="items".*?>(.*?)<div class="pagination'
block = scrapertools.find_single_match(data, regex)
logger.info("[altadefinizionehd.py] Block" +block)
patron = r'<article .*?class="item movies">.*?<img src="([^"]+)".*?<span class="quality">(.*?)<\/span>.*?<a href="([^"]+)">.*?<h4>([^<]+)<\/h4>(.*?)<\/article>'
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedthumb, scrapedquality, scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapedtitle + " [" + scrapedquality + "]"
patron = r'IMDb: (.*?)<\/span> <span>(.*?)<\/span>.*?"texto">(.*?)<\/div>'
matches = re.compile(patron, re.DOTALL).findall(scrapedinfo)
logger.info("[altadefinizionehd.py] MATCHES" + str(matches))
for rating, year, plot in matches:
infoLabels = {}
infoLabels['Year'] = year
infoLabels['Rating'] = rating
infoLabels['Plot'] = plot
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=title,
fulltitle=scrapedtitle,
infoLabels=infoLabels,
url=scrapedurl,
thumbnail=scrapedthumb))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
patron = "<a class='arrow_pag' href=\"([^\"]+)\""
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="video",
title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
thumbnail=thumb()))
return itemlist
def search(item, texto):
logger.info("[altadefinizionehd.py] init texto=[" + texto + "]")
item.url = host + "/?s=" + texto
return search_page(item)
def search_page(item):
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<img src="([^"]+)".*?.*?<a href="([^"]+)">(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
patron = "<a class='arrow_pag' href=\"([^\"]+)\""
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="search_page",
title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
thumbnail=thumb()))
return itemlist
def findvideos(item):
data = httptools.downloadpage(item.url).data
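# Each dooplay player option carries the ajax parameters (type, post, nume) needed to resolve its stream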
patron = r"<li id='player-.*?'.*?class='dooplay_player_option'\sdata-type='(.*?)'\sdata-post='(.*?)'\sdata-nume='(.*?)'>.*?'title'>(.*?)</"
matches = re.compile(patron, re.IGNORECASE).findall(data)
itemlist = []
for scrapedtype, scrapedpost, scrapednume, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel,
action="play",
fulltitle=item.title + " [" + scrapedtitle + "]",
show=scrapedtitle,
title=item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]",
url=host + "/wp-admin/admin-ajax.php",
post=scrapedpost,
server=scrapedtitle,
nume=scrapednume,
type=scrapedtype,
extra=item.extra,
folder=True))
autoplay.start(itemlist, item)
return itemlist
def play(item):
import urllib
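# Ask the dooplay "doo_player_ajax" endpoint for the player iframe, then hand the embed page to the server resolvers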
payload = urllib.urlencode({'action': 'doo_player_ajax', 'post': item.post, 'nume': item.nume, 'type': item.type})
data = httptools.downloadpage(item.url, post=payload).data
patron = r"<iframe.*src='(([^']+))'\s"
matches = re.compile(patron, re.IGNORECASE).findall(data)
url = matches[0][0]
url = url.strip()
data = httptools.downloadpage(url, headers=headers).data
itemlist = servertools.find_video_items(data=data)
return itemlist
NovitaThumbnail = "https://superrepo.org/static/images/icons/original/xplugin.video.moviereleases.png.pagespeed.ic.j4bhi0Vp3d.png"
GenereThumbnail = "https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png"
FilmFanart = "https://superrepo.org/static/images/fanart/original/script.artwork.downloader.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
ListTxt = "[COLOR orange]Torna a video principale [/COLOR]"
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
thumbnail = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"

15
channels/analdin.json Normal file

@@ -0,0 +1,15 @@
{
"id": "analdin",
"name": "analdin",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.analdin.com/images/logo-retina.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

113
channels/analdin.py Normal file

@@ -0,0 +1,113 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.analdin.com/es'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/más-reciente/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/más-visto/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/mejor-valorado/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorías/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data,'<strong class="popup-title">Canales</strong>(.*?)<strong>Models</strong>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a class="item" href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
patron += 'src="([^"]+)".*?'
patron += '<div class="videos">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return sorted(itemlist, key=lambda i: i.title)
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
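# Each thumb block yields: video url, thumbnail, duration and title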
patron = '<a class="popup-video-link" href="([^"]+)".*?'
patron += 'thumb="([^"]+)".*?'
patron += '<div class="duration">(.*?)</div>.*?'
patron += '<strong class="title">\s*([^"]+)</strong>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtime,scrapedtitle in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=thumbnail, contentTitle = title))
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
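# The direct stream URL is embedded in the player setup as video_url: '...'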
patron = 'video_url: \'([^\']+)\''
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
url = scrapedurl
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist

36
channels/animeforce.json Normal file

@@ -0,0 +1,36 @@
{
"id": "animeforce",
"name": "AnimeForce",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://www.animeforce.org/wp-content/uploads/2013/05/logo-animeforce.png",
"banner": "http://www.animeforce.org/wp-content/uploads/2013/05/logo-animeforce.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

504
channels/animeforce.py Normal file

@@ -0,0 +1,504 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per AnimeForce (ex animeinstreaming.net)
# ------------------------------------------------------------
import re, urllib, urlparse
from core import httptools, scrapertools, servertools, tmdb
from core.item import Item
from platformcode import config, logger
from servers.decrypters import adfly
host = "https://ww1.animeforce.org"
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
headers = [['Referer', host]]
PERPAGE = 20
# -----------------------------------------------------------------
def mainlist(item):
log("mainlist", "mainlist", item.channel)
itemlist = [Item(channel=item.channel,
action="lista_anime",
title="[COLOR azure]Anime [/COLOR]- [COLOR lightsalmon]Lista Completa[/COLOR]",
url=host + "/lista-anime/",
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="animeaggiornati",
title="[COLOR azure]Anime Aggiornati[/COLOR]",
url=host,
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="ultimiep",
title="[COLOR azure]Ultimi Episodi[/COLOR]",
url=host,
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca ...[/COLOR]",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
# =================================================================
# -----------------------------------------------------------------
def newest(categoria):
log("newest", "newest" + categoria)
itemlist = []
item = Item()
try:
if categoria == "anime":
item.url = host
item.action = "ultimiep"
itemlist = ultimiep(item)
if itemlist[-1].action == "ultimiep":
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# =================================================================
# -----------------------------------------------------------------
def search(item, texto):
log("search", "search", item.channel)
item.url = host + "/?s=" + texto
try:
return search_anime(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# =================================================================
# -----------------------------------------------------------------
def search_anime(item):
log("search_anime", "search_anime", item.channel)
itemlist = []
data = httptools.downloadpage(item.url).data
patron = r'<a href="([^"]+)"><img.*?src="([^"]+)".*?title="([^"]+)".*?/>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
if "Sub Ita Download & Streaming" in scrapedtitle or "Sub Ita Streaming":
if 'episodio' in scrapedtitle.lower():
itemlist.append(episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail))
else:
scrapedtitle, eptype = clean_title(scrapedtitle, simpleClean=True)
cleantitle, eptype = clean_title(scrapedtitle)
scrapedurl, total_eps = create_url(scrapedurl, cleantitle)
itemlist.append(
Item(channel=item.channel,
action="episodios",
text_color="azure",
contentType="tvshow",
title=scrapedtitle,
url=scrapedurl,
fulltitle=cleantitle,
show=cleantitle,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Next Page
next_page = scrapertools.find_single_match(data, r'<link rel="next" href="([^"]+)"[^/]+/>')
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="search_anime",
text_bold=True,
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
# =================================================================
# -----------------------------------------------------------------
def animeaggiornati(item):
log("animeaggiornati", "animeaggiornati", item.channel)
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
if 'Streaming' in scrapedtitle:
cleantitle, eptype = clean_title(scrapedtitle)
# Creazione URL
scrapedurl, total_eps = create_url(scrapedurl, scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="episodios",
text_color="azure",
contentType="tvshow",
title=cleantitle,
url=scrapedurl,
fulltitle=cleantitle,
show=cleantitle,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# =================================================================
# -----------------------------------------------------------------
def ultimiep(item):
log("ultimiep", "ultimiep", item.channel)
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
if 'Streaming' in scrapedtitle:
itemlist.append(episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# =================================================================
# -----------------------------------------------------------------
def lista_anime(item):
log("lista_anime", "lista_anime", item.channel)
itemlist = []
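# Local pagination: the page number travels in the url after a '{}' marker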
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
# Carica la pagina
data = httptools.downloadpage(item.url).data
# Estrae i contenuti
patron = r'<li>\s*<strong>\s*<a\s*href="([^"]+?)">([^<]+?)</a>\s*</strong>\s*</li>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapedplot = ""
scrapedthumbnail = ""
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
# Pulizia titolo
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
cleantitle, eptype = clean_title(scrapedtitle, simpleClean=True)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
text_color="azure",
contentType="tvshow",
title=cleantitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=cleantitle,
show=cleantitle,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="lista_anime",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
# =================================================================
# -----------------------------------------------------------------
def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td style="[^"]*?">\s*.*?<strong>(.*?)</strong>.*?\s*</td>\s*<td style="[^"]*?">\s*<a href="([^"]+?)"[^>]+>\s*<img.*?src="([^"]+?)".*?/>\s*</a>\s*</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
vvvvid_videos = False
for scrapedtitle, scrapedurl, scrapedimg in matches:
if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
continue
if 'vvvvid' in scrapedurl.lower():
if not vvvvid_videos: vvvvid_videos = True
itemlist.append(Item(title='I Video VVVVID Non sono supportati', text_color="red"))
continue
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title=scrapedtitle,
url=urlparse.urljoin(host, scrapedurl),
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=item.plot,
fanart=item.fanart,
thumbnail=item.thumbnail))
# Comandi di servizio
if config.get_videolibrary_support() and len(itemlist) != 0 and not vvvvid_videos:
itemlist.append(
Item(channel=item.channel,
title=config.get_localized_string(30161),
text_color="yellow",
text_bold=True,
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
# ==================================================================
# -----------------------------------------------------------------
def findvideos(item):
logger.info("kod.animeforce findvideos")
itemlist = []
if item.extra:
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'%s(.*?)</tr>' % item.extra)
url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]*>')
if 'vvvvid' in url.lower():
itemlist = [Item(title='I Video VVVVID Non sono supportati', text_color="red")]
return itemlist
if 'http' not in url: url = "".join(['https:', url])
else:
url = item.url
if 'adf.ly' in url:
url = adfly.get_long_url(url)
elif 'bit.ly' in url:
url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get("location")
if 'animeforce' in url:
headers.append(['Referer', item.url])
data = httptools.downloadpage(url, headers=headers).data
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
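# Strip query args from the url, then scrape that page for direct mp4 sources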
url = url.split('&')[0]
data = httptools.downloadpage(url, headers=headers).data
patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
matches = re.compile(patron, re.DOTALL).findall(data)
headers.append(['Referer', url])
for video in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title,
url=video + '|' + urllib.urlencode(dict(headers)), folder=False))
else:
itemlist.extend(servertools.find_video_items(data=url))
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
return itemlist
# ==================================================================
# =================================================================
# Funzioni di servizio
# -----------------------------------------------------------------
def scrapedAll(url="", patron=""):
data = httptools.downloadpage(url).data
MyPatron = patron
matches = re.compile(MyPatron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
return matches
# =================================================================
# -----------------------------------------------------------------
def create_url(url, title, eptype=""):
logger.info()
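# Normalize to the "-download-streaming" variant of the page url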
if 'download' not in url:
url = url.replace('-streaming', '-download-streaming')
total_eps = ""
if not eptype:
url = re.sub(r'episodio?-?\d+-?(?:\d+-|)[oav]*', '', url)
else: # Solo se è un episodio passa
total_eps = scrapertools.find_single_match(title.lower(), r'\((\d+)-(?:episodio|sub-ita)\)') # Questo numero verrà rimosso dall'url
if total_eps: url = url.replace('%s-' % total_eps, '')
url = re.sub(r'%s-?\d*-' % eptype.lower(), '', url)
url = url.replace('-fine', '')
return url, total_eps
# =================================================================
# -----------------------------------------------------------------
def clean_title(title, simpleClean=False):
logger.info()
title = title.replace("Streaming", "").replace("&", "")
title = title.replace("Download", "")
title = title.replace("Sub Ita", "")
cleantitle = title.replace("#038;", "").replace("amp;", "").strip()
if '(Fine)' in title:
cleantitle = cleantitle.replace('(Fine)', '').strip() + " (Fine)"
eptype = ""
if not simpleClean:
if "episodio" in title.lower():
eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', title).strip()
if 'episodio' not in eptype.lower():
cleantitle = re.sub(r'Episodio?\s*\d+\s*(?:\(\d+\)|)\s*[\(OAV\)]*', '', cleantitle).strip()
if '(Fine)' in title:
cleantitle = cleantitle.replace('(Fine)', '')
return cleantitle, eptype
# =================================================================
# -----------------------------------------------------------------
def episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail):
scrapedtitle, eptype = clean_title(scrapedtitle, simpleClean=True)
cleantitle, eptype = clean_title(scrapedtitle)
# Creazione URL
scrapedurl, total_eps = create_url(scrapedurl, scrapedtitle, eptype)
epnumber = ""
if 'episodio' in eptype.lower():
epnumber = scrapertools.find_single_match(scrapedtitle.lower(), r'episodio?\s*(\d+)')
eptype += ":? %s%s" % (epnumber, (r" \(%s\):?" % total_eps) if total_eps else "")
extra = "<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
item = Item(channel=item.channel,
action="findvideos",
contentType="tvshow",
title=scrapedtitle,
text_color="azure",
url=scrapedurl,
fulltitle=cleantitle,
extra=extra,
show=cleantitle,
thumbnail=scrapedthumbnail)
return item
# =================================================================
# -----------------------------------------------------------------
def scrapedSingle(url="", single="", patron=""):
data = httptools.downloadpage(url).data
paginazione = scrapertools.find_single_match(data, single)
matches = re.compile(patron, re.DOTALL).findall(paginazione)
scrapertools.printMatches(matches)
return matches
# =================================================================
# -----------------------------------------------------------------
def Crea_Url(pagina="1", azione="ricerca", categoria="", nome=""):
# esempio
# chiamate.php?azione=ricerca&cat=&nome=&pag=
Stringa = host + "chiamate.php?azione=" + azione + "&cat=" + categoria + "&nome=" + nome + "&pag=" + pagina
log("crea_Url", Stringa)
return Stringa
# =================================================================
# -----------------------------------------------------------------
def log(funzione="", stringa="", canale=""):
logger.debug("[" + canale + "].[" + funzione + "] " + stringa)
# =================================================================
# =================================================================
# riferimenti di servizio
# -----------------------------------------------------------------
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"


@@ -0,0 +1,62 @@
{
"id": "animeleggendari",
"name": "AnimeLeggendari",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https://animeleggendari.com/wp-content/uploads/2018/01/123header.jpg",
"bannermenu": "https://animeleggendari.com/wp-content/uploads/2018/01/123header.jpg",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare", "IT"]
}
]
}

221
channels/animeleggendari.py Normal file

@@ -0,0 +1,221 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per animeleggendari
# ------------------------------------------------------------
import re
from channels import autoplay
from channels import filtertools, support
from core import servertools, httptools, scrapertools, tmdb
from platformcode import logger, config
from core.item import Item
host = "https://animeleggendari.com"
# Richiesto per Autoplay
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango']
list_quality = ['default']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animeleggendari')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animeleggendari')
def mainlist(item):
logger.info('[animeleggendari.py] mainlist')
# Richiesto per Autoplay
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=item.channel,
action="lista_anime",
title="[B]Anime Leggendari[/B]",
url="%s/category/anime-leggendari/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="lista_anime",
title="Anime [B]ITA[/B]",
url="%s/category/anime-ita/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="lista_anime",
title="Anime [B]SUB ITA[/B]",
url="%s/category/anime-sub-ita/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="lista_anime",
title="Conclusi",
url="%s/category/serie-anime-concluse/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="lista_anime",
title="In Corso",
url="%s/category/anime-in-corso/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="generi",
title="Generi >",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="search",
title="[B]Cerca...[/B]",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
# Autoplay visualizza voce menu
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info('[animeleggendari.py] search')
item.url = host + "/?s=" + texto
try:
return lista_anime(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def generi(item):
logger.info('[animeleggendari.py] generi')
itemlist = []
data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
logger.info("[animeleggendari.py] generi= "+data)
blocco =scrapertools.find_single_match(data, r'Generi.*?<ul.*?>(.*?)<\/ul>')
logger.info("[animeleggendari.py] blocco= "+blocco)
patron = '<a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(blocco)
logger.info("[animeleggendari.py] matches= "+str(matches))
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.replace('Anime ','')
itemlist.append(
Item(channel=item.channel,
action="lista_anime",
title=title,
url=scrapedurl))
return itemlist
def lista_anime(item):
logger.info('[animeleggendari.py] lista_anime')
itemlist = []
data = httptools.downloadpage(item.url).data
patron = r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()).replace("streaming", "")
if 'top 10 anime da vedere' in scrapedtitle.lower(): continue
lang = scrapertools.find_single_match(scrapedtitle, r"((?:SUB ITA|ITA))")
cleantitle = scrapedtitle.replace(lang, "").replace('(Streaming & Download)', '')
cleantitle = cleantitle.replace('OAV', '').replace('OVA', '').replace('MOVIE', '')
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="tvshow" if 'movie' not in scrapedtitle.lower() and 'ova' not in scrapedtitle.lower() else "movie",
text_color="azure",
title=scrapedtitle.replace('(Streaming & Download)', '').replace(lang, '[B][' + lang + '][/B]'),
fulltitle=cleantitle,
url=scrapedurl,
show=cleantitle,
thumbnail=scrapedthumbnail,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
patronvideos = r'<a class="next page-numbers" href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
itemlist.append(
Item(channel=item.channel,
action="lista_anime",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=matches[0],
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def episodios(item):
logger.info('[animeleggendari.py] episodios')
itemlist = []
data = httptools.downloadpage(item.url).data
blocco = scrapertools.find_single_match(data, r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>')
# Il primo episodio è la pagina stessa
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title="Episodio: 1",
text_color="azure",
fulltitle="%s %s %s " % (support.color(item.title, "deepskyblue"), support.color("|", "azure"), support.color("1", "orange")),
url=item.url,
thumbnail=item.thumbnail,
folder=True))
if blocco != "":
patron = r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapednumber in matches:
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title="Episodio: %s" % scrapednumber,
text_color="azure",
fulltitle="%s %s %s " % (support.color(item.title, "deepskyblue"), support.color("|", "azure"), support.color(scrapednumber, "orange")),
url=scrapedurl,
thumbnail=item.thumbnail,
folder=True))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodi",
show=item.show))
return itemlist
def findvideos(item):
logger.info('[animeleggendari.py] findvideos')
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
videoitem.title = "".join(["[%s] " % support.color(server.capitalize(), 'orange'), item.title])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
# Richiesto per Verifica se i link esistono
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Richiesto per FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Autoplay
autoplay.start(itemlist, item)
return itemlist

52
channels/animespace.json Normal file

@@ -0,0 +1,52 @@
{
"id": "animespace",
"name": "AnimeSpace",
"active": true,
"adult": false,
"language": [],
"thumbnail": "",
"banner": "",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE"
]
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

264
channels/animespace.py Normal file

@@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeSpace -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core import servertools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
from channels import renumbertools
host = "https://animespace.tv/"
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animespace')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animespace')
IDIOMAS = {'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Nuevos Episodios",
action="new_episodes",
thumbnail=get_thumb('new_episodes', auto=True),
url=host))
itemlist.append(Item(channel=item.channel, title="Ultimas",
action="list_all",
thumbnail=get_thumb('last', auto=True),
url=host + 'emision'))
itemlist.append(Item(channel=item.channel, title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'animes'))
itemlist.append(Item(channel=item.channel, title="Anime",
action="list_all",
thumbnail=get_thumb('anime', auto=True),
url=host + 'categoria/anime'))
itemlist.append(Item(channel=item.channel, title="Películas",
action="list_all",
thumbnail=get_thumb('movies', auto=True),
url=host + 'categoria/pelicula'))
itemlist.append(Item(channel=item.channel, title="OVAs",
action="list_all",
thumbnail='',
url=host + 'categoria/ova'))
itemlist.append(Item(channel=item.channel, title="ONAs",
action="list_all",
thumbnail='',
url=host + 'categoria/ona'))
itemlist.append(Item(channel=item.channel, title="Especiales",
action="list_all",
thumbnail='',
url=host + 'categoria/especial'))
itemlist.append(Item(channel=item.channel, title="Buscar",
action="search",
url=host + 'search?q=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
patron += '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
type = type.strip().lower()
url = scrapedurl
thumbnail = scrapedthumbnail
lang = 'VOSE'
title = scrapedtitle
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
new_item= Item(channel=item.channel,
action='episodios',
title=title,
url=url,
thumbnail=thumbnail,
language = lang,
infoLabels={'year':year}
)
if type != 'anime':
new_item.contentTitle=title
else:
new_item.plot=type
new_item.contentSerieName=title
new_item.context = context
itemlist.append(new_item)
# Paginacion
next_page = scrapertools.find_single_match(data,
'"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')
if next_page != "":
actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
itemlist.append(Item(channel=item.channel,
action="list_all",
title=">> Página siguiente",
url=actual_page + next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
if texto != '':
return list_all(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def new_episodes(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<section class="caps">.*?</section>')
patron = '<article.*?<a href="([^"]+)">.*?src="([^"]+)".*?'
patron += '<span class="episode">.*?</i>([^<]+)</span>.*?<h2 class="Title">([^<]+)</h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, epi, scrapedtitle in matches:
url = scrapedurl
lang = 'VOSE'
title = '%s - %s' % (scrapedtitle, epi)
itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
action='findvideos', language=lang))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a class="item" href="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
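# renumbertools remaps the flat "capitulo-N" index onto the season/episode pair configured for this series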
for scrapedurl in matches:
episode = scrapertools.find_single_match(scrapedurl, '.*?capitulo-(\d+)')
lang = 'VOSE'
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName)
url = scrapedurl
infoLabels['season'] = season
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
action='findvideos', language=lang, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
extra1='library'))
return itemlist
def findvideos(item):
import urllib
logger.info()
itemlist = []
data = get_source(item.url)
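# Each "Opt" tab embeds an iframe: /stream/ pages host the file directly, the others proxy the target via a url= parameter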
patron = 'id="Opt\d+">.*?src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
server = ''
scrapedurl = scrapedurl.replace('&quot;', '')
new_data = get_source(scrapedurl)
if "/stream/" in scrapedurl:
scrapedurl = scrapertools.find_single_match(new_data, '<source src="([^"]+)"')
server = "directo"
else:
scrapedurl = scrapertools.find_single_match(scrapedurl, '.*?url=([^&]+)?')
scrapedurl = urllib.unquote(scrapedurl)
if scrapedurl != '':
itemlist.append(Item(channel=item.channel, title='%s', url=scrapedurl, action='play',
language = item.language, infoLabels=item.infoLabels, server=server))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
def newest(categoria):
itemlist = []
item = Item()
if categoria == 'anime':
item.url=host
itemlist = new_episodes(item)
return itemlist

37
channels/animesubita.json Normal file

@@ -0,0 +1,37 @@
{
"id": "animesubita",
"name": "AnimeSubIta",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "animesubita.png",
"bannermenu": "animesubita.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

344
channels/animesubita.py Normal file

@@ -0,0 +1,344 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per AnimeSubIta
# ------------------------------------------------------------
import re, urllib, urlparse
from core import servertools, httptools, scrapertools, tmdb
from platformcode import logger, config
from core.item import Item
from channels import support
host = "http://www.animesubita.org"
PERPAGE = 20
# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
logger.info()
itemlist = [Item(channel=item.channel,
action="lista_anime_completa",
title=support.color("Lista Anime", "azure"),
url="%s/lista-anime/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="ultimiep",
title=support.color("Ultimi Episodi", "azure"),
url="%s/category/ultimi-episodi/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="lista_anime",
title=support.color("Anime in corso", "azure"),
url="%s/category/anime-in-corso/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="categorie",
title=support.color("Categorie", "azure"),
url="%s/generi/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="search",
title=support.color("Cerca anime ...", "yellow"),
extra="anime",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
]
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == "anime":
item.url = host
item.action = "ultimiep"
itemlist = ultimiep(item)
if itemlist[-1].action == "ultimiep":
itemlist.pop()
# Continua l'esecuzione in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info()
item.url = host + "/?s=" + texto
try:
return lista_anime(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = r'<li><a title="[^"]+" href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel,
action="lista_anime",
title=scrapedtitle.replace('Anime', '').strip(),
text_color="azure",
url=scrapedurl,
thumbnail=item.thumbnail,
folder=True))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def ultimiep(item):
logger.info("ultimiep")
itemlist = lista_anime(item, False, False)
for itm in itemlist:
title = scrapertools.decodeHtmlentities(itm.title)
# Pulizia titolo
title = title.replace("Streaming", "").replace("&", "")
title = title.replace("Download", "")
title = title.replace("Sub Ita", "").strip()
eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', title).strip()
# Creazione URL
url = re.sub(r'%s-?\d*-' % eptype.lower(), '', itm.url)
if "-streaming" not in url:
url = url.replace("sub-ita", "sub-ita-streaming")
epnumber = ""
if 'episodio' in eptype.lower():
epnumber = scrapertools.find_single_match(title.lower(), r'episodio?\s*(\d+)')
eptype += ":? " + epnumber
extra = "<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
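# 'extra' carries a regex fragment that findvideos() later uses to locate this
# episode's row in the page's HTML table and extract the matching link.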
itm.title = support.color(title, 'azure').strip()
itm.action = "findvideos"
itm.url = url
itm.fulltitle = cleantitle
itm.extra = extra
itm.show = re.sub(r'Episodio\s*', '', title)
itm.thumbnail = item.thumbnail
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_anime(item, nextpage=True, show_lang=True):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
blocco = scrapertools.find_single_match(data, r'<div class="post-list group">(.*?)</nav><!--/.pagination-->')
# patron = r'<a href="([^"]+)" title="([^"]+)">\s*<img[^s]+src="([^"]+)"[^>]+>' # Pattern with thumbnails; Kodi does not download the images from this site
patron = r'<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
# Clean up the title
scrapedtitle = scrapedtitle.replace("Streaming", "").replace("&", "")
scrapedtitle = scrapedtitle.replace("Download", "")
lang = scrapertools.find_single_match(scrapedtitle, r"([Ss][Uu][Bb]\s*[Ii][Tt][Aa])")
scrapedtitle = scrapedtitle.replace("Sub Ita", "").strip()
eptype = scrapertools.find_single_match(scrapedtitle, "((?:Episodio?|OAV))")
cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', scrapedtitle)
cleantitle = cleantitle.replace(lang, "").strip()
itemlist.append(
Item(channel=item.channel,
action="episodi",
contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
title=support.color((scrapedtitle.replace(lang, "(%s)" % support.color(lang, "red") if show_lang else "") if lang else scrapedtitle).strip(), 'azure'),
fulltitle=cleantitle,
url=scrapedurl,
show=cleantitle,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if nextpage:
patronvideos = r'<link rel="next" href="([^"]+)"\s*/>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = matches[0]
itemlist.append(
Item(channel=item.channel,
action="lista_anime",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_anime_completa(item):
logger.info()
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
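# Pagination convention: the page number is piggybacked onto the URL after a
# '{}' marker (e.g. '<listing-url>{}2'), since Item only carries strings; the
# full catalogue is fetched once and sliced PERPAGE items at a time (PERPAGE
# is presumably defined earlier in this module).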
data = httptools.downloadpage(item.url).data
blocco = scrapertools.find_single_match(data, r'<ul class="lcp_catlist"[^>]+>(.*?)</ul>')
patron = r'<a href="([^"]+)"[^>]+>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
cleantitle = scrapedtitle.replace("Sub Ita Streaming", "").replace("Ita Streaming", "")
itemlist.append(
Item(channel=item.channel,
action="episodi",
contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
title=support.color(scrapedtitle, 'azure'),
fulltitle=cleantitle,
show=cleantitle,
url=scrapedurl,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="lista_anime_completa",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodi(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td style="[^"]*?">\s*.*?<strong>(.*?)</strong>.*?\s*</td>\s*<td style="[^"]*?">\s*<a href="([^"]+?)"[^>]+>\s*<img.*?src="([^"]+?)".*?/>\s*</a>\s*</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl, scrapedimg in matches:
if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
continue
if 'vvvvid' in scrapedurl.lower():
itemlist.append(Item(title='I Video VVVVID Non sono supportati'))
continue
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title=scrapedtitle,
url=urlparse.urljoin(host, scrapedurl),
fulltitle=item.title,
show=scrapedtitle,
plot=item.plot,
fanart=item.thumbnail,
thumbnail=item.thumbnail))
# Service entries
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
logger.info()
itemlist = []
headers = {'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
if item.extra:
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'%s(.*?)</tr>' % item.extra)
item.url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]+>')
patron = r'http:\/\/link[^a]+animesubita[^o]+org\/[^\/]+\/.*?(episodio\d*)[^p]+php(\?.*)'
for phpfile, scrapedurl in re.findall(patron, item.url, re.DOTALL):
url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
headers['Referer'] = url
data = httptools.downloadpage(url, headers=headers).data
# ------------------------------------------------
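# config.get_cookie_data() returns the add-on's cookie jar as Netscape-format
# text: one cookie per line, seven tab-separated fields, with the cookie name
# in field 6 and its value in field 7 (indexes 5 and 6 below).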
cookies = ""
matches = re.compile('(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""), re.DOTALL).findall(config.get_cookie_data())
for cookie in matches:
name = cookie.split('\t')[5]
value = cookie.split('\t')[6]
cookies += name + "=" + value + ";"
headers['Cookie'] = cookies[:-1]
# ------------------------------------------------
scrapedurl = scrapertools.find_single_match(data, r'<source src="([^"]+)"[^>]+>')
url = scrapedurl + '|' + urllib.urlencode(headers)
itemlist.append(
Item(channel=item.channel,
action="play",
text_color="azure",
title="[%s] %s" % (support.color("Diretto", "orange"), item.title),
fulltitle=item.fulltitle,
url=url,
thumbnail=item.thumbnail,
fanart=item.thumbnail,
plot=item.plot))
return itemlist

36
channels/animetubeita.json Normal file

@@ -0,0 +1,36 @@
{
"id": "animetubeita",
"name": "Animetubeita",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "http:\/\/i.imgur.com\/rQPx1iQ.png",
"bannermenu": "http:\/\/i.imgur.com\/rQPx1iQ.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

364
channels/animetubeita.py Normal file

@@ -0,0 +1,364 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for animetubeita
# ----------------------------------------------------------
import re, urllib
from core import httptools, scrapertools, tmdb
from platformcode import logger, config
from core.item import Item
host = "http://www.animetubeita.com"
hostlista = host + "/lista-anime/"
hostgeneri = host + "/generi/"
hostcorso = host + "/category/serie-in-corso/"
def mainlist(item):
log("animetubeita", "mainlist", item.channel)
itemlist = [Item(channel=item.channel,
action="lista_home",
title="[COLOR azure]Home[/COLOR]",
url=host,
thumbnail=AnimeThumbnail,
fanart=AnimeFanart),
# Item(channel=item.channel,
# action="lista_anime",
# title="[COLOR azure]A-Z[/COLOR]",
# url=hostlista,
# thumbnail=AnimeThumbnail,
# fanart=AnimeFanart),
Item(channel=item.channel,
action="lista_genere",
title="[COLOR azure]Genere[/COLOR]",
url=hostgeneri,
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="lista_in_corso",
title="[COLOR azure]Serie in Corso[/COLOR]",
url=hostcorso,
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="search",
title="[COLOR lime]Cerca...[/COLOR]",
url=host + "/?s=",
thumbnail=CercaThumbnail,
fanart=CercaFanart)]
return itemlist
def lista_home(item):
log("animetubeita", "lista_home", item.channel)
itemlist = []
patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title=".*?">.*?<img.*?src="(.*?)".*?<strong>Titolo</strong></td>.*?<td>(.*?)</td>.*?<td><strong>Trama</strong></td>.*?<td>(.*?)</'
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in scrapedAll(item.url, patron):
title = scrapertools.decodeHtmlentities(scrapedtitle)
title = title.split("Sub")[0]
fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
itemlist.append(
Item(channel=item.channel,
action="dl_s",
contentType="tvshow",
title="[COLOR azure]" + title + "[/COLOR]",
fulltitle=fulltitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail,
show=fulltitle,
plot=scrapedplot))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
# ===========================================================
data = httptools.downloadpage(item.url).data
patron = '<link rel="next" href="(.*?)"'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="lista_home",
title=AvantiTxt,
url=next_page,
thumbnail=AvantiImg,
folder=True))
# ===========================================================
return itemlist
# def lista_anime(item):
# log("animetubeita", "lista_anime", item.channel)
# itemlist = []
# patron = '<li.*?class="page_.*?href="(.*?)">(.*?)</a></li>'
# for scrapedurl, scrapedtitle in scrapedAll(item.url, patron):
# title = scrapertools.decodeHtmlentities(scrapedtitle)
# title = title.split("Sub")[0]
# log("url:[" + scrapedurl + "] scrapedtitle:[" + title + "]")
# itemlist.append(
# Item(channel=item.channel,
# action="dettaglio",
# contentType="tvshow",
# title="[COLOR azure]" + title + "[/COLOR]",
# url=scrapedurl,
# show=title,
# thumbnail="",
# fanart=""))
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# return itemlist
def lista_genere(item):
log("lista_anime_genere", "lista_genere", item.channel)
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data,
'<div class="hentry page post-1 odd author-admin clear-block">(.*?)<div id="disqus_thread">')
patron = '<li class="cat-item cat-item.*?"><a href="(.*?)" >(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel,
action="lista_generi",
title='[COLOR lightsalmon][B]' + scrapedtitle + '[/B][/COLOR]',
url=scrapedurl,
fulltitle=scrapedtitle,
show=scrapedtitle,
thumbnail=item.thumbnail))
return itemlist
def lista_generi(item):
log("animetubeita", "lista_generi", item.channel)
itemlist = []
patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title=".*?">.*?<img.*?src="(.*?)".*?<strong>Titolo</strong></td>.*?<td>(.*?)</td>.*?<td><strong>Trama</strong></td>.*?<td>(.*?)</'
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in scrapedAll(item.url, patron):
title = scrapertools.decodeHtmlentities(scrapedtitle)
title = title.split("Sub")[0]
fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
itemlist.append(
Item(channel=item.channel,
action="dettaglio",
title="[COLOR azure]" + title + "[/COLOR]",
contentType="tvshow",
fulltitle=fulltitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
show=fulltitle,
fanart=scrapedthumbnail,
plot=scrapedplot))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
# ===========================================================
data = httptools.downloadpage(item.url).data
patron = '<link rel="next" href="(.*?)"'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="lista_generi",
title=AvantiTxt,
url=next_page,
thumbnail=AvantiImg,
folder=True))
# ===========================================================
return itemlist
def lista_in_corso(item):
log("animetubeita", "lista_home", item.channel)
itemlist = []
patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title="Link.*?>(.*?)</a></h2>.*?<img.*?src="(.*?)".*?<td><strong>Trama</strong></td>.*?<td>(.*?)</td>'
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in scrapedAll(item.url, patron):
title = scrapertools.decodeHtmlentities(scrapedtitle)
title = title.split("Sub")[0]
fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
itemlist.append(
Item(channel=item.channel,
action="dettaglio",
title="[COLOR azure]" + title + "[/COLOR]",
contentType="tvshow",
fulltitle=fulltitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
show=fulltitle,
fanart=scrapedthumbnail,
plot=scrapedplot))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
# ===========================================================
data = httptools.downloadpage(item.url).data
patron = '<link rel="next" href="(.*?)"'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="lista_in_corso",
title=AvantiTxt,
url=next_page,
thumbnail=AvantiImg,
folder=True))
# ===========================================================
return itemlist
def dl_s(item):
log("animetubeita", "dl_s", item.channel)
itemlist = []
encontrados = set()
# 1
patron = '<p><center><a.*?href="(.*?)"'
for scrapedurl in scrapedAll(item.url, patron):
if scrapedurl in encontrados: continue
encontrados.add(scrapedurl)
title = "DOWNLOAD & STREAMING"
itemlist.append(Item(channel=item.channel,
action="dettaglio",
title="[COLOR azure]" + title + "[/COLOR]",
url=scrapedurl,
thumbnail=item.thumbnail,
fanart=item.thumbnail,
plot=item.plot,
folder=True))
# 2
patron = '<p><center>.*?<a.*?href="(.*?)"'
for scrapedurl in scrapedAll(item.url, patron):
if scrapedurl in encontrados: continue
encontrados.add(scrapedurl)
title = "DOWNLOAD & STREAMING"
itemlist.append(Item(channel=item.channel,
action="dettaglio",
title="[COLOR azure]" + title + "[/COLOR]",
url=scrapedurl,
thumbnail=item.thumbnail,
fanart=item.thumbnail,
plot=item.plot,
folder=True))
return itemlist
def dettaglio(item):
log("animetubeita", "dettaglio", item.channel)
itemlist = []
headers = {'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
episodio = 1
patron = r'<a href="http:\/\/link[^a]+animetubeita[^c]+com\/[^\/]+\/[^s]+((?:stream|strm))[^p]+php(\?.*?)"'
for phpfile, scrapedurl in scrapedAll(item.url, patron):
title = "Episodio " + str(episodio)
episodio += 1
url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
headers['Referer'] = url
data = httptools.downloadpage(url, headers=headers).data
# ------------------------------------------------
cookies = ""
matches = re.compile('(.animetubeita.com.*?)\n', re.DOTALL).findall(config.get_cookie_data())
for cookie in matches:
name = cookie.split('\t')[5]
value = cookie.split('\t')[6]
cookies += name + "=" + value + ";"
headers['Cookie'] = cookies[:-1]
# ------------------------------------------------
url = scrapertools.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
url += '|' + urllib.urlencode(headers)
itemlist.append(Item(channel=item.channel,
action="play",
title="[COLOR azure]" + title + "[/COLOR]",
url=url,
thumbnail=item.thumbnail,
fanart=item.thumbnail,
plot=item.plot))
return itemlist
def search(item, texto):
log("animetubeita", "search", item.channel)
item.url = item.url + texto
try:
return lista_home(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def scrapedAll(url="", patron=""):
data = httptools.downloadpage(url).data
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
return matches
def scrapedSingle(url="", single="", patron=""):
data = httptools.downloadpage(url).data
elemento = scrapertools.find_single_match(data, single)
matches = re.compile(patron, re.DOTALL).findall(elemento)
scrapertools.printMatches(matches)
return matches
def log(funzione="", stringa="", canale=""):
logger.debug("[" + canale + "].[" + funzione + "] " + stringa)
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "http://www.animetubeita.com/wp-content/uploads/21407_anime_scenery.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "http://www.animetubeita.com/wp-content/uploads/21407_anime_scenery.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"

61
channels/animeworld.json Normal file

@@ -0,0 +1,61 @@
{
"id": "animeworld",
"name": "AnimeWorld",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https://cdn.animeworld.it/static/images/general/logoaw.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["No filtrar","Italiano"]
}
]
}

425
channels/animeworld.py Normal file

@@ -0,0 +1,425 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for animeworld
# ----------------------------------------------------------
import re, urlparse
from core import httptools, scrapertoolsV2, servertools, tmdb, tvdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay, filtertools, support, autorenumber
from channelselector import thumb
host = "https://www.animeworld.it"
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'Italiano'}
list_language = IDIOMAS.values()
list_servers = ['diretto']
list_quality = []
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animeworld')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animeworld')
def mainlist(item):
logger.info("[animeworld.py] mainlist")
itemlist =[]
support.menu(itemlist, '[B] > Anime ITA[/B]', 'build_menu', host+'/filter?language[]=1')
support.menu(itemlist, '[B] > Anime SUB[/B]', 'build_menu', host+'/filter?language[]=0')
support.menu(itemlist, ' > Anime A-Z', 'alfabetico', host+'/az-list')
support.menu(itemlist, 'Anime - Ultimi Aggiunti', 'alfabetico', host+'/newest')
support.menu(itemlist, 'Anime - Ultimi Episodi', 'alfabetico', host+'/newest')
support.menu(itemlist, '[COLOR blue]Cerca...[/COLOR]', 'search')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
# Build the filter menu ======================================================
def build_menu(item):
itemlist = []
itemlist.append(Item(
channel=item.channel,
action="video",
title="[B]Tutti[/B]",
url=item.url,
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart))
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t','',data)
data = re.sub(r'>\s*<','><',data)
block = scrapertoolsV2.find_single_match(data, r'<form class="filters.*?>(.*?)<\/form>')
matches = re.compile(r'<button class="btn btn-sm btn-default dropdown-toggle" data-toggle="dropdown"> (.*?) <span.*?>(.*?)<\/ul>', re.DOTALL).findall(block)
for title, html in matches:
itemlist.append(
Item(channel=item.channel,
action='build_sub_menu',
contentType="tvshow",
title='[B] > ' + title + '[/B]',
fulltitle=title,
show=title,
url=item.url,
html=html))
# Drop the two language-filter entries from the menu
itemlist.pop(6)
itemlist.pop(6)
itemlist = thumb(itemlist)
return itemlist
# Build the filter submenu ======================================================
def build_sub_menu(item):
itemlist = []
matches = re.compile(r'<input.*?name="(.*?)" value="(.*?)".*?><label.*?>(.*?)<\/label>', re.DOTALL).findall(item.html)
for name, value, title in matches:
itemlist.append(
Item(channel=item.channel,
action='video',
contentType="tvshow",
title='[B]' + title + ' >[/B]',
fulltitle=title,
show=title,
url=item.url + '&' + name + '=' + value,
plot=""))
itemlist = thumb(itemlist)
return itemlist
# Newest ======================================================
def newest(categoria):
logger.info("[animeworld.py] newest")
itemlist = []
item = Item()
try:
if categoria == "anime":
item.url = host + '/newest'
item.action = "video"
itemlist = video(item)
if itemlist[-1].action == "video":
itemlist.pop()
# Keep the search going on error
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# Search ===========================================================
def search(item, texto):
logger.info(texto)
item.url = host + '/search?keyword=' + texto
try:
return video(item)
# Keep the search going on error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# A-Z list ====================================================
def alfabetico(item):
logger.info("[animeworld.py] alfabetico")
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t','',data)
data = re.sub(r'>\s*<','><',data)
block = scrapertoolsV2.find_single_match(data, r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>')
matches = re.compile('<a href="([^"]+)" title="([^"]+)">', re.DOTALL).findall(block)
scrapertoolsV2.printMatches(matches)
for url, title in matches:
itemlist.append(
Item(channel=item.channel,
action='lista_anime',
contentType="tvshow",
title=title,
fulltitle=title,
show=title,
url=url,
plot=""))
return itemlist
def lista_anime(item):
logger.info("[animeworld.py] lista_anime")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t','',data)
data = re.sub(r'>\s*<','><',data)
# Extract the entries
patron = r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedoriginal, scrapedtitle, scrapedplot in matches:
if scrapedoriginal == scrapedtitle:
scrapedoriginal=''
else:
scrapedoriginal = ' - [ ' + scrapedoriginal + ' ]'
year = ''
lang = ''
if '(' in scrapedtitle:
year = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([0-9]+\))')
lang = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([a-zA-Z]+\))')
title = scrapedtitle.replace(year,'').replace(lang,'')
original = scrapedoriginal.replace(year,'').replace(lang,'')
title = '[B]' + title + '[/B]' + year + lang + original
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
contentType="tvshow",
action="episodios",
text_color="azure",
title=title,
url=scrapedurl,
thumbnail=scrapedthumb,
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist)
# Next page
next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href="([^"]+)" rel="next"')
if next_page != '':
itemlist.append(
Item(channel=item.channel,
action='lista_anime',
title='[B]' + config.get_localized_string(30992) + ' >[/B]',
url=next_page,
contentType=item.contentType,
thumbnail=thumb()))
return itemlist
def video(item):
logger.info("[animeworld.py] video")
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t','',data)
data = re.sub(r'>\s*<','><',data)
patron = r'<a href="([^"]+)" class="poster.*?><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches:
# Look for extra info such as year or language in the title
year = ''
lang = ''
if '(' in scrapedtitle:
year = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([0-9]+\))')
lang = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([a-zA-Z]+\))')
# Strip year and language from the title
title = scrapedtitle.replace(year,'').replace(lang,'')
original = scrapedoriginal.replace(year,'').replace(lang,'')
# Compare the title with the original one
if original == title:
original=''
else:
original = ' - [ ' + scrapedoriginal + ' ]'
# look for supplementary info
ep = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ep">(.*?)<')
if ep != '':
ep = ' - ' + ep
ova = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ova">(.*?)<')
if ova != '':
ova = ' - (' + ova + ')'
ona = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ona">(.*?)<')
if ona != '':
ona = ' - (' + ona + ')'
movie = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="movie">(.*?)<')
if movie != '':
movie = ' - (' + movie + ')'
special = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="special">(.*?)<')
if special != '':
special = ' - (' + special + ')'
# Concatenate the info bits
info = ep + lang + year + ova + ona + movie + special
# Build the display title
long_title = '[B]' + title + '[/B]' + info + original
# Decide between episodes and movie
if movie == '':
contentType = 'tvshow'
action = 'episodios'
else:
contentType = 'movie'
action = 'findvideos'
itemlist.append(
Item(channel=item.channel,
contentType=contentType,
action=action,
title=long_title,
url=scrapedurl,
fulltitle=title,
show=title,
thumbnail=scrapedthumb,
context = autoplay.context))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist)
# Next page
next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href=".*?page=([^"]+)" rel="next"')
if next_page != '':
itemlist.append(
Item(channel=item.channel,
action='video',
title='[B]' + config.get_localized_string(30992) + ' >[/B]',
url=re.sub('&page=([^"]+)', '', item.url) + '&page=' + next_page,
contentType=item.contentType,
thumbnail=thumb()))
return itemlist
def episodios(item):
logger.info("[animeworld.py] episodios")
itemlist = []
data = httptools.downloadpage(item.url).data.replace('\n', '')
data = re.sub(r'>\s*<', '><', data)
block = scrapertoolsV2.find_single_match(data, r'<div class="widget servers".*?>(.*?)<div id="download"')
block = scrapertoolsV2.find_single_match(block,r'<div class="server.*?>(.*?)<div class="server.*?>')
patron = r'<li><a.*?href="([^"]+)".*?>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = '[B] Episodio ' + scrapedtitle + '[/B]'
itemlist.append(
Item(
channel=item.channel,
action="findvideos",
contentType="episode",
title=scrapedtitle,
url=urlparse.urljoin(host, scrapedurl),
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=item.plot,
fanart=item.thumbnail,
thumbnail=item.thumbnail))
autorenumber.renumber(itemlist, item,'bold')
# Add to the video library
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(
channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
def findvideos(item):
logger.info("[animeworld.py] findvideos")
itemlist = []
anime_id = scrapertoolsV2.find_single_match(item.url, r'.*\..*?\/(.*)')
data = httptools.downloadpage(host + "/ajax/episode/serverPlayer?id=" + anime_id).data
patron = '<source src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for video in matches:
itemlist.append(
Item(
channel=item.channel,
action="play",
title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
url=video,
server='directo',
contentType=item.contentType,
folder=False))
# Required for link checking
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist
# service assets ====================================================
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"

724
channels/autoplay.py Normal file

@@ -0,0 +1,724 @@
# -*- coding: utf-8 -*-
import os
from core import channeltools
from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode import launcher
from time import sleep
from platformcode.config import get_setting
__channel__ = "autoplay"
PLAYED = False
autoplay_node = {}
def context():
'''
Adds the "Configure AutoPlay" option to the context menu
:return:
'''
_context = ""
if config.is_xbmc():
_context = [{"title": config.get_localized_string(60071),
"action": "autoplay_config",
"channel": "autoplay"}]
return _context
context = context()
def show_option(channel, itemlist, text_color='yellow', thumbnail=None, fanart=None):
'''
Adds the "Configure AutoPlay" option to the received list
:param channel: str
:param itemlist: list (list the Configure AutoPlay option is added to)
:param text_color: str (color for the Configure AutoPlay label)
:param thumbnail: str (path to the thumbnail for the Configure AutoPlay option)
:return:
'''
from channelselector import get_thumb
logger.info()
if not config.is_xbmc():
return itemlist
if thumbnail is None:
thumbnail = get_thumb('autoplay.png')
if fanart is None:
fanart = get_thumb('autoplay.png')
plot_autoplay = config.get_localized_string(60399)
itemlist.append(
Item(channel=__channel__,
title=config.get_localized_string(60071),
action="autoplay_config",
text_color=text_color,
thumbnail=thumbnail,
fanart=fanart,
plot=plot_autoplay,
from_channel=channel
))
return itemlist
def start(itemlist, item):
'''
Main method from which links are played automatically
- If the "customize" option is active, the options defined by the user are used.
- Otherwise it tries to play any link that has the preferred language.
:param itemlist: list (items ready to play, i.e. with action='play')
:param item: item (the channel's main item)
:return: tries to auto-play; if it fails, returns the itemlist it originally received
'''
logger.info()
global PLAYED
global autoplay_node
PLAYED = False
base_item = item
if not config.is_xbmc():
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
return itemlist
if not autoplay_node:
# Get the AUTOPLAY node from the json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
channel_id = item.channel
if item.channel == 'videolibrary':
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
channel_id = item.contentChannel
try:
active = autoplay_node['status']
except:
active = is_active(item.channel)
if channel_id not in autoplay_node or not active:
return itemlist
# Add servers and qualities that were not yet listed to autoplay_node
new_options = check_value(channel_id, itemlist)
# Get the channel node from autoplay_node
channel_node = autoplay_node.get(channel_id, {})
# Get the autoplay settings for this channel
settings_node = channel_node.get('settings', {})
if get_setting('autoplay') or settings_node['active']:
url_list_valid = []
autoplay_list = []
autoplay_b = []
favorite_servers = []
favorite_quality = []
# Save the current "Action and Player Mode" preference values
user_config_setting_action = config.get_setting("default_action")
user_config_setting_player = config.get_setting("player_mode")
# Enable the "Watch in high quality" action (if the server returns more than one quality, e.g. gdrive)
if user_config_setting_action != 2:
config.set_setting("default_action", 2)
if user_config_setting_player != 0:
config.set_setting("player_mode", 0)
# Report that AutoPlay is active
#platformtools.dialog_notification('AutoPlay Activo', '', sound=False)
# Priorities when sorting itemlist:
# 0: Servers and qualities
# 1: Qualities and servers
# 2: Servers only
# 3: Qualities only
# 4: Do not sort
if (settings_node['custom_servers'] and settings_node['custom_quality']):
priority = settings_node['priority']  # 0: servers and qualities, or 1: qualities and servers
elif settings_node['custom_servers']:
priority = 2  # Servers only
elif settings_node['custom_quality']:
priority = 3  # Qualities only
else:
priority = 4  # Do not sort
# Get the available server and quality lists from the AutoPlay json node
server_list = [server.lower() for server in channel_node.get('servers', [])]
quality_list = channel_node.get('quality', [])
# If no qualities are defined, 'default' is assigned as the only quality
if len(quality_list) == 0:
quality_list = ['default']
# The label of each favorite server and quality is stored in a list, e.g.
# favorite_servers = ['openload', 'streamcloud']
for num in range(1, 4):
favorite_servers.append(channel_node['servers'][settings_node['server_%s' % num]].lower())
favorite_quality.append(channel_node['quality'][settings_node['quality_%s' % num]])
# Filter the itemlist links that match the autoplay values
for item in itemlist:
autoplay_elem = dict()
b_dict = dict()
# Make sure this is a video item
if 'server' not in item:
continue
# Add the "Configure AutoPlay" option to the context menu
if 'context' not in item:
item.context = list()
if not filter(lambda x: x['action'] == 'autoplay_config', context):
item.context.append({"title": config.get_localized_string(60071),
"action": "autoplay_config",
"channel": "autoplay",
"from_channel": channel_id})
# If it has no quality set, assign it the 'default' quality
if item.quality == '':
item.quality = 'default'
# Build the list for the custom configuration
if priority < 2:  # 0: servers and qualities, or 1: qualities and servers
# if the server and quality are not in the favorites lists, or the url is repeated,
# discard the item
if item.server.lower() not in favorite_servers or item.quality not in favorite_quality \
or item.url in url_list_valid:
item.type_b = True
b_dict['videoitem'] = item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
elif priority == 2:  # Servers only
# if the server is not in the favorites list, or the url is repeated,
# discard the item
if item.server.lower() not in favorite_servers or item.url in url_list_valid:
item.type_b = True
b_dict['videoitem'] = item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_server"] = favorite_servers.index(item.server.lower())
elif priority == 3:  # Qualities only
# if the quality is not in the favorites list, or the url is repeated,
# discard the item
if item.quality not in favorite_quality or item.url in url_list_valid:
item.type_b = True
b_dict['videoitem'] = item
autoplay_b.append(b_dict)
continue
autoplay_elem["indice_quality"] = favorite_quality.index(item.quality)
else:  # Do not sort
# if the url is repeated, discard the item
if item.url in url_list_valid:
continue
# If the item gets this far, add it to the list of valid urls and to autoplay_list
url_list_valid.append(item.url)
item.plan_b = True
autoplay_elem['videoitem'] = item
# autoplay_elem['server'] = item.server
# autoplay_elem['quality'] = item.quality
autoplay_list.append(autoplay_elem)
# Sort according to the priority
if priority == 0:  # Servers and qualities
autoplay_list.sort(key=lambda orden: (orden['indice_server'], orden['indice_quality']))
elif priority == 1:  # Qualities and servers
autoplay_list.sort(key=lambda orden: (orden['indice_quality'], orden['indice_server']))
elif priority == 2:  # Servers only
autoplay_list.sort(key=lambda orden: orden['indice_server'])
elif priority == 3:  # Qualities only
autoplay_list.sort(key=lambda orden: orden['indice_quality'])
# Prepare plan B; if it is active, the non-favorite elements are appended at the end
try:
plan_b = settings_node['plan_b']
except:
plan_b = True
text_b = ''
if plan_b:
autoplay_list.extend(autoplay_b)
# If there are elements in the autoplay list, try to play each one until
# one works or all of them fail
if autoplay_list or (plan_b and autoplay_b):
#played = False
max_intentos = 5
max_intentos_servers = {}
# If something is already playing, stop the playback
if platformtools.is_playing():
platformtools.stop_video()
for autoplay_elem in autoplay_list:
play_item = Item()
# If it is not a favorite element, add the plan B text
if autoplay_elem['videoitem'].type_b:
text_b = '(Plan B)'
if not platformtools.is_playing() and not PLAYED:
videoitem = autoplay_elem['videoitem']
if videoitem.server.lower() not in max_intentos_servers:
max_intentos_servers[videoitem.server.lower()] = max_intentos
# If the maximum number of attempts for this server has been reached, skip to the next one
if max_intentos_servers[videoitem.server.lower()] == 0:
continue
lang = " "
if hasattr(videoitem, 'language') and videoitem.language != "":
lang = " '%s' " % videoitem.language
platformtools.dialog_notification("AutoPlay %s" % text_b, "%s%s%s" % (
videoitem.server.upper(), lang, videoitem.quality.upper()), sound=False)
# TODO videoitem.server is the server id, but it might not be the name!!!
# Try to play the links
# If the channel has its own play method, use it
channel = __import__('channels.%s' % channel_id, None, None, ["channels.%s" % channel_id])
if hasattr(channel, 'play'):
resolved_item = getattr(channel, 'play')(videoitem)
if len(resolved_item) > 0:
if isinstance(resolved_item[0], list):
videoitem.video_urls = resolved_item
else:
videoitem = resolved_item[0]
# Otherwise play directly and mark as watched
# Check whether the item comes from the video library
try:
if base_item.contentChannel == 'videolibrary':
# Mark as watched
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(base_item)
# Fill the video with the main item's data and play it
play_item = base_item.clone(url=videoitem)
platformtools.play_video(play_item.url, autoplay=True)
else:
# If it does not come from the video library, just play it
platformtools.play_video(videoitem, autoplay=True)
except:
pass
sleep(3)
try:
if platformtools.is_playing():
PLAYED = True
break
except:
logger.debug(str(len(autoplay_list)))
# If we got this far, it is because playback was not possible
max_intentos_servers[videoitem.server.lower()] -= 1
# If the maximum number of attempts for this server has been reached,
# ask whether to keep trying or to ignore it
if max_intentos_servers[videoitem.server.lower()] == 0:
text = config.get_localized_string(60072) % videoitem.server.upper()
if not platformtools.dialog_yesno("AutoPlay", text,
config.get_localized_string(60073)):
max_intentos_servers[videoitem.server.lower()] = max_intentos
# If no elements are left in the list, report it
if autoplay_elem == autoplay_list[-1]:
platformtools.dialog_notification('AutoPlay', config.get_localized_string(60072))
else:
platformtools.dialog_notification(config.get_localized_string(60074), config.get_localized_string(60075))
if new_options:
platformtools.dialog_notification("AutoPlay", config.get_localized_string(60076), sound=False)
# Restore the previous "Action and Player Mode" preference values if needed
if user_config_setting_action != 2:
config.set_setting("default_action", user_config_setting_action)
if user_config_setting_player != 0:
config.set_setting("player_mode", user_config_setting_player)
return itemlist
def init(channel, list_servers, list_quality, reset=False):
'''
Checks whether the channel exists in the AutoPlay configuration file and adds it if missing.
This function must be called when entering any channel that includes the AutoPlay feature.
:param channel: (str) channel id
:param list_servers: (list) initial list of valid servers for the channel. It does not need to be complete,
since the list of valid servers is updated dynamically.
:param list_quality: (list) initial list of valid qualities for the channel. It does not need to be complete,
since the list of valid qualities is updated dynamically.
:return: (bool) True if the initialization succeeded.
'''
logger.info()
change = False
result = True
if not config.is_xbmc():
# platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
result = False
else:
autoplay_path = os.path.join(config.get_data_path(), "settings_channels", 'autoplay_data.json')
if os.path.exists(autoplay_path):
autoplay_node = jsontools.get_node_from_file('autoplay', "AUTOPLAY")
else:
change = True
autoplay_node = {"AUTOPLAY": {}}
if channel not in autoplay_node or reset:
change = True
# Make sure there are no duplicated qualities or servers
if 'default' not in list_quality:
list_quality.append('default')
# list_servers = list(set(list_servers))
# list_quality = list(set(list_quality))
# Create the channel node and add it
channel_node = {"servers": list_servers,
"quality": list_quality,
"settings": {
"active": False,
"plan_b": True,
"custom_servers": False,
"custom_quality": False,
"priority": 0}}
for n in range(1, 4):
s = c = 0
if len(list_servers) >= n:
s = n - 1
if len(list_quality) >= n:
c = n - 1
channel_node["settings"]["server_%s" % n] = s
channel_node["settings"]["quality_%s" % n] = c
autoplay_node[channel] = channel_node
if change:
result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY')
if not result:
heading = config.get_localized_string(60077)
msj = config.get_localized_string(60078)
icon = 1
platformtools.dialog_notification(heading, msj, icon, sound=False)
return result
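# A minimal usage sketch, mirroring how animeworld.py wires AutoPlay in this
# commit (the 'list_servers'/'list_quality' values here are illustrative):
#
#   from channels import autoplay
#   list_servers = ['directo']
#   list_quality = ['default']
#
#   def mainlist(item):
#       itemlist = []
#       autoplay.init(item.channel, list_servers, list_quality)  # register the channel node
#       # ... build the menu entries ...
#       autoplay.show_option(item.channel, itemlist)  # append the config entry
#       return itemlist
#
#   def findvideos(item):
#       itemlist = []
#       # ... scrape playable items (action='play', server set) ...
#       autoplay.start(itemlist, item)  # try to auto-play them
#       return itemlist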
def check_value(channel, itemlist):
''' Checks whether a value exists in the server or quality lists;
if it does not, it is added to the list in the json
:param channel: str
:param itemlist: list (items whose server and quality values are checked)
:return: bool (True if the json node was updated)
'''
logger.info()
global autoplay_node
change = False
if not autoplay_node:
# Get the AUTOPLAY node from the json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
channel_node = autoplay_node.get(channel)
server_list = channel_node.get('servers')
if not server_list:
server_list = channel_node['servers'] = list()
quality_list = channel_node.get('quality')
if not quality_list:
quality_list = channel_node['quality'] = list()
for item in itemlist:
if item.server.lower() not in server_list and item.server !='':
server_list.append(item.server.lower())
change = True
if item.quality not in quality_list and item.quality !='':
quality_list.append(item.quality)
change = True
if change:
change, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY')
return change
def autoplay_config(item):
logger.info()
global autoplay_node
dict_values = {}
list_controls = []
channel_parameters = channeltools.get_channel_parameters(item.from_channel)
channel_name = channel_parameters['title']
if not autoplay_node:
# Get the AUTOPLAY node from the json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
channel_node = autoplay_node.get(item.from_channel, {})
settings_node = channel_node.get('settings', {})
allow_option = True
active_settings = {"id": "active", "label": config.get_localized_string(60079),
"color": "0xffffff99", "type": "bool", "default": False, "enabled": allow_option,
"visible": allow_option}
list_controls.append(active_settings)
dict_values['active'] = settings_node.get('active', False)
# Language
status_language = config.get_setting("filter_languages", item.from_channel)
if not status_language:
status_language = 0
set_language = {"id": "language", "label": config.get_localized_string(60080), "color": "0xffffff99",
"type": "list", "default": 0, "enabled": "eq(-1,true)", "visible": True,
"lvalues": get_languages(item.from_channel)}
list_controls.append(set_language)
dict_values['language'] = status_language
separador = {"id": "label", "label": " "
"_________________________________________________________________________________________",
"type": "label", "enabled": True, "visible": True}
list_controls.append(separador)
# Favorite servers section
server_list = channel_node.get("servers", [])
if not server_list:
enabled = False
server_list = ["No disponible"]
else:
enabled = "eq(-3,true)"
custom_servers_settings = {"id": "custom_servers", "label": config.get_localized_string(60081), "color": "0xff66ffcc",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(custom_servers_settings)
if dict_values['active'] and enabled:
dict_values['custom_servers'] = settings_node.get('custom_servers', False)
else:
dict_values['custom_servers'] = False
for num in range(1, 4):
pos1 = num + 3
default = num - 1
if default > len(server_list) - 1:
default = 0
set_servers = {"id": "server_%s" % num, "label": u" \u2665" + config.get_localized_string(60082) % num,
"color": "0xfffcab14", "type": "list", "default": default,
"enabled": "eq(-%s,true)+eq(-%s,true)" % (pos1, num), "visible": True,
"lvalues": server_list}
list_controls.append(set_servers)
dict_values["server_%s" % num] = settings_node.get("server_%s" % num, 0)
if settings_node.get("server_%s" % num, 0) > len(server_list) - 1:
dict_values["server_%s" % num] = 0
# Favorite qualities section
quality_list = channel_node.get("quality", [])
if not quality_list:
enabled = False
quality_list = ["No disponible"]
else:
enabled = "eq(-7,true)"
custom_quality_settings = {"id": "custom_quality", "label": config.get_localized_string(60083), "color": "0xff66ffcc",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(custom_quality_settings)
if dict_values['active'] and enabled:
dict_values['custom_quality'] = settings_node.get('custom_quality', False)
else:
dict_values['custom_quality'] = False
for num in range(1, 4):
pos1 = num + 7
default = num - 1
if default > len(quality_list) - 1:
default = 0
set_quality = {"id": "quality_%s" % num, "label": u" \u2665 Calidad Favorita %s" % num,
"color": "0xfff442d9", "type": "list", "default": default,
"enabled": "eq(-%s,true)+eq(-%s,true)" % (pos1, num), "visible": True,
"lvalues": quality_list}
list_controls.append(set_quality)
dict_values["quality_%s" % num] = settings_node.get("quality_%s" % num, 0)
if settings_node.get("quality_%s" % num, 0) > len(quality_list) - 1:
dict_values["quality_%s" % num] = 0
# Plan B
dict_values['plan_b'] = settings_node.get('plan_b', False)
enabled = "eq(-4,true)|eq(-8,true)"
plan_b = {"id": "plan_b", "label": config.get_localized_string(70172),
"color": "0xffffff99",
"type": "bool", "default": False, "enabled": enabled, "visible": True}
list_controls.append(plan_b)
# Priorities section
priority_list = [config.get_localized_string(70174), config.get_localized_string(70175)]
set_priority = {"id": "priority", "label": config.get_localized_string(60085),
"color": "0xffffff99", "type": "list", "default": 0,
"enabled": True, "visible": "eq(-5,true)+eq(-9,true)+eq(-12,true)", "lvalues": priority_list}
list_controls.append(set_priority)
dict_values["priority"] = settings_node.get("priority", 0)
# Open the settings dialog
platformtools.show_channel_settings(list_controls=list_controls, dict_values=dict_values, callback='save',
item=item, caption='%s - AutoPlay' % channel_name,
custom_button={'visible': True,
'function': "reset",
'close': True,
'label': 'Reset'})
def save(item, dict_data_saved):
'''
Saves the data from the configuration window
:param item: item
:param dict_data_saved: dict
:return:
'''
logger.info()
global autoplay_node
if not autoplay_node:
# Get the AUTOPLAY node from the json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
new_config = dict_data_saved
if not new_config['active']:
new_config['language']=0
channel_node = autoplay_node.get(item.from_channel)
config.set_setting("filter_languages", dict_data_saved.pop("language"), item.from_channel)
channel_node['settings'] = dict_data_saved
result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY')
return result
def get_languages(channel):
'''
Gets the languages from the channel's json
:param channel: str
:return: list
'''
logger.info()
list_language = ['No filtrar']
list_controls, dict_settings = channeltools.get_channel_controls_settings(channel)
for control in list_controls:
try:
if control["id"] == 'filter_languages':
list_language = control["lvalues"]
except:
pass
return list_language
def is_active(channel):
'''
Returns a boolean indicating whether autoplay is active for the calling channel
:return: True if autoplay is active for the calling channel, False otherwise.
'''
logger.info()
global autoplay_node
if not config.is_xbmc():
return False
if not autoplay_node:
# Get the AUTOPLAY node from the json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
# Get the channel the call is made from
#import inspect
#module = inspect.getmodule(inspect.currentframe().f_back)
#canal = module.__name__.split('.')[1]
canal = channel
# Get the channel node from autoplay_node
channel_node = autoplay_node.get(canal, {})
# Get the autoplay settings for this channel
settings_node = channel_node.get('settings', {})
return settings_node.get('active', False) or get_setting('autoplay')
def reset(item, dict):
channel_name = item.from_channel
channel = __import__('channels.%s' % channel_name, fromlist=["channels.%s" % channel_name])
list_servers = channel.list_servers
list_quality = channel.list_quality
init(channel_name, list_servers, list_quality, reset=True)
platformtools.dialog_notification('AutoPlay', config.get_localized_string(70523) % item.category)
return
def set_status(status):
logger.info()
# Get the AUTOPLAY node from the json
autoplay_node = jsontools.get_node_from_file('autoplay', 'AUTOPLAY')
autoplay_node['status'] = status
result, json_data = jsontools.update_node(autoplay_node, 'autoplay', 'AUTOPLAY')
def play_multi_channel(item, itemlist):
logger.info()
global PLAYED
actual_channel = ''
channel_videos = []
video_dict = dict()
set_status(True)
for video_item in itemlist:
if video_item.contentChannel != actual_channel:
actual_channel = video_item.contentChannel
elif is_active(actual_channel):
channel_videos.append(video_item)
video_dict[actual_channel] = channel_videos
for channel, videos in video_dict.items():
item.contentChannel = channel
if not PLAYED:
start(videos, item)
else:
break

137
channels/autorenumber.py Normal file

@@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# autorenumber - Automatically renumbers episodes
# --------------------------------------------------------------------------------
import os
try:
import xbmcgui
except:
xbmcgui = None
from platformcode import config
from core import jsontools, tvdb
from core.item import Item
from platformcode import platformtools
from channels.support import typo, log
TAG_TVSHOW_RENUMERATE = "TVSHOW_AUTORENUMBER"
TAG_SEASON_EPISODE = "season_episode"
__channel__ = "autorenumber"
def access():
allow = False
if config.is_xbmc():
allow = True
return allow
def context():
if access():
_context = [{"title": config.get_localized_string(70585),
"action": "config_item",
"channel": "autorenumber"}]
return _context
def config_item(item):
log(item)
tvdb.find_and_set_infoLabels(item)
# add_season() returns None if the user cancels; guard before appending
data = add_season()
if data:
if not item.infoLabels['tvdb_id']:
heading = 'TVDB ID'
item.infoLabels['tvdb_id'] = platformtools.dialog_numeric(0, heading)
data.append(item.infoLabels['tvdb_id'])
# write_data() with data=None clears the stored renumbering for the show
write_data(item.from_channel, item.show, data)
def add_season(data=None):
log("data= ", data)
heading = config.get_localized_string(70686)
season = platformtools.dialog_numeric(0, heading)
if season != "":
heading = config.get_localized_string(70687)
episode = platformtools.dialog_numeric(0, heading)
if episode != "":
return [int(season), int(episode)]
def write_data(channel, show, data):
log()
dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE)
tvshow = show.strip()
list_season_episode = dict_series.get(tvshow, {}).get(TAG_SEASON_EPISODE, [])
if data:
dict_renumerate = {TAG_SEASON_EPISODE: data}
dict_series[tvshow] = dict_renumerate
else:
dict_series.pop(tvshow, None)
result, json_data = jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE)
if result:
if data:
message = config.get_localized_string(60446)
else:
message = config.get_localized_string(60444)
else:
message = config.get_localized_string(70593)
heading = show.strip()
platformtools.dialog_notification(heading, message)
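# Shape of the node written into the channel's settings json (illustrative):
#   "TVSHOW_AUTORENUMBER": {
#       "Some Show": {"season_episode": [season, first_episode, tvdb_id]}
#   }
# renumber() below unpacks it as SERIES[0]=season, SERIES[1]=episode, SERIES[2]=tvdb id.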
def renumber(itemlist, item='', typography=''):
log()
if item:
try:
dict_series = jsontools.get_node_from_file(item.channel, TAG_TVSHOW_RENUMERATE)
SERIES = dict_series[item.show]['season_episode']
S = SERIES[0]
E = SERIES[1]
ID = SERIES[2]
page = 1
epList = []
exist = True
item.infoLabels['tvdb_id'] = ID
tvdb.set_infoLabels_item(item)
while exist:
data = tvdb.otvdb_global.get_list_episodes(ID,page)
if data:
for episodes in data['data']:
if episodes['airedSeason'] >= S:
if episodes['airedEpisodeNumber'] >= E:
epList.append(str(episodes['airedSeason']) + 'x' + str(episodes['airedEpisodeNumber']))
page = page + 1
else:
exist = False
ep = 0
for item in itemlist:
item.title = typo(epList[ep] + ' - ', typography) + item.title
ep = ep + 1
except:
return itemlist
else:
for item in itemlist:
if item.contentType != 'movie':
if item.context:
context2 = item.context
item.context = context() + context2
else:
item.context = context()
return itemlist

22
channels/bleachportal.json Normal file
View File

@@ -0,0 +1,22 @@
{
"id": "bleachportal",
"name": "BleachPortal",
"language": ["ita"],
"active": true,
"adult": false,
"fanart": "http://i39.tinypic.com/35ibvcx.jpg",
"thumbnail": "http://www.bleachportal.it/images/index_r1_c1.jpg",
"banner": "http://cgi.di.uoa.gr/~std05181/images/bleach.jpg",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

117
channels/bleachportal.py Normal file
View File

@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
# Thanks to the Icarus crew
# ------------------------------------------------------------
# XBMC Plugin
# Channel for http://bleachportal.it
# ------------------------------------------------------------
import re
from core import scrapertools, httptools
from platformcode import logger
from core.item import Item
host = "http://www.bleachportal.it"
def mainlist(item):
logger.info("[BleachPortal.py]==> mainlist")
itemlist = [Item(channel=item.channel,
action="episodi",
title="[COLOR azure] Bleach [/COLOR] - [COLOR deepskyblue]Lista Episodi[/COLOR]",
url=host + "/streaming/bleach/stream_bleach.htm",
thumbnail="http://i45.tinypic.com/286xp3m.jpg",
fanart="http://i40.tinypic.com/5jsinb.jpg",
extra="bleach"),
Item(channel=item.channel,
action="episodi",
title="[COLOR azure] D.Gray Man [/COLOR] - [COLOR deepskyblue]Lista Episodi[/COLOR]",
url=host + "/streaming/d.gray-man/stream_dgray-man.htm",
thumbnail="http://i59.tinypic.com/9is3tf.jpg",
fanart="http://wallpapercraft.net/wp-content/uploads/2016/11/Cool-D-Gray-Man-Background.jpg",
extra="dgrayman")]
return itemlist
def episodi(item):
logger.info("[BleachPortal.py]==> episodi")
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">\D+([\d\-]+)\s?<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
animetitle = "Bleach" if item.extra == "bleach" else "D.Gray Man"
for scrapednumber, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapedtitle.decode('latin1').encode('utf8')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title="[COLOR azure]%s Ep: [COLOR deepskyblue]%s[/COLOR][/COLOR]" % (animetitle, scrapednumber),
url=item.url.replace("stream_bleach.htm",scrapedurl) if "stream_bleach.htm" in item.url else item.url.replace("stream_dgray-man.htm", scrapedurl),
plot=scrapedtitle,
extra=item.extra,
thumbnail=item.thumbnail,
fanart=item.fanart,
fulltitle="[COLOR red]%s Ep: %s[/COLOR] | [COLOR deepskyblue]%s[/COLOR]" % (animetitle, scrapednumber, scrapedtitle)))
if item.extra == "bleach":
itemlist.append(
Item(channel=item.channel,
action="oav",
title="[B][COLOR azure] OAV e Movies [/COLOR][/B]",
url=item.url.replace("stream_bleach.htm", "stream_bleach_movie_oav.htm"),
extra=item.extra,
thumbnail=item.thumbnail,
fanart=item.fanart))
return list(reversed(itemlist))
def oav(item):
logger.info("[BleachPortal.py]==> oav")
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">-\s+(.*?)<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapednumber, scrapedtitle, scrapedurl in matches:
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title="[COLOR deepskyblue] " + scrapednumber + " [/COLOR]",
url=item.url.replace("stream_bleach_movie_oav.htm", scrapedurl),
plot=scrapedtitle,
extra=item.extra,
thumbnail=item.thumbnail,
fulltitle="[COLOR red]" + scrapednumber + "[/COLOR] | [COLOR deepskyblue]" + scrapedtitle + "[/COLOR]"))
return list(reversed(itemlist))
def findvideos(item):
logger.info("[BleachPortal.py]==> findvideos")
itemlist = []
if "bleach//" in item.url:
item.url = re.sub(r'\w+//', "", item.url)
data = httptools.downloadpage(item.url).data
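# Bleach pages embed the stream in a JWPlayer 'file: "..."' field, while
# D.Gray Man pages pass it as a 'file=' query parameter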
if "bleach" in item.extra:
video = scrapertools.find_single_match(data, 'file: "(.*?)",')
else:
video = scrapertools.find_single_match(data, 'file=(.*?)&').rsplit('/', 1)[-1]
itemlist.append(
Item(channel=item.channel,
action="play",
title="[[COLOR orange]Diretto[/COLOR]] [B]%s[/B]" % item.title,
url=item.url.replace(item.url.split("/")[-1], "/" + video),
thumbnail=item.thumbnail,
fulltitle=item.fulltitle))
return itemlist

48
channels/bloghorror.json Normal file
View File

@@ -0,0 +1,48 @@
{
"id": "bloghorror",
"name": "BlogHorror",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://i.postimg.cc/gcgQhKTL/2018-10-10_20_34_57-_Peliculas_de_Terror_BLOGHORROR.png",
"banner": "",
"categories": [
"movie",
"vo",
"torrent"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

234
channels/bloghorror.py Normal file
View File

@@ -0,0 +1,234 @@
# -*- coding: utf-8 -*-
# -*- Channel BlogHorror -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import os
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://bloghorror.com/'
fanart = 'http://bloghorror.com/wp-content/uploads/2015/04/bloghorror-2017-x.jpg'
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all",
url=host+'/category/terror', thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Asiaticas", action="list_all",
url=host+'/category/asiatico', thumbnail=get_thumb('asiaticas', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title = 'Buscar', action="search", url=host + '?s=', pages=3,
thumbnail=get_thumb('search', auto=True)))
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article id="post-\d+".*?data-background="([^"]+)".*?href="([^"]+)".*?<h3.*?internal">([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = scrapedurl
title = scrapertools.find_single_match(scrapedtitle, '(.*?)(?:|\(|\| )\d{4}').strip()
year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, fanart=fanart, title=title, url=url, action='findvideos',
thumbnail=thumbnail, infoLabels={'year':year})
new_item.contentTitle=title
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(data, 'page-numbers current.*?<a class="page-numbers" href="([^"]+)"')
if next_page != '':
itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all", title='Siguiente >>>', url=next_page))
else:
item.url=next_page
return itemlist
def section(item):
logger.info()
itemlist = []
data=get_source(host)
if item.title == 'Generos':
data = scrapertools.find_single_match(data, 'tabindex="0">Generos<.*?</ul>')
elif 'Años' in item.title:
data = scrapertools.find_single_match(data, 'tabindex="0">Año<.*?</ul>')
patron = 'href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel, fanart=fanart, title=title, url=url, action='list_all', pages=3))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '>FICHA TECNICA:<.*?</ul>')
#patron = '(?:bold|strong>|<br/>|<em>)([^<]+)(?:</em>|<br/>).*?="(magnet[^"]+)"'
patron = '(?:<em>|<br/><em>|/> )(DVD|720|1080)(?:</em>|<br/>|</span>).*?="(magnet[^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = '<a href="(magnet[^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(full_data)
patron_sub = 'href="(http://www.subdivx.com/bajar.php[^"]+)"'
sub_url = scrapertools.find_single_match(full_data, patron_sub)
sub_num = scrapertools.find_single_match(sub_url, 'u=(\d+)')
if sub_url == '':
sub = ''
lang = 'VO'
else:
try:
sub = get_sub_from_subdivx(sub_url, sub_num)
except:
sub = ''
lang = 'VOSE'
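# matches holds (quality, magnet) tuples from the main patron, or bare
# magnet strings from the fallback patron; the try/except covers both shapes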
try:
for quality, scrapedurl in matches:
if quality.strip() not in ['DVD', '720', '1080']:
quality = 'DVD'
url = scrapedurl
if not config.get_setting('unify'):
title = ' [Torrent] [%s] [%s]' % (quality, lang)
else:
title = 'Torrent'
itemlist.append(Item(channel=item.channel, fanart=fanart, title=title, url=url, action='play',
server='torrent', quality=quality, language=lang, infoLabels=item.infoLabels,
subtitle=sub))
except:
for scrapedurl in matches:
quality = 'DVD'
url = scrapedurl
if not config.get_setting('unify'):
title = ' [Torrent] [%s] [%s]' % (quality, lang)
else:
title = 'Torrent'
itemlist.append(Item(channel=item.channel, fanart=fanart, title=title, url=url, action='play',
server='torrent', quality=quality, language=lang, infoLabels=item.infoLabels,
subtitle=sub))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas', 'terror', 'torrent']:
item.url = host
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def get_sub_from_subdivx(sub_url, sub_num):
logger.info()
import xbmc
from time import sleep
import urlparse
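# subdivx answers the bajar.php link with a redirect; rebuild the final URL
# from the Location header, download the .rar and let Kodi extract the
# subtitle into temp_subs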
sub_dir = os.path.join(config.get_data_path(), 'temp_subs')
if os.path.exists(sub_dir):
for sub_file in os.listdir(sub_dir):
old_sub = os.path.join(sub_dir, sub_file)
os.remove(old_sub)
sub_data = httptools.downloadpage(sub_url, follow_redirects=False)
if 'x-frame-options' not in sub_data.headers:
sub_url = 'http://subdivx.com/sub%s/%s' % (sub_num, sub_data.headers['location'])
sub_url = sub_url.replace('http:///', '')
sub_data = httptools.downloadpage(sub_url).data
fichero_rar = os.path.join(config.get_data_path(), "subtitle.rar")
outfile = open(fichero_rar, 'wb')
outfile.write(sub_data)
outfile.close()
xbmc.executebuiltin("XBMC.Extract(%s, %s/temp_subs)" % (fichero_rar, config.get_data_path()))
sleep(1)
if len(os.listdir(sub_dir)) > 0:
sub = os.path.join(sub_dir, os.listdir(sub_dir)[0])
else:
sub = ''
else:
logger.info('sub no valido')
sub = ''
return sub

15
channels/bravoporn.json Normal file
View File

@@ -0,0 +1,15 @@
{
"id": "bravoporn",
"name": "bravoporn",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.bravoporn.com/v/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

89
channels/bravoporn.py Normal file
View File

@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://www.bravoporn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host +"/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/c/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/s/?q=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="th">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class=".*?video_block"><a href="([^"]+)".*?'
patron += '<img src="([^"]+)".*?alt="([^"]+)".*?'
patron += '<span class="time">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
thumbnail = "https:" + scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
fanart=thumbnail, plot=plot, contentTitle = scrapedtitle))
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
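# The page player lists several <source> tags; keep the mp4 variants tagged "HQ"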
patron = '<source src="([^"]+)" type=\'video/mp4\' title="HQ" />'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl))
return itemlist

15
channels/camwhoresbay.json Normal file
View File

@@ -0,0 +1,15 @@
{
"id": "camwhoresbay",
"name": "camwhoresbay",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.camwhoresbay.com/images/porntrex.ico",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

114
channels/camwhoresbay.py Normal file
View File

@@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.camwhoresbay.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
item.extra = texto
try:
return lista(item)
# Catch the exception so the global search is not interrupted if a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
patron += '<img class="thumb" src="([^"]+)".*?'
patron += '<div class="videos">([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return sorted(itemlist, key=lambda i: i.title)
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="video-item ">.*?'
patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
patron += 'data-original="([^"]+)".*?'
patron += '<i class="fa fa-clock-o"></i>(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = scrapedtitle, fanart=thumbnail))
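# Pagination: search results load further pages through an AJAX block URL
# driven by a "from_videos" offset, while plain listings use a next link or
# a "from" offset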
if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
if next_page:
if "from_videos=" in item.url:
next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
"&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page and not next_page.startswith("#"):
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
item.url, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
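# Pick the best quality the player exposes, in descending order:
# video_alt_url3 > video_alt_url2 > video_alt_url > video_url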
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url3: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url2: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)\'')
if scrapedurl == "" :
scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'')
itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
return itemlist

12
channels/canalporno.json Normal file
View File

@@ -0,0 +1,12 @@
{
"id": "canalporno",
"name": "Canalporno",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://i.imgur.com/gAbPcvT.png?1",
"banner": "canalporno.png",
"categories": [
"adult"
]
}

87
channels/canalporno.py Normal file
View File

@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
host = "http://www.canalporno.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="findvideos", title="Útimos videos", url=host))
itemlist.append(item.clone(action="categorias", title="Listado Categorias",
url=host + "/categorias"))
itemlist.append(item.clone(action="search", title="Buscar", url=host + "/search/?q=%s"))
return itemlist
def search(item, texto):
logger.info()
try:
item.url = item.url % texto
itemlist = findvideos(item)
return sorted(itemlist, key=lambda it: it.title)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<img src="([^"]+)".*?alt="([^"]+)".*?<h2><a href="([^"]+)">.*?' \
'<div class="duracion"><span class="ico-duracion sprite"></span> ([^"]+) min</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for thumbnail, title, url, time in matches:
scrapedtitle = time + " - " + title
scrapedurl = host + url
scrapedthumbnail = thumbnail
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail))
patron = '<div class="paginacion">.*?<span class="selected">.*?<a href="([^"]+)">([^"]+)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title in matches:
url = host + url
title = "Página %s" % title
itemlist.append(item.clone(action="findvideos", title=title, url=url))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="ordenar-por ordenar-por-categoria">'
'(.*?)<\/ul>')
#patron = '<div class="muestra-categorias">.*?<a class="thumb" href="([^"]+)".*?<img class="categorias" src="([^"]+)".*?<div class="nombre">([^"]+)</div>'
patron = "<li><a href='([^']+)'\s?title='([^']+)'>.*?<\/a><\/li>"
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title in matches:
url = host + url
#thumbnail = "http:" + thumbnail
itemlist.append(item.clone(action="findvideos", title=title, url=url))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
itemlist.append(item.clone(url=url, server="directo"))
return itemlist

70
channels/casacinema.json Normal file
View File

@@ -0,0 +1,70 @@
{
"id": "casacinema",
"name": "Casacinema",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",
"categories": ["tvshow", "movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

371
channels/casacinema.py Normal file
View File

@@ -0,0 +1,371 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Kodi on Demand - Kodi Addon
# Channel for casacinema
# ------------------------------------------------------------
import re, urlparse
from core import scrapertools, scrapertoolsV2, httptools, servertools, tmdb
from channels import autoplay, filtertools, support
from core.item import Item
from platformcode import logger, config
from channelselector import thumb, get_thumb
host = 'https://www.casacinema.site'
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'wstream', 'speedvideo']
list_quality = ['HD', 'SD']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'casacinema')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'casacinema')
headers = [['Referer', '%s/genere/serie-tv' % host]]
def mainlist(item):
logger.info("kod.casacinema mainlist")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=item.channel,
title="[B]Film[/B]",
action="peliculas",
extra="movie",
url="%s/genere/film" % host),
Item(channel=item.channel,
title="[B]Film - HD[/B]",
action="peliculas",
extra="movie",
url="%s/?s=[HD]" % host),
Item(channel=item.channel,
title="[B] > Categorie[/B]",
action="categorias",
extra="movie",
url="%s/genere/film" % host),
Item(channel=item.channel,
title="[B]Film Sub - Ita[/B]",
action="peliculas",
extra="movie",
url="%s/genere/sub-ita" % host),
Item(channel=item.channel,
title="[COLOR blue]Cerca Film...[/COLOR]",
action="search",
extra="movie",),
Item(channel=item.channel,
title="[B]Serie TV[/B]",
extra="tvshow",
action="peliculas_tv",
url="%s/genere/serie-tv" % host),
Item(channel=item.channel,
title="[COLOR blue]Cerca Serie TV...[/COLOR]",
action="search",
extra="tvshow")]
autoplay.show_option(item.channel, itemlist)
# auto thumb
itemlist=thumb(itemlist)
return itemlist
def newest(categoria):
logger.info("[casacinema.py] newest" + categoria)
itemlist = []
item = Item()
try:
if categoria == "film":
item.url = host + '/genere/film'
item.extra = "movie"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Keep the search going if an error occurs
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info("[casacinema.py] " + item.url + " search " + texto)
item.url = host + "?s=" + texto
try:
if item.extra == "tvshow":
return peliculas_tv(item)
if item.extra == "movie":
return peliculas(item)
# Keep the search going if an error occurs
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
logger.info("kod.casacinema peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
logger.info('DATA=' + data)
# Extract the contents
patron = '<li><a href="([^"]+)"[^=]+="([^"]+)"><div>\s*<div[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
cleantitle = re.sub(r'[-]*\s*[Ii]l [Ff]ilm\s*[-]*?', '', title).strip()
cleantitle = cleantitle.replace('[HD]', '').strip()
year = scrapertools.find_single_match(title, r'\((\d{4})\)')
infolabels = {}
if year:
cleantitle = cleantitle.replace("(%s)" % year, '').strip()
infolabels['year'] = year
scrapedplot = ""
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=title,
text_color="azure",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=cleantitle,
show=cleantitle,
plot=scrapedplot,
infoLabels=infolabels,
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
## Pagination
next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)".*?>Pagina')
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR blue]" + config.get_localized_string(30992) + " >[/COLOR]",
url=next_page,
extra=item.extra,
thumbnail=get_thumb('next.png')))
return itemlist
def peliculas_tv(item):
logger.info("kod.casacinema peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the contents
patron = '<li><a href="([^"]+)"[^=]+="([^"]+)"><div>\s*<div[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
cleantitle = re.sub(r'[-]*\s*[Ss]erie [Tt]v\s*[-]*?', '', title).strip()
cleantitle = cleantitle.replace('[HD]', '').replace('[SD]', '').strip()
year = scrapertools.find_single_match(title, r'\((\d{4})\)')
infolabels = {}
if year:
cleantitle = cleantitle.replace("(%s)" % year, '').strip()
infolabels['year'] = year
scrapedplot = ""
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="tvshow",
title=title,
text_color="azure",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=cleantitle,
show=cleantitle,
plot=scrapedplot,
infoLabels=infolabels,
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
## Pagination
next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Pagina') ### <- Regex: space removed - previously <li><a href="([^"]+)" >Pagina
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas_tv",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
extra=item.extra,
thumbnail=get_thumb('next.png')))
return itemlist
def categorias(item):
logger.info("kod.casacinema categorias")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
# Narrow search by selecting only the combo
bloque = scrapertools.find_single_match(data, 'Categorie(.*?)</ul>')
# The categories are the options for the combo
patron = '<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
extra=item.extra,
url=urlparse.urljoin(host, scrapedurl)))
return itemlist
def episodios(item):
def load_episodios(html, item, itemlist, lang_title):
patron = '.*?<a href="[^"]+"[^o]+ofollow[^>]+>[^<]+</a><(?:b|/)[^>]+>'
matches = re.compile(patron).findall(html)
for data in matches:
# Extract the contents
scrapedtitle = scrapertoolsV2.htmlclean(re.sub(r'(<a [^>]+>)*(<\/a>.*)*(Speedvideo)*', '', data)).strip()
if scrapedtitle != 'Categorie':
scrapedtitle = scrapedtitle.replace('&#215;', 'x')
scrapedtitle = scrapedtitle.replace('×', 'x')
scrapedtitle = scrapedtitle.replace(';', '')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
url=data,
thumbnail=item.thumbnail,
extra=item.extra,
fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
show=item.show))
logger.info("[casacinema.py] episodios")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
data = scrapertools.decodeHtmlentities(data)
data = scrapertools.find_single_match(data, '<p>(?:<strong>|)(.*?)<div id="disqus_thread">')
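# Split the page into one chunk per "Stagione" heading and tag each chunk as
# ITA or SUB ITA, so every episode inherits the language of its season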
lang_titles = []
starts = []
patron = r"Stagione.*?(?:ITA|\d+)"
matches = re.compile(patron, re.IGNORECASE).finditer(data)
for match in matches:
season_title = match.group()
if season_title != '':
lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
starts.append(match.end())
i = 1
len_lang_titles = len(lang_titles)
while i <= len_lang_titles:
inizio = starts[i - 1]
fine = starts[i] if i < len_lang_titles else -1
html = data[inizio:fine]
lang_title = lang_titles[i - 1]
load_episodios(html, item, itemlist, lang_title)
i += 1
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios" + "###" + item.extra,
show=item.show))
return itemlist
def findvideos(item):
logger.info("kod.casacinema findvideos")
data = item.url if item.extra == "tvshow" else httptools.downloadpage(item.url, headers=headers).data
# servertools.find_video_items scans the raw page (or the episode HTML
# fragment carried in item.url) for known server URLs
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
videoitem.title = "".join(["[%s] " % support.color(server, 'orange'), item.title])
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
videoitem.language = IDIOMAS['Italiano']
# Required to filter links
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist

36
channels/casacinemaInfo.json Normal file
View File

@@ -0,0 +1,36 @@
{
"id": "casacinemaInfo",
"name": "La casa del cinema",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "",
"banner": "",
"categories": ["movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

153
channels/casacinemaInfo.py Normal file
View File

@@ -0,0 +1,153 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for casacinema
# ------------------------------------------------------------
import re, urlparse, base64
from core import scrapertoolsV2, httptools, servertools, tmdb
from channels import autoplay, support
from core.item import Item
from platformcode import logger, config
host = 'https://casacinema.info'
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'wstream', 'speedvideo']
list_quality = ['1080p', '720', '480p', '360p']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'casacinema')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'casacinema')
def mainlist(item):
logger.info("alfa.casacinema mainlist")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=item.channel,
title="Film",
action="peliculas",
extra="movie",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="In sala",
action="peliculas",
extra="movie",
url="%s/category/in-sala/" % host,
thumbnail="http://jcrent.com/apple%20tv%20final/HD.png"),
Item(channel=item.channel,
title="Novità",
action="peliculas",
extra="movie",
url="%s/category/nuove-uscite/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="Sub - Ita",
action="peliculas",
extra="movie",
url="%s/category/sub-ita/" % host,
thumbnail="http://i.imgur.com/qUENzxl.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info("[casacinemaInfo.py] " + item.url + " search " + texto)
item.url = host + "?s=" + texto
data = httptools.downloadpage(item.url).data
itemlist = []
patron = '<li class="col-md-12 itemlist">.*?<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?Film dell\\\'anno: ([0-9]{4}).*?<p class="text-list">([^<>]+)</p>'
matches = scrapertoolsV2.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedplot in matches:
title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
cleantitle = title.replace('[Sub-ITA]', '').strip()
infoLabels = {"plot": scrapertoolsV2.decodeHtmlentities(scrapedplot), "year": scrapedyear}
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
infoLabels=infoLabels,
fulltitle=cleantitle))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def peliculas(item):
logger.info("[casacinemaInfo.py] peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
# Extract the contents
patron = '<div class="col-mt-5 postsh">[^<>]+<div class="poster-media-card">[^<>]+<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)"'
matches = scrapertoolsV2.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
cleantitle = title.replace('[Sub-ITA]', '').strip()
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=cleantitle))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
## Pagination
next_page = scrapertoolsV2.find_single_match(data, '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"') ### <- Regex: space removed - previously <li><a href="([^"]+)" >Pagina -> continues at line 221
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
def findvideos(item):
logger.info("[casacinemaInfo.py] findvideos")
itemlist = support.hdpass_get_servers(item)
# Required to filter links
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist

14
channels/cat3plus.json Normal file
View File

@@ -0,0 +1,14 @@
{
"id": "cat3plus",
"name": "Cat3plus",
"active": true,
"adult": true,
"language": [],
"thumbnail": "https://i.imgur.com/SJxXKa2.png",
"fanart": "https://i.imgur.com/ejCwTxT.jpg",
"banner": "https://i.imgur.com/bXUyk6m.png",
"categories": [
"movie",
"vo"
]
}

130
channels/cat3plus.py Normal file
View File

@@ -0,0 +1,130 @@
# -*- coding: utf-8 -*-
# -*- Channel Cat3plus -*-
# -*- Created for Alfa-addon -*-
# -*- By Sculkurt -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.cat3plus.com/'
headers = [
['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
['Accept-Encoding', 'gzip, deflate'],
['Referer', host]
]
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)))
itemlist.append(item.clone(title="Años", action="years", url=host, thumbnail=get_thumb('year', auto=True)))
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)))
return itemlist
def years(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = "<a dir='ltr' href='([^']+)'>([^<]+)</a>"
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<h2 class='post-title entry-title'><a href='([^']+)'>([^(]+).*?\(([^)]+).*?"
patron += 'src="([^"]+).*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, year, img in matches:
itemlist.append(Item(channel = item.channel,
title = scrapedtitle,
url = scrapedurl,
action = "findvideos",
thumbnail = img,
contentTitle = scrapedtitle,
contentType = "movie",
infoLabels = {'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, "<a class='blog-pager-older-link' href='([^']+)'")
if next_page != "":
itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
item.url = host + "search?q=" + texto
item.extra = "busqueda"
try:
return list_all(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<h2>\s*<a href="([^"]+)" target="_blank">.*?</a></h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
data = httptools.downloadpage(url, headers={'Referer': item.url}).data
itemlist.extend(servertools.find_video_items(data=data))
for video in itemlist:
video.channel = item.channel
video.contentTitle = item.contentTitle
video.title = video.server.capitalize()
# "Add this movie to the video library" option
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel = item.channel,
title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url = item.url,
action = "add_pelicula_to_library",
extra = "findvideos",
contentTitle = item.contentTitle,
thumbnail = item.thumbnail
))
return itemlist

36
channels/cb01anime.json Normal file
View File

@@ -0,0 +1,36 @@
{
"id": "cb01anime",
"name": "Cb01anime",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://i.imgur.com/bHoUMo2.png",
"banner": "http://i.imgur.com/bHoUMo2.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

278
channels/cb01anime.py Normal file
View File

@@ -0,0 +1,278 @@
# -*- coding: utf-8 -*-
# Thanks to the Icarus crew
# ------------------------------------------------------------
# XBMC Plugin
# Channel for cineblog01 - anime
# ------------------------------------------------------------
import re
from core import httptools, scrapertools, servertools, tmdb
from core.item import Item
from platformcode import logger, config
host = "https://www.cineblog01.pink"
# exclude 'service' posts from the listings
blacklist = ['AVVISO IMPORTANTE CB01.ROCKS', 'Lista Alfabetica Completa Anime/Cartoon', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE']
# -----------------------------------------------------------------
def mainlist(item):
logger.info("[cb01anime.py] mainlist")
# Main options
itemlist = [Item(channel=item.channel,
action="list_titles",
title="[COLOR azure]Anime - Novita'[/COLOR]",
url=host + '/anime',
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=item.channel,
action="genere",
title="[COLOR azure]Anime - Per Genere[/COLOR]",
url=host + '/anime',
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/Genres.png"),
Item(channel=item.channel,
action="alfabetico",
title="[COLOR azure]Anime - Per Lettera A-Z[/COLOR]",
url=host + '/anime',
thumbnail="http://i.imgur.com/IjCmx5r.png"),
Item(channel=item.channel,
action="listacompleta",
title="[COLOR azure]Anime - Lista Completa[/COLOR]",
url="%s/anime/lista-completa-anime-cartoon/" % host,
thumbnail="http://i.imgur.com/IjCmx5r.png"),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca Anime[/COLOR]",
extra="anime",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
# =================================================================
# -----------------------------------------------------------------
def genere(item):
logger.info("[cb01anime.py] genere")
return build_itemlist(item, '<select name="select2"(.*?)</select>', '<option value="([^"]+)">([^<]+)</option>',
"list_titles")
def alfabetico(item):
logger.info("[cb01anime.py] alfabetico")
return build_itemlist(item, '<option value=\'-1\'>Anime per Lettera</option>(.*?)</select>',
'<option value="([^"]+)">\(([^<]+)\)</option>', "list_titles")
def listacompleta(item):
logger.info("[cb01anime.py] listacompleta")
return build_itemlist(item,
'<a href="#char_5a" title="Go to the letter Z">Z</a></span></div>(.*?)</ul></div><div style="clear:both;"></div></div>',
'<li><a href="' + host + '([^"]+)"><span class="head">([^<]+)</span></a></li>', "episodios")
def build_itemlist(item, re_bloque, re_patron, iaction):
itemlist = []
data = httptools.downloadpage(item.url).data
# Narrow search by selecting only the combo
bloque = scrapertools.find_single_match(data, re_bloque)
# The categories are the options for the combo
matches = re.compile(re_patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)
for url, titulo in matches:
itemlist.append(
Item(channel=item.channel,
action=iaction,
contentType="tvshow",
title=titulo,
fulltitle=titulo,
text_color="azure",
show=titulo,
url=host + url,
plot=""))
return itemlist
# =================================================================
# -----------------------------------------------------------------
def search(item, texto):
logger.info("[cb01anime.py] " + item.url + " search " + texto)
item.url = host + "/anime/?s=" + texto
return list_titles(item)
# =================================================================
# -----------------------------------------------------------------
def list_titles(item):
logger.info("[cb01anime.py] mainlist")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
# Extract the contents
patronvideos = r'<div class="span4">\s*<a href="([^"]+)">'
patronvideos += r'<img src="([^"]+)"[^>]+><\/a>[^>]+>[^>]+>'
patronvideos += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<\/a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.htmlclean(scrapedtitle).strip()
if not scrapedtitle in blacklist:
if 'lista richieste' in scrapedtitle.lower(): continue
patron = r'(?:\[[Ff][Uu][Ll]{2}\s*[Ii][Tt][Aa]\]|\[[Ss][Uu][Bb]\s*[Ii][Tt][Aa]\])'
cleantitle = re.sub(patron, '', scrapedtitle).strip()
## ------------------------------------------------
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
## ------------------------------------------------
# Add to the XBMC listing
itemlist.append(
Item(channel=item.channel,
action="listacompleta" if "Lista Alfabetica Completa Anime/Cartoon" in scrapedtitle else "episodios",
contentType="tvshow",
title=scrapedtitle,
fulltitle=cleantitle,
text_color="azure",
show=cleantitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
viewmode="movie_with_plot"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Add the next-page marker
try:
next_page = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)'")
itemlist.append(
Item(channel=item.channel,
action="list_titles",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
except:
pass
return itemlist
# =================================================================
# -----------------------------------------------------------------
def episodios(item):
logger.info("[cb01anime.py] episodios")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
# data = scrapertools.decodeHtmlentities(data)
patron1 = '(?:<p>|<td bgcolor="#ECEAE1">)<span class="txt_dow">(.*?)(?:</p>)?(?:\s*</span>)?\s*</td>'
patron2 = '<a.*?href="([^"]+)"[^>]*>([^<]+)</a>'
matches1 = re.compile(patron1, re.DOTALL).findall(data)
if len(matches1) > 0:
for match1 in re.split('<br />|<p>', matches1[0]):
if len(match1) > 0:
# Extract the contents
titulo = None
scrapedurl = ''
matches2 = re.compile(patron2, re.DOTALL).finditer(match1)
for match2 in matches2:
if titulo is None:
titulo = match2.group(2)
scrapedurl += match2.group(1) + '#' + match2.group(2) + '|'
if titulo is not None:
title = item.title + " " + titulo
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title=title,
extra=scrapedurl,
fulltitle=item.fulltitle,
show=item.show))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
# =================================================================
# -----------------------------------------------------------------
def findvideos(item):
logger.info("[cb01anime.py] findvideos")
itemlist = []
for match in item.extra.split(r'|'):
match_split = match.split(r'#')
scrapedurl = match_split[0]
if len(scrapedurl) > 0:
scrapedtitle = match_split[1]
title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
url=scrapedurl,
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
return itemlist
# =================================================================
# -----------------------------------------------------------------
def play(item):
logger.info("[cb01anime.py] play")
if '/goto/' in item.url:
item.url = item.url.split('/goto/')[-1].decode('base64')
data = item.url
logger.debug("##### Play data ##\n%s\n##" % data)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.show
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
return itemlist

65
channels/cineblog01.json Normal file
View File

@@ -0,0 +1,65 @@
{
"id": "cineblog01",
"name": "CB01",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "cb01.png",
"banner": "cb01.png",
"categories": ["tvshow", "movie", "vosi"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Italiano"
]
}
]
}

305
channels/cineblog01.py Normal file
View File

@@ -0,0 +1,305 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for cineblog01
# ------------------------------------------------------------
import re
import urlparse
from channels import autoplay, filtertools, support
from core import scrapertoolsV2, httptools, servertools
from core.item import Item
from lib import unshortenit
from platformcode import logger, config
# set dynamically by findhost()
host = ""
headers = ""
def findhost():
global host, headers
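# cb01.uno redirects to a Google "site:" query that embeds the current
# official domain; stripping the query prefix recovers the real host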
permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers
host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'wstream']
list_quality = ['HD', 'default']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cineblog01')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cineblog01')
# exclude 'service' posts from the listings
blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO &#x25b6; TROVA L&#8217;INDIRIZZO UFFICIALE ', 'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬']
def mainlist(item):
findhost()
autoplay.init(item.channel, list_servers, list_quality)
# Main options
itemlist = []
support.menu(itemlist, 'Film bold', 'peliculas', host)
support.menu(itemlist, 'HD submenu', 'menu', host, args="Film HD Streaming")
support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host, args="Film per Anno")
support.menu(itemlist, 'Cerca... submenu color blue', 'search', host, args='film')
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serietv/', contentType='episode')
support.menu(itemlist, 'Per Lettera submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Lettera")
support.menu(itemlist, 'Per Genere submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Anno")
support.menu(itemlist, 'Cerca... submenu color blue', 'search', host + '/serietv/', contentType='episode', args='serie')
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu(item):
findhost()
itemlist= []
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', '', data)
block = scrapertoolsV2.find_single_match(data, item.args + r'<span.*?><\/span>.*?<ul.*?>(.*?)<\/ul>')
support.log('MENU BLOCK= ',block)
patron = r'href="?([^">]+)"?>(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(
channel=item.channel,
title=scrapedtitle,
contentType=item.contentType,
action='peliculas',
url=host + scrapedurl
)
)
return support.thumb(itemlist)
def search(item, text):
support.log(item.url, "search", text)
try:
item.url = item.url + "/?s=" + text
return peliculas(item)
# Keep the search going if an error occurs
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
findhost()
itemlist = []
item = Item()
item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
return support.scrape(item, r'<a href=([^>]+)>([^<([]+)(?:\[([A-Z]+)\])?\s\(([0-9]{4})\)<\/a>',
['url', 'title', 'quality', 'year'],
patron_block=r'Ultimi 100 film aggiunti:.*?<\/td>')
def peliculas(item):
support.log()
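# Film cards and serietv cards use different markup, so each content type
# gets its own patron and target action (findvideos vs episodios)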
if item.contentType == 'movie' or '/serietv/' not in item.url:
patron = r'<div class="?card-image"?>.*?<img src="?([^" ]+)"? alt.*?<a href="?([^" >]+)(?:\/|")>([^<[(]+)(?:\[([A-Za-z0-9/-]+)])? (?:\(([0-9]{4})\))?.*?<strong>([^<>&]+).*?DURATA ([0-9]+).*?<br(?: /)?>([^<>]+)'
listGroups = ['thumb', 'url', 'title', 'quality', 'year', 'genre', 'duration', 'plot']
action = 'findvideos'
else:
patron = r'div class="card-image">.*?<img src="([^ ]+)" alt.*?<a href="([^ >]+)">([^<[(]+)<\/a>.*?<strong><span style="[^"]+">([^<>0-9(]+)\(([0-9]{4}).*?<\/(p|div)>([^<>]+)'
listGroups = ['thumb', 'url', 'title', 'genre', 'year', 'plot']
action = 'episodios'
return support.scrape(item, patron_block=[r'<div class="?sequex-page-left"?>(.*?)<aside class="?sequex-page-right"?>',
'<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)'],
patron=patron, listGroups=listGroups,
patronNext='<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">', blacklist=blacklist, action=action)
def episodios(item):
item.contentType = 'episode'
return support.scrape(item, patron_block=[r'<article class="sequex-post-content">(.*?)<\/article>',
r'<div class="sp-head[a-z ]*?" title="Espandi">[^<>]*?</div>(.*?)<div class="spdiv">\[riduci\]</div>'],
patron='(?:<p>)?([0-9]+&#215;[0-9]+)(.*?)(?:</p>|<br)', listGroups=['title', 'url'])
def findvideos(item):
findhost()
if item.contentType == "episode":
return findvid_serie(item)
def load_links(itemlist, re_txt, color, desc_txt, quality=""):
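# Helper: pull the table block matched by re_txt out of the page, extract every link in it,
# and append one playable Item per link, colour-coded by section (desc_txt) and tagged with the given quality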
streaming = scrapertoolsV2.find_single_match(data, re_txt).replace('"', '')
support.log('STREAMING=',streaming)
patron = '<td><a.*?href=(.*?) target[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(streaming)
for scrapedurl, scrapedtitle in matches:
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.fulltitle + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=quality,
contentType=item.contentType,
folder=False))
support.log()
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
data = re.sub('\n|\t','',data)
# Extract the quality format
patronvideos = '>([^<]+)</strong></div>'
matches = re.compile(patronvideos, re.DOTALL).finditer(data)
QualityStr = ""
for match in matches:
QualityStr = scrapertoolsV2.decodeHtmlentities(match.group(1))[6:]
# Extract the contents - Streaming
load_links(itemlist, '<strong>Streaming:</strong>(.*?)<tableclass=cbtable height=30>', "orange", "Streaming", "SD")
# Extract the contents - Streaming HD
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "yellow", "Streaming HD", "HD")
# Extract the contents - Streaming 3D
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "pink", "Streaming 3D")
# Extract the contents - Download
load_links(itemlist, '<strong>Download:</strong>(.*?)<tableclass=cbtable height=30>', "aqua", "Download")
# Extract the contents - Download HD
load_links(itemlist, '<strong>Download HD[^<]+</strong>(.*?)<tableclass=cbtable width=100% height=20>', "azure",
"Download HD")
if len(itemlist) == 0:
itemlist = servertools.find_video_items(item=item)
# Required to filter links
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item)
return itemlist
def findvid_serie(item):
def load_vid_series(html, item, itemlist, blktxt):
patron = '<a href="([^"]+)"[^=]+="_blank"[^>]+>(.*?)</a>'
# Extract the contents
matches = re.compile(patron, re.DOTALL).finditer(html)
for match in matches:
scrapedurl = match.group(1)
scrapedtitle = match.group(2)
title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
support.log()
itemlist = []
lnkblk = []
lnkblkp = []
data = item.url
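# Note: for series episodes item.url carries the raw HTML fragment captured by episodios(), not a URL;
# the code below splits it into named blocks of links (e.g. 'Streaming:', 'Download:') by position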
# First blocks of links
if data[0:data.find('<a')].find(':') > 0:
lnkblk.append(data[data.find(' - ') + 3:data[0:data.find('<a')].find(':') + 1])
lnkblkp.append(data.find(' - ') + 3)
else:
lnkblk.append(' ')
lnkblkp.append(data.find('<a'))
# Find new blocks of links
patron = r'<a\s[^>]+>[^<]+</a>([^<]+)'
matches = re.compile(patron, re.DOTALL).finditer(data)
for match in matches:
sep = match.group(1)
if sep != ' - ':
lnkblk.append(sep)
i = 0
if len(lnkblk) > 1:
for lb in lnkblk[1:]:
lnkblkp.append(data.find(lb, lnkblkp[i] + len(lnkblk[i])))
i = i + 1
for i in range(0, len(lnkblk)):
if i == len(lnkblk) - 1:
load_vid_series(data[lnkblkp[i]:], item, itemlist, lnkblk[i])
else:
load_vid_series(data[lnkblkp[i]:lnkblkp[i + 1]], item, itemlist, lnkblk[i])
autoplay.start(itemlist, item)
return itemlist
def play(item):
support.log()
itemlist = []
### Handling new cb01 wrapper
if host[9:] + "/film/" in item.url:
iurl = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
support.log("/film/ wrapper: ", iurl)
if iurl:
item.url = iurl
if '/goto/' in item.url:
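# '/goto/' links carry the real target base64-encoded after the last path segment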
item.url = item.url.split('/goto/')[-1].decode('base64')
item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')
logger.debug("##############################################################")
if "go.php" in item.url:
data = httptools.downloadpage(item.url).data
if "window.location.href" in data:
try:
data = scrapertoolsV2.find_single_match(data, 'window.location.href = "([^"]+)";')
except IndexError:
data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
data, c = unshortenit.unwrap_30x_only(data)
else:
data = scrapertoolsV2.find_single_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
logger.debug("##### play go.php data ##\n%s\n##" % data)
else:
data = support.swzz_get_url(item)
return support.server(item, data, headers)

13
channels/cinehindi.json Normal file

@@ -0,0 +1,13 @@
{
"id": "cinehindi",
"name": "CineHindi",
"active": true,
"adult": false,
"language": ["vos"],
"thumbnail": "cinehindi.png",
"banner": "http://i.imgur.com/cau9TVe.png",
"categories": [
"movie",
"vos"
]
}

163
channels/cinehindi.py Normal file

@@ -0,0 +1,163 @@
# -*- coding: UTF-8 -*-
import re
import urlparse
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
IDIOMAS = {'Hindi': 'Hindi'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'netutv']
host = "http://www.cinehindi.com/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host, thumbnail = get_thumb("genres", auto = True)))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host, thumbnail = get_thumb("newest", auto = True)))
#itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
# url=urlparse.urljoin(host, "proximamente")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s="), thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def genero(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(host).data
patron = '<option class=.*? value=([^<]+)>'
patron += '([^<]+)<\/option>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
if 'Próximas Películas' in scrapedtitle:
continue
itemlist.append(item.clone(action='lista', title=scrapedtitle, cat=scrapedurl))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
def proximas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
patron = 'class="item">.*?' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href="([^"]+).*?' # scrapedurl
patron += '<img src="([^"]+).*?' # scrapedthumbnail
patron += 'alt="([^"]+).*?' # scrapedtitle
patron += '<span class="player">.+?<span class="year">([^"]+)<\/span>' # scrapedyear
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
if "ver" in scrapedurl:
scrapedtitle = scrapedtitle + " [" + scrapedyear + "]"
else:
scrapedtitle = scrapedtitle + " [" + scrapedyear + "]" + '(Proximamente)'
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="findvideos", extra=scrapedtitle,
show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie",
context=["buscar_trailer"]))
# Pagination
patron_pag = '<a rel=.+?nofollow.+? class=.+?page larger.+? href=.+?(.+?)proximamente.+?>([^"]+)<\/a>'
pagina = scrapertools.find_multiple_matches(data, patron_pag)
for next_page_url, i in pagina:
if int(i) == 2:
item.url = next_page_url + 'proximamente/page/' + str(i) + '/'
itemlist.append(Item(channel=item.channel, action="proximas", title=">> Página siguiente", url=item.url,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
return itemlist
def lista(item):
logger.info()
itemlist = []
if not item.cat:
data = httptools.downloadpage(item.url).data
else:
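# Category listings go through '?cat=<id>', which redirects to the final URL; take it from the Location header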
url = httptools.downloadpage("%s?cat=%s" %(host, item.cat), follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage(url).data
bloque = data  # scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""") disabled
patron = '<div id=mt.+?>' # Every movie item on this site starts with this
patron += '<a href=([^"]+)\/><div class=image>' # scrapedurl
patron += '<img src=([^"]+) alt=.*?' # scrapedthumbnail
patron += '<span class=tt>([^"]+)<\/span>' # scrapedtitle
patron += '<span class=ttx>([^"]+)<div class=degradado>.*?' # scrapedplot
patron += '<span class=year>([^"]+)<\/span><\/div><\/div>' # scrapedfixyear
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedyear in matches:
#patron = '<span class="year">([^<]+)' # scrapedyear
#scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
scrapedtitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle,'\(\d{4}\)'),'').strip()
title = scrapedtitle
if scrapedyear:
title += ' (%s)' % (scrapedyear)
item.infoLabels['year'] = int(scrapedyear)
patron = '<span class="calidad2">([^<]+).*?' # scrapedquality
#scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
#if scrapedquality:
# title += ' [%s]' % (scrapedquality)
itemlist.append(
item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, plot=scrapedplot, contentType="movie", context=["buscar_trailer"]))
tmdb.set_infoLabels(itemlist)
# Pagination
patron = 'rel="next" href="([^"]+)'
next_page_url = scrapertools.find_single_match(data, patron)
if next_page_url != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
itemlist1 = []
data = httptools.downloadpage(item.url).data
itemlist1.extend(servertools.find_video_items(data=data))
patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
show = scrapertools.find_single_match(data, patron_show)
for videoitem in itemlist1:
videoitem.channel = item.channel
videoitem.infoLabels = item.infoLabels
for i in range(len(itemlist1)):
if not 'youtube' in itemlist1[i].title:
itemlist.append(itemlist1[i])
tmdb.set_infoLabels(itemlist, True)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
return itemlist
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]

78
channels/cinemalibero.json Normal file

@@ -0,0 +1,78 @@
{
"id": "cinemalibero",
"name": "Cinemalibero",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
"banner": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
"categories": ["tvshow", "movie","anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

334
channels/cinemalibero.py Normal file

@@ -0,0 +1,334 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for CinemaLibero - First Version
# ------------------------------------------------------------
import base64
import re
import urlparse
from channels import autoplay
from channels import filtertools
from core import scrapertools, servertools, httptools
from core import tmdb
from core.item import Item
from lib import unshortenit
from platformcode import logger, config
# Required for AutoPlay
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['wstream', 'openload', 'streamango', 'akstream', 'clipwatching', 'cloudvideo', 'youtube']
list_quality = ['default']
# Required for link checking
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cinemalibero')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cinemalibero')
host = 'https://www.cinemalibero.center'
headers = [['Referer', host]]
def mainlist(item):
logger.info('[cinemalibero.py] mainlist')
autoplay.init(item.channel, list_servers, list_quality) # Necessario per Autoplay
# Main menu
itemlist = [Item(channel=item.channel,
action='video',
title='Film',
url=host+'/category/film/',
contentType='movie',
thumbnail=''),
Item(channel=item.channel,
action='sottomenu_film',
title='Generi Film',
url=host,
contentType='movie',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Serie TV',
url=host+'/category/serie-tv/',
contentType='episode',
extra='tv',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Anime',
url=host+'/category/anime-giapponesi/',
contentType='episode',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Sport',
url=host+'/category/sport/',
contentType='movie',
thumbnail=''),
Item(channel=item.channel,
action='search',
title='[B]Cerca...[/B]',
thumbnail=''),
]
autoplay.show_option(item.channel, itemlist) # Required for AutoPlay (settings menu)
return itemlist
def search(item, texto):
logger.info("[cinemalibero.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
return video(item)
# Keep the search going in case of error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def video(item):
logger.info('[cinemalibero.py] video')
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
block = scrapertools.find_single_match(data, '<div class="container">.*?class="col-md-12">(.*?)<div class=(?:"container"|"bg-dark ")>')
# Extract the contents
matches = re.compile(r'<div class="col-lg-3">(.*?)<\/a><\/div>', re.DOTALL).findall(block)
for match in matches:
url = scrapertools.find_single_match(match, r'href="([^"]+)"')
long_title = scrapertools.find_single_match(match, r'<div class="titolo">([^<]+)<\/div>')
thumb = scrapertools.find_single_match(match, r'url=\((.*?)\)')
quality = scrapertools.find_single_match(match, r'<div class="voto">([^<]+)<\/div>')
genere = scrapertools.find_single_match(match, r'<div class="genere">([^<]+)<\/div>')
year = scrapertools.find_single_match(long_title, r'\(([0-9)]+)') or scrapertools.find_single_match(long_title, r'\) ([0-9)]+)')
lang = scrapertools.find_single_match(long_title, r'\(([a-zA-Z)]+)')
title = re.sub(r'\(.*','',long_title)
title = re.sub(r'(?:\(|\))','',title)
if genere:
genere = ' - [' + genere + ']'
if year:
long_title = title + ' - ('+ year + ')' + genere
if lang:
long_title = '[B]' + title + '[/B]' + ' - ('+ lang + ')' + genere
else:
long_title = '[B]' + title + '[/B]'
# Choose between TV series and movies
if item.contentType == 'movie':
tipologia = 'movie'
action = 'findvideos'
elif item.contentType == 'episode':
tipologia = 'tv'
action = 'episodios'
else:
tipologia = 'movie'
action = 'select'
itemlist.append(
Item(channel=item.channel,
action=action,
contentType=item.contentType,
title=long_title,
fulltitle=title,
quality=quality,
url=url,
thumbnail=thumb,
infoLabels={'year': year},
show=title))
# Next page
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers".*?href="([^"]+)">')
if next_page != '':
itemlist.append(
Item(channel=item.channel,
action='video',
title='[B]' + config.get_localized_string(30992) + ' &raquo;[/B]',
url=next_page,
contentType=item.contentType,
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def select(item):
data = httptools.downloadpage(item.url, headers=headers).data
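# Sniff the post's category tags to tell apart TV series, anime and films before dispatching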
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
logger.info('select = ### è una serie ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
extra='serie',
contentType='episode'))
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodio', block, re.IGNORECASE):
logger.info('select = ### è un anime ###')
return episodios(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
extra='anime',
contentType='episode'))
else:
logger.info('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
else:
logger.info('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
contentType='movie'))
def findvideos(item):  # This function must always be named findvideos
logger.info('[cinemalibero.py] findvideos')
itemlist = []
if item.contentType == 'episode':
data = item.url.lower()
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>*?<\/h2>(.*?)<\/div>')
urls = re.findall('<a.*?href="([^"]+)"', block, re.DOTALL)
else:
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub(r'\n|\t','',data).lower()
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>(.*?)<strong>')
urls = re.findall('<a href="([^"]+)".*?class="external"', block, re.DOTALL)
logger.info('URLS'+ str(urls))
if urls:
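# Resolve each shortened/protector link first, then let find_video_items() pick known servers out of the joined URL list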
data =''
for url in urls:
url, c = unshortenit.unshorten(url)
data += url + '\n'
logger.info('DATA'+ data)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle + ' - [COLOR limegreen][[/COLOR]'+videoitem.title+' [COLOR limegreen]][/COLOR]'
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
# 'Add to videolibrary' entry
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
itemlist.append(
Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action='add_pelicula_to_library', extra='findservers', contentTitle=item.contentTitle))
# Required to filter links
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist
def episodios(item):  # This function must always be named episodios
logger.info('[cinemalibero.py] episodios')
itemlist = []
extra =''
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
# logger.info('select = ### è una serie ###')
extra='serie'
elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
if re.findall('episodi', block, re.IGNORECASE):
# logger.info('select = ### è un anime ###')
extra='anime'
block = re.sub(r'<h2>.*?<\/h2>','',block)
block = block.replace('<p>','').replace('<p style="text-align: left;">','').replace('<','<').replace('-<','<').replace('&#8211;<','<').replace('&#8211; <','<').replace('<strong>','<stop><start><strong>')+'<stop>'
block = re.sub(r'stagione completa.*?<\/p>','',block,flags=re.IGNORECASE)
if extra == 'serie':
block = block.replace('<br /> <a','<a')
matches = re.compile(r'<start>.*?(?:stagione|Stagione)(.*?)<\/(?:strong|span)><\/p>(.*?)<stop>', re.DOTALL).findall(block)
for lang, html in matches:
lang = re.sub('<.*?>','',lang)
html = html.replace('<br />','\n').replace('</p>','\n')
matches = re.compile(r'([^<]+)([^\n]+)\n', re.DOTALL).findall(html)
for scrapedtitle, html in matches:
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle + ' - (' + lang + ')',
fulltitle=scrapedtitle,
show=scrapedtitle,
url=html))
elif extra == 'anime':
block = re.sub(r'<start.*?(?:download:|Download:).*?<stop>','',block)
block = re.sub(r'(?:mirror|Mirror)[^<]+<','',block)
block = block.replace('<br />','\n').replace('/a></p>','\n')
block = re.sub(r'<start.*?(?:download|Download).*?\n','',block)
matches = re.compile('<a(.*?)\n', re.DOTALL).findall(block)
for html in matches:
scrapedtitle = scrapertools.find_single_match(html, r'>(.*?)<\/a>')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
show=scrapedtitle,
url=html))
else:
logger.info('select = ### è un film ###')
return findvideos(Item(channel=item.channel,
title=item.title,
fulltitle=item.fulltitle,
url=item.url,
show=item.fulltitle,
contentType='movie'))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist

71
channels/cinemastreaming.json Normal file

@@ -0,0 +1,71 @@
{
"id": "cinemastreaming",
"name": "Cinemastreaming",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "https://www.telegramitalia.it/wp-content/uploads/2018/02/IMG_20180222_214809_805.jpg",
"banner": "https://www.telegramitalia.it/wp-content/uploads/2018/02/IMG_20180222_214809_805.jpg",
"categories": ["tvshow", "movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

80
channels/cinemastreaming.py Normal file

@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for cinemastreaming
# ------------------------------------------------------------
import re
from channels import filtertools
from core import scrapertools, servertools, httptools
from core.item import Item
from platformcode import config
from core import tmdb
host = 'https://cinemastreaming.info'
headers = [['Referer', host]]
def mainlist(item):
log()
# Main menu
itemlist = [Item(channel = item.channel,
contentType = 'movie',
title = 'Film',
url = host + '/film/',
action = 'video',
thumbnail = '',
fanart = ''
),
]
return itemlist
def video(item):
log()
itemlist = [] # Start with an empty list
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<main>(.*?)<\/main>')
block = re.sub('\t|\n', '', block)
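# Each result is an <article class="TPost C"> card; grab url, thumbnail, title and the info block in one pass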
patron = r'<article.*?class="TPost C">.*?<a href="([^"]+)">.*?src="([^"]+)".*?>.*?<h3 class="Title">([^<]+)<\/h3>(.*?)<\/article>'
matches = re.compile(patron, re.DOTALL).findall(block)
for scrapedurl, scrapedthumb, scrapedtitle, scrapedinfo in matches:
log('Info Block', scrapedinfo)
patron = r'<span class="Year">(.*?)<\/span>.*?<span class="Vote.*?">(.*?)<\/span>.*?<div class="Description"><p>(.*?)<\/p>.*?<p class="Genre.*?">(.*?)<\/p><p class="Director.*?">.*?<a.*?>(.*?)<\/a>.*?<p class="Actors.*?">(.*?)<\/p>'
info = re.compile(patron, re.DOTALL).findall(scrapedinfo)
for year, rating, plot, genre, director, cast in info:
genre = scrapertools.find_multiple_matches(genre, r'<a.*?>(.*?)<\/a>')
cast = scrapertools.find_multiple_matches(cast, r'<a.*?>(.*?)<\/a>')
infoLabels = {}
infoLabels['Year'] = year
infoLabels['Rating'] = rating
infoLabels['Plot'] = plot
infoLabels['Genre'] = genre
infoLabels['Director'] = director
infoLabels['Cast'] = cast
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contentType,
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumb,
infoLabels = infoLabels,
show=scrapedtitle))
return itemlist
def log(stringa1="", stringa2=""):
import inspect, os
from platformcode import logger
logger.info("[" + os.path.basename(__file__) + "] - [" + inspect.stack()[1][3] + "] " + str(stringa1) + str(stringa2))

36
channels/cinetecadibologna.json Normal file

@@ -0,0 +1,36 @@
{
"id": "cinetecadibologna",
"name": "Cinetecadibologna",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
"banner": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
"categories": ["documentary"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Includi in Novità - Documentari",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

157
channels/cinetecadibologna.py Normal file

@@ -0,0 +1,157 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for cinetecadibologna
# ------------------------------------------------------------
import re
import urlparse
from core import httptools, scrapertools
from core.item import Item
from platformcode import logger, config
host = "http://cinestore.cinetecadibologna.it"
headers = [['Referer', host]]
def mainlist(item):
logger.info("kod.cinetecadibologna mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Elenco Film - Cineteca di Bologna[/COLOR]",
action="peliculas",
url="%s/video/alfabetico_completo" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"),
Item(channel=item.channel,
title="[COLOR azure]Epoche - Cineteca di Bologna[/COLOR]",
action="epoche",
url="%s/video/epoche" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif"),
Item(channel=item.channel,
title="[COLOR azure]Percorsi Tematici - Cineteca di Bologna[/COLOR]",
action="percorsi",
url="%s/video/percorsi" % host,
thumbnail="http://cinestore.cinetecadibologna.it/pics/logo.gif")]
return itemlist
def peliculas(item):
logger.info("kod.cinetecadibologna peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the contents
patron = '<img src="([^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="([^"]+)"[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedthumbnail = host + scrapedthumbnail
scrapedurl = host + scrapedurl
if not "/video/" in scrapedurl:
continue
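# One extra request per film: fetch its page and cut the text between 'Sinossi:' and the next column div to use as the plot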
html = scrapertools.cache_page(scrapedurl)
start = html.find("Sinossi:")
end = html.find('<div class="sx_col">', start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
folder=True))
# Pagination
patronvideos = '<div class="footerList clearfix">\s*<div class="sx">\s*[^>]+>[^g]+gina[^>]+>\s*[^>]+>\s*<div class="dx">\s*<a href="(.*?)">pagina suc'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url= scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def epoche(item):
logger.info("kod.cinetecadibologna categorias")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
# Narrow search by selecting only the combo
bloque = scrapertools.find_single_match(data, '<h1 class="pagetitle">Epoche</h1>(.*?)</ul>')
# The categories are the options for the combo
patron = '<a href="([^"]+)">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedplot = ""
if scrapedtitle.startswith(("'")):
scrapedtitle = scrapedtitle.replace("'", "Anni '")
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail="http://www.cinetecadibologna.it/pics/cinema-ritrovato-alcinema.png",
plot=scrapedplot))
return itemlist
def percorsi(item):
logger.info("kod.cinetecadibologna categorias")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = '<div class="cover_percorso">\s*<a href="([^"]+)">\s*<img src="([^"]+)"[^>]+>\s*[^>]+>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedplot = ""
scrapedthumbnail = host + scrapedthumbnail
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot))
return itemlist
def findvideos(item):
logger.info("kod.cinetecadibologna findvideos")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = 'filename: "(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for video in matches:
video = host + video
itemlist.append(
Item(
channel=item.channel,
action="play",
title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
url=video,
folder=False))
return itemlist

12
channels/cinetemagay.json Normal file

@@ -0,0 +1,12 @@
{
"id": "cinetemagay",
"name": "Cinetemagay",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "cinetemagay.png",
"banner": "cinetemagay.png",
"categories": [
"adult"
]
}

128
channels/cinetemagay.py Normal file

@@ -0,0 +1,128 @@
# -*- coding: utf-8 -*-
import os
import re
from core import scrapertools
from core import servertools
from core import httptools
from core.item import Item
from platformcode import config, logger
IMAGES_PATH = os.path.join(config.get_runtime_path(), 'resources', 'images', 'cinetemagay')
def strip_tags(value):
return re.sub(r'<[^>]*?>', '', value)
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay latinoamericano",
url="http://cinegaylatinoamericano.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
thumbnail="http://www.americaeconomia.com/sites/default/files/imagecache/foto_nota/homosexual1.jpg"))
itemlist.append(Item(channel=item.channel, action="lista", title="Cine y cortos gay",
url="http://cineycortosgay.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
thumbnail="http://www.elmolar.org/wp-content/uploads/2015/05/cortometraje.jpg"))
itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay online (México)",
url="http://cinegayonlinemexico.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
thumbnail="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTmmqL6tS2Ced1VoxlGQT0q-ibPEz1DCV3E1waHFDI5KT0pg1lJ"))
itemlist.append(Item(channel=item.channel, action="lista", title="Sentido gay",
url="http://www.sentidogay.blogspot.com.es//feeds/posts/default/?max-results=100&start-index=1",
thumbnail="http://1.bp.blogspot.com/-epOPgDD_MQw/VPGZGQOou1I/AAAAAAAAAkI/lC25GrukDuo/s1048/SentidoGay.jpg"))
itemlist.append(Item(channel=item.channel, action="lista", title="PGPA",
url="http://pgpa.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
thumbnail="http://themes.googleusercontent.com/image?id=0BwVBOzw_-hbMNTRlZjk2YWMtYTVlMC00ZjZjLWI3OWEtMWEzZDEzYWVjZmQ4"))
return itemlist
def lista(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries (folders)
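# The Blogspot Atom feed HTML-escapes embedded markup, hence the &lt;img ... &quot; forms in the pattern below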
patronvideos = '&lt;img .*?src=&quot;(.*?)&quot;'
patronvideos += "(.*?)<link rel='alternate' type='text/html' href='([^']+)' title='([^']+)'.*?>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
scrapedtitle = match[3]
scrapedtitle = scrapedtitle.replace("&apos;", "'")
scrapedtitle = scrapedtitle.replace("&quot;", "'")
scrapedtitle = scrapedtitle.replace("&amp;amp;", "'")
scrapedtitle = scrapedtitle.replace("&amp;#39;", "'")
scrapedurl = match[2]
scrapedthumbnail = match[0]
imagen = ""
scrapedplot = match[1]
tipo = match[1]
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
scrapedplot = "<" + scrapedplot
scrapedplot = scrapedplot.replace("&gt;", ">")
scrapedplot = scrapedplot.replace("&lt;", "<")
scrapedplot = scrapedplot.replace("</div>", "\n")
scrapedplot = scrapedplot.replace("<br />", "\n")
scrapedplot = scrapedplot.replace("&amp;", "")
scrapedplot = scrapedplot.replace("nbsp;", "")
scrapedplot = strip_tags(scrapedplot)
itemlist.append(
Item(channel=item.channel, action="detail", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedurl + scrapedplot, folder=True))
variable = item.url.split("index=")[1]
variable = int(variable)
variable += 100
variable = str(variable)
variable_url = item.url.split("index=")[0]
url_nueva = variable_url + "index=" + variable
itemlist.append(
Item(channel=item.channel, action="lista", title="Ir a la página siguiente (desde " + variable + ")",
url=url_nueva, thumbnail="", plot="Pasar a la página siguiente (en grupos de 100)\n\n" + url_nueva))
return itemlist
def detail(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
data = data.replace("%3A", ":")
data = data.replace("%2F", "/")
data = data.replace("%3D", "=")
data = data.replace("%3", "?")
data = data.replace("%26", "&")
descripcion = ""
plot = ""
patrondescrip = 'SINOPSIS:(.*?)<'
matches = re.compile(patrondescrip, re.DOTALL).findall(data)
if len(matches) > 0:
descripcion = matches[0]
descripcion = descripcion.replace("&nbsp;", "")
descripcion = descripcion.replace("<br/>", "")
descripcion = descripcion.replace("\r", "")
descripcion = descripcion.replace("\n", " ")
descripcion = descripcion.replace("\t", " ")
descripcion = re.sub("<[^>]+>", " ", descripcion)
try:
plot = unicode(descripcion, "utf-8").encode("iso-8859-1")
except:
plot = descripcion
# Look for video links to known servers
video_itemlist = servertools.find_video_items(data=data)
for video_item in video_itemlist:
itemlist.append(Item(channel=item.channel, action="play", server=video_item.server,
title=item.title + " " + video_item.title, url=video_item.url, thumbnail=item.thumbnail,
plot=video_item.url, folder=False))
return itemlist

15
channels/cliphunter.json Normal file

@@ -0,0 +1,15 @@
{
"id": "cliphunter",
"name": "cliphunter",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.cliphunter.com/gfx/new/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

109
channels/cliphunter.py Normal file

@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'https://www.cliphunter.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/categories/All"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/popular/ratings/yesterday"))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstars/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def catalogo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">\s*<img src=\'([^\']+)\'/>.*?<span>([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"/>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<img class=".*?" src="([^"]+)".*?<div class="tr">(.*?)</div>.*?<a href="([^"]+)\s*" class="vttl.*?">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedtime,scrapedurl,scrapedtitle in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=thumbnail, contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
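# The watch page embeds the player config as JSON; collect every "url" field and unescape the JSON-escaped slashes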
patron = '"url"\:"(.*?)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
scrapedurl = scrapedurl.replace("\/", "/")
title = scrapedurl
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
return itemlist

36
channels/cloudvideo.py Normal file

@@ -0,0 +1,36 @@
# Conector Cloudvideo By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Cloud] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
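# The player setup is packed JavaScript (p.a.c.k.e.r.); unpack it to expose the <source> tags with the stream URLs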
enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(.*?)</script>")
dec_data = jsunpack.unpack(enc_data)
sources = scrapertools.find_single_match(dec_data, "<source(.*?)</source")
patron = 'src="([^"]+)'
matches = scrapertools.find_multiple_matches(sources, patron)
for url in matches:
quality = 'm3u8'
video_url = url
if 'label' in url:
url = url.split(',')
video_url = url[0]
quality = url[1].replace('label:','')
video_urls.append(['cloudvideo [%s]' % quality, video_url])
return video_urls

33
channels/community.json Normal file

@@ -0,0 +1,33 @@
{
"id": "community",
"name": "Community",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "",
"banner": "",
"fanart": "",
"categories": [
"direct",
"movie",
"tvshow",
"vo"
],
"settings": [
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces del canal en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"No Filtrar",
"LAT",
"CAST",
"VO",
"VOSE"
]
}
]
}

299
channels/community.py Normal file

@@ -0,0 +1,299 @@
# -*- coding: utf-8 -*-
# -*- Channel Community -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import os
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config, platformtools
from channels import autoplay
from channels import filtertools
list_data = {}
list_language = ['LAT', 'CAST', 'VO', 'VOSE']
list_servers = ['directo']
list_quality = ['SD', '720', '1080', '4k']
def mainlist(item):
logger.info()
path = os.path.join(config.get_data_path(), 'community_channels.json')
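# User-added channels are kept in community_channels.json under the profile data folder; create an empty registry on first run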
if not os.path.exists(path):
with open(path, "w") as file:
file.write('{"channels":{}}')
file.close()
autoplay.init(item.channel, list_servers, list_quality)
return show_channels(item)
def show_channels(item):
logger.info()
itemlist = []
context = [{"title": "Eliminar este canal",
"action": "remove_channel",
"channel": "community"}]
path = os.path.join(config.get_data_path(), 'community_channels.json')
file = open(path, "r")
json = jsontools.load(file.read())
itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70676), action='add_channel', thumbnail=get_thumb('add.png')))
for key, channel in json['channels'].items():
if 'poster' in channel:
poster = channel['poster']
else:
poster = ''
itemlist.append(Item(channel=item.channel, title=channel['channel_name'], url=channel['path'],
thumbnail=poster, action='show_menu', channel_id = key, context=context))
return itemlist
def load_json(item):
logger.info()
if item.url.startswith('http'):
json_file = httptools.downloadpage(item.url).data
else:
json_file = open(item.url, "r").read()
json_data = jsontools.load(json_file)
return json_data
def show_menu(item):
global list_data
logger.info()
itemlist = []
json_data = load_json(item)
if "menu" in json_data:
for option in json_data['menu']:
itemlist.append(Item(channel=item.channel, title=option['title'], action='show_menu', url=option['link']))
autoplay.show_option(item.channel, itemlist)
return itemlist
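# No nested menu: work out which list type this JSON carries and hand it to list_all()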
if "movies_list" in json_data:
item.media_type='movies_list'
elif "tvshows_list" in json_data:
item.media_type = 'tvshows_list'
elif "episodes_list" in json_data:
item.media_type = 'episodes_list'
return list_all(item)
return itemlist
def list_all(item):
logger.info()
itemlist = []
media_type = item.media_type
json_data = load_json(item)
for media in json_data[media_type]:
quality, language, plot, poster = set_extra_values(media)
title = media['title']
title = set_title(title, language, quality)
new_item = Item(channel=item.channel, title=title, quality=quality,
language=language, plot=plot, thumbnail=poster)
if 'movies_list' in json_data:
new_item.url = media
new_item.contentTitle = media['title']
new_item.action = 'findvideos'
if 'year' in media:
new_item.infoLabels['year'] = media['year']
else:
new_item.url = media['seasons_list']
new_item.contentSerieName = media['title']
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def seasons(item):
logger.info()
itemlist = []
infoLabels = item.infoLabels
list_seasons = item.url
for season in list_seasons:
infoLabels['season'] = season['season']
title = config.get_localized_string(60027) % season['season']
itemlist.append(Item(channel=item.channel, title=title, url=season['link'], action='episodesxseason',
contentSeasonNumber=season['season'], infoLabels=infoLabels))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist = sorted(itemlist, key=lambda i: i.title)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
json_data = load_json(item)
infoLabels = item.infoLabels
season_number = infoLabels['season']
for episode in json_data['episodes_list']:
episode_number = episode['number']
infoLabels['season'] = season_number
infoLabels['episode'] = episode_number
title = config.get_localized_string(70677) % (season_number, episode_number, episode_number)
itemlist.append(Item(channel=item.channel, title=title, url=episode, action='findvideos',
contentEpisodeNumber=episode_number, infoLabels=infoLabels))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
for url in item.url['links']:
quality, language, plot, poster = set_extra_values(url)
title = ''
title = set_title(title, language, quality)
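# the leading '%s' is filled with the detected server name by get_servers_itemlist() below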
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url['url'], action='play', quality=quality,
language=language, infoLabels = item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist
def add_channel(item):
logger.info()
import xbmc
import xbmcgui
channel_to_add = {}
json_file = ''
result = platformtools.dialog_select(config.get_localized_string(70676), [config.get_localized_string(70678), config.get_localized_string(70679)])
if result == -1:
return
if result==0:
file_path = xbmcgui.Dialog().browseSingle(1, config.get_localized_string(70680), 'files')
try:
channel_to_add['path'] = file_path
json_file = jsontools.load(open(file_path, "r").read())
channel_to_add['channel_name'] = json_file['channel_name']
except:
pass
elif result==1:
url = platformtools.dialog_input("", config.get_localized_string(70681), False)
try:
channel_to_add['path'] = url
json_file = jsontools.load(httptools.downloadpage(url).data)
except:
pass
if len(json_file) == 0:
return
if "episodes_list" in json_file:
platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(70682))
return
channel_to_add['channel_name'] = json_file['channel_name']
path = os.path.join(config.get_data_path(), 'community_channels.json')
community_json = open(path, "r")
community_json = jsontools.load(community_json.read())
id = len(community_json['channels']) + 1
community_json['channels'][id]=(channel_to_add)
with open(path, "w") as file:
file.write(jsontools.dump(community_json))
file.close()
platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(70683) % json_file['channel_name'])
return
def remove_channel(item):
logger.info()
import xbmc
import xbmcgui
path = os.path.join(config.get_data_path(), 'community_channels.json')
community_json = open(path, "r")
community_json = jsontools.load(community_json.read())
id = item.channel_id
to_delete = community_json['channels'][id]['channel_name']
del community_json['channels'][id]
with open(path, "w") as file:
file.write(jsontools.dump(community_json))
file.close()
platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(70684) % to_delete)
platformtools.itemlist_refresh()
return
def set_extra_values(dict):
logger.info()
quality = ''
language = ''
plot = ''
poster = ''
if 'quality' in dict and dict['quality'] != '':
quality = dict['quality'].upper()
if 'language' in dict and dict['language'] != '':
language = dict['language'].upper()
if 'plot' in dict and dict['plot'] != '':
plot = dict['plot']
if 'poster' in dict and dict['poster'] != '':
poster = dict['poster']
return quality, language, plot, poster
def set_title(title, language, quality):
logger.info()
if not config.get_setting('unify'):
if quality != '':
title += ' [%s]' % quality
if language != '':
if not isinstance(language, list):
title += ' [%s]' % language.upper()
else:
title += ' '
for lang in language:
title += '[%s]' % lang.upper()
return title.capitalize()

15
channels/coomelonitas.json Normal file

@@ -0,0 +1,15 @@
{
"id": "coomelonitas",
"name": "Coomelonitas",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.coomelonitas.com/wp-content/themes/3xTheme/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

67
channels/coomelonitas.py Normal file

@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host ='http://www.coomelonitas.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host+ "/?s=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
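# Every movie card lives in a <div class="all"> block; scrape title, link, plot and thumbnail from each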
patron = '<div class="all"(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for match in matches:
title = scrapertools.find_single_match(match,'title="([^"]+)"')
url = scrapertools.find_single_match(match,'<a href="([^"]+)"')
plot = scrapertools.find_single_match(match,'<p class="summary">(.*?)</p>')
thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
fanart=thumbnail, thumbnail=thumbnail, plot=plot, viewmode="movie") )
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
if next_page!="":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

31
channels/cuevana2.json Normal file

@@ -0,0 +1,31 @@
{
"id": "cuevana2",
"name": "Cuevana2",
"active": true,
"adult": false,
"language": ["en"],
"thumbnail": "cuevana2.png",
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

320
channels/cuevana2.py Normal file

@@ -0,0 +1,320 @@
# -*- coding: utf-8 -*-
import re
import urllib
from channelselector import get_thumb
from core.item import Item
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from platformcode import config, logger
from channels import autoplay
host = "http://www.cuevana2.com/"
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'directo', 'yourupload', 'openload', 'dostream']
### MENUS ###
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
# MOVIES
itemlist.append(Item(channel = item.channel, title = "--- Peliculas ---", folder=False, text_bold=True))
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "movies",
url = host + "pelicula", thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "genre",
url = host + "pelicula", thumbnail = get_thumb("genres", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "Por año", action = "age",
url = host + "pelicula", thumbnail = get_thumb("year", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "movies",
url = host + "peliculas-destacadas", thumbnail = get_thumb("favorites", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "Buscar...", action = "search",
url = host + "pelicula/?s=", thumbnail = get_thumb("search", auto = True)))
# SERIES
itemlist.append(Item(channel = item.channel, title = "--- Series ---", folder=False, text_bold=True))
itemlist.append(Item(channel = item.channel, title = "Todas las Series", action = "shows",
url = host + "listar-series", thumbnail = get_thumb("tvshows", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Buscar...", action = "search", extra='1',
url = host + "listar-series", thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
### END MENUS ###
def inArray(arr, arr2):
for word in arr:
if word not in arr2:
return False
return True
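# Illustrative check (taken directly from the code above): every searched word
# must appear in the target word list, e.g.
#   inArray(["idioma", "espanol"], ["el", "idioma", "espanol"])  ->  True
# searchShows() below uses this to match all query words against a title.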
def load_data(url):
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
return data
def redirect_url(url, parameters=None):
data = httptools.downloadpage(url, post=parameters)
logger.info(data.url)
return data.url
def put_movies(itemlist, item, data, pattern):
matches = scrapertools.find_multiple_matches(data, pattern)
for link, img, title, rating, plot in matches:
if 'pelicula' in link:
itemTitle = "%s [COLOR yellow](%s/100)[/COLOR]" % (title, rating)
itemlist.append(Item(channel = item.channel, title=itemTitle, fulltitle=title, thumbnail=img,
url=link, plot=plot, action="findvideos"))
logger.info(link)
return itemlist
def put_episodes(itemlist, item, text):
pattern = '<li>.*?ref="([^"]+).*?"tit">(.*?)</span>'
matches = scrapertools.find_multiple_matches(text, pattern)
for link, title in matches:
itemlist.append(item.clone(title=title, fulltitle=item.title, url=link, action='findvideos', extra=1))
def episodes(item):
logger.info()
itemlist = []
data = load_data(item.url)
seasonsPattern = '"#episodios(\d+)".*?>(.*?)</a>'
episodesPattern = 'id="episodios%s">(.*?)</ul>'
matches = scrapertools.find_multiple_matches(data, seasonsPattern)
for season, title in matches:
itemlist.append(Item(channel = item.channel, title="[COLOR blue]%s[/COLOR]" % title,
folder=False, text_bold=True))
episodeMatches = scrapertools.find_single_match(data, episodesPattern % season)
put_episodes(itemlist, item, episodeMatches)
return itemlist
def shows(item):
logger.info()
itemlist = []
data = load_data(item.url)
pattern = '"in"><a href="([^"]+)">(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, pattern)
for link, title in matches:
itemlist.append(Item(channel = item.channel, title=title, url=host + link, action="episodes"))
return itemlist
def movies(item):
logger.info()
itemlist = []
# download the HTML page
data = load_data(item.url)
# pattern to match the movies
pattern = '<a href="([^"]+)"><div class="img">' #link
pattern += '<img width="120" height="160" src="([^"]+)" class="attachment-thumbnail wp-post-image" alt="([^"]+)".*?' #img and title
pattern += '<span style="width:([0-9]+)%">.*?'
pattern += '"txt">(.*?)</div>' # text
put_movies(itemlist, item, data, pattern)
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page:
itemlist.append(Item(channel = item.channel, title='Siguiente Pagina', url=next_page, action="movies"))
# the movies found have been placed in the list
return itemlist
def searchShows(itemlist, item, texto):
texto = texto.lower().split()
data = load_data(item.url)
pattern = '"in"><a href="([^"]+)">(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, pattern)
for link, title in matches:
keywords = title.lower().split()
logger.info(keywords)
logger.info(texto)
if inArray(texto, keywords):
itemlist.append(Item(channel = item.channel, title=title, url=host + link, action="episodes"))
def searchMovies(itemlist, item, texto):
data = load_data(item.url + texto)
# pattern to match the movies
pattern = '<a href="([^"]+)"><div class="img">' #link
pattern += '<img width="120" height="160" src="([^"]+)" class="attachment-thumbnail wp-post-image" alt="([^"]+)".*?' #img and title
pattern += '<span style="width:([0-9]+)%">.*?'
pattern += '"txt">(.*?)</div>' # text
# the do-while loop is no longer needed now
put_movies(itemlist, item, data, pattern)
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page:
itemlist.append(Item(channel = item.channel, title='Siguiente Pagina', url=next_page, action="movies"))
def search(item, texto):
itemlist = []
if item.extra:
searchShows(itemlist, item, texto)
else:
searchMovies(itemlist, item, texto)
return itemlist
def by(item, pattern):
logger.info()
itemlist = []
# download the HTML page
data = load_data(item.url)
# pattern to search within the page
pattern = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >&&</a>'.replace('&&', pattern)
matches = scrapertools.find_multiple_matches(data, pattern)
for link, genre in matches:
itemlist.append(Item(channel = item.channel, title=genre, url=link, action="movies"))
return itemlist
def genre(item):
return by(item, '(\D+)')
def age(item):
return by(item, '(\d+)')
def GKPluginLink(hash):
hashdata = urllib.urlencode({r'link':hash})
try:
json = httptools.downloadpage('https://player4.cuevana2.com/plugins/gkpluginsphp.php', post=hashdata).data
except:
return None
logger.info(jsontools.load(json))
data = jsontools.load(json) if json else False
if data:
return data['link'] if 'link' in data else None
else:
return None
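# Sketch of the gkpluginsphp.php response shape this parser assumes (inferred
# only from the code above and from findvideos() below, not from any spec):
# either a single link, {"link": "https://..."}, or a list of quality options,
# {"link": [{"link": "https://..."}, ...]}; findvideos() takes the first
# entry whenever a list comes back.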
def RedirectLink(hash):
hashdata = urllib.urlencode({r'url':hash})
return redirect_url('https://player4.cuevana2.com/r.php', hashdata)
def OpenloadLink(hash):
hashdata = urllib.urlencode({r'h':hash})
json = httptools.downloadpage('https://api.cuevana2.com/openload/api.php', post=hashdata).data
logger.info("CUEVANA OL JSON %s" % json)
data = jsontools.load(json) if json else False
return data['url'] if data and data.get('status') == 1 else None
# the pattern looks odd in order to drop duplicates; anyway, that is how a programming language verifies its syntax
def getContentMovie(data, item):
item.infoLabels["year"] = scrapertools.find_single_match(data, 'rel="tag">(\d+)</a>')
genre = ''
for found_genre in scrapertools.find_multiple_matches(data, 'genero/.*?">(.*?)</a>(?=.*?</p>)'):
genre += found_genre + ', '
item.infoLabels["genre"] = genre.strip(', ')
director = ''
for found_director in scrapertools.find_multiple_matches(data, 'director/.*?">(.*?)</a>(?=.*?</p>)'):
director += found_director + ', '
item.infoLabels["director"] = director.strip(', ')
item.infoLabels["cast"] = tuple(found_cast for found_cast in scrapertools.find_multiple_matches(
data, 'reparto/.*?">(.*?)</a>(?=.*?</p>)'))
def getContentShow(data, item):
item.thumbnail = scrapertools.find_single_match(data, 'width="120" height="160" src="([^"]+)"')
item.infoLabels['genre'] = scrapertools.find_single_match(data, '-4px;">(.*?)</div>')
def findvideos(item):
logger.info()
itemlist = []
data = load_data(item.url)
if item.extra:
getContentShow(data, item)
else:
getContentMovie(data, item)
pattern = '<iframe width="650" height="450" scrolling="no" src="([^"]+)'
subtitles = scrapertools.find_single_match(data, '<iframe width="650" height="450" scrolling="no" src=".*?sub=([^"]+)"')
title = "[COLOR blue]Servidor [%s][/COLOR]"
#itemlist.append(Item(channel = item.channel, title=item.url))
for link in scrapertools.find_multiple_matches(data, pattern):
#php.*?=(\w+)&
#url=(.*?)&
if 'player4' in link:
# Just in case both methods are present; this short-circuits anyway
if r'ir.php' in link:
link = scrapertools.find_single_match(link, 'php\?url=(.*?)&').replace('%3A', ':').replace('%2F', '/')
logger.info("CUEVANA IR %s" % link)
elif r'irgoto.php' in link:
link = scrapertools.find_single_match(link, 'php\?url=(.*?)&').replace('%3A', ':').replace('%2F', '/')
link = RedirectLink(link)
logger.info("CUEVANA IRGOTO %s" % link)
elif r'gdv.php' in link:
# Google Drive slows down link lookup; it is not a great option anyway and is the first one to be taken down
continue
else:
link = scrapertools.find_single_match(link, 'php.*?=(\w+)&')
link = GKPluginLink(link)
elif 'openload' in link:
link = scrapertools.find_single_match(link, '\?h=(\w+)&')
logger.info("CUEVANA OL HASH %s" % link)
link = OpenloadLink(link)
logger.info("CUEVANA OL %s" % link)
elif 'youtube' in link:
title = "[COLOR yellow]Ver Trailer (%s)[/COLOR]"
else: # anything else is not implemented; report it if a movie fails to show up
continue
if not link:
continue
# GKplugin can return multiple links with different qualities; a list of custom options
# for Directo would be welcome, but for now only the first link found is returned
if type(link) is list:
link = link[0]['link']
if r'chomikuj.pl' in link:
# For some users the CH option returns a 401 error
link += "|Referer=https://player4.cuevana2.com/plugins/gkpluginsphp.php"
elif r'vidcache.net' in link:
# Avoids a 500 error
link += '|Referer=https://player4.cuevana2.com/yourupload.com.php'
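# (The "|Referer=..." suffix is Kodi's pipe notation for attaching HTTP
# headers to a playback URL; both workarounds above depend on it.)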
itemlist.append(
item.clone(
channel = item.channel,
title=title,
url=link, action='play',
subtitle=subtitles))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist):
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
return itemlist

12
channels/cumlouder.json Normal file

@@ -0,0 +1,12 @@
{
"id": "cumlouder",
"name": "Cumlouder",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "cumlouder.png",
"banner": "cumlouder.png",
"categories": [
"adult"
]
}

210
channels/cumlouder.py Normal file

@@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
logger.info()
itemlist = []
config.set_setting("url_error", False, "cumlouder")
itemlist.append(item.clone(title="Últimos videos", action="videos", url="https://www.cumlouder.com/"))
itemlist.append(item.clone(title="Categorias", action="categorias", url="https://www.cumlouder.com/categories/"))
itemlist.append(item.clone(title="Pornstars", action="pornstars_list", url="https://www.cumlouder.com/girls/"))
itemlist.append(item.clone(title="Listas", action="series", url="https://www.cumlouder.com/series/"))
itemlist.append(item.clone(title="Buscar", action="search", url="https://www.cumlouder.com/search?q=%s"))
return itemlist
def search(item, texto):
logger.info()
item.url = item.url % texto
item.action = "videos"
try:
return videos(item)
except:
import traceback
logger.error(traceback.format_exc())
return []
def pornstars_list(item):
logger.info()
itemlist = []
for letra in "abcdefghijklmnopqrstuvwxyz":
itemlist.append(item.clone(title=letra.upper(), url=urlparse.urljoin(item.url, letra), action="pornstars"))
return itemlist
def pornstars(item):
logger.info()
itemlist = []
data = get_data(item.url)
patron = '<a girl-url="[^"]+" class="[^"]+" href="([^"]+)" title="([^"]+)">[^<]+'
patron += '<img class="thumb" src="([^"]+)" [^<]+<h2[^<]+<span[^<]+</span[^<]+</h2[^<]+'
patron += '<span[^<]+<span[^<]+<span[^<]+</span>([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, count in matches:
if "go.php?" in url:
url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, url)
if not thumbnail.startswith("https"):
thumbnail = "https:%s" % thumbnail
itemlist.append(item.clone(title="%s (%s)" % (title, count), url=url, action="videos", thumbnail=thumbnail))
# Pagination
matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data)
if matches:
if "go.php?" in matches[0]:
url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(item.clone(title="Pagina Siguiente", url=url))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = get_data(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a tag-url=.*?href="([^"]+)" title="([^"]+)".*?<img class="thumb" src="([^"]+)".*?<span class="cantidad">([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, count in matches:
if "go.php?" in url:
url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, url)
if not thumbnail.startswith("https"):
thumbnail = "https:%s" % thumbnail
itemlist.append(
item.clone(title="%s (%s videos)" % (title, count), url=url, action="videos", thumbnail=thumbnail))
# Pagination
matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data)
if matches:
if "go.php?" in matches[0]:
url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(item.clone(title="Pagina Siguiente", url=url))
return itemlist
def series(item):
logger.info()
itemlist = []
data = get_data(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a onclick=.*?href="([^"]+)".*?\<img src="([^"]+)".*?h2 itemprop="name">([^<]+).*?p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumbnail, title, count in matches:
itemlist.append(
item.clone(title="%s (%s) " % (title, count), url=urlparse.urljoin(item.url, url), action="videos", thumbnail=thumbnail))
# Pagination
matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data)
if matches:
if "go.php?" in matches[0]:
url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(item.clone(title="Pagina Siguiente", url=url))
return itemlist
def videos(item):
logger.info()
itemlist = []
data = get_data(item.url)
patron = '<a class="muestra-escena" href="([^"]+)" title="([^"]+)"[^<]+<img class="thumb" src="([^"]+)".*?<span class="minutos"> <span class="ico-minutos sprite"></span> ([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, duration in matches:
if "go.php?" in url:
url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin("https://www.cumlouder.com", url)
if not thumbnail.startswith("https"):
thumbnail = "https:%s" % thumbnail
itemlist.append(item.clone(title="%s (%s)" % (title, duration), url=urlparse.urljoin(item.url, url),
action="play", thumbnail=thumbnail, contentThumbnail=thumbnail,
contentType="movie", contentTitle=title))
# Pagination
nextpage = scrapertools.find_single_match(data, '<ul class="paginador"(.*?)</ul>')
matches = re.compile('<a href="([^"]+)" rel="nofollow">Next »</a>', re.DOTALL).findall(nextpage)
if not matches:
matches = re.compile('<li[^<]+<a href="([^"]+)">Next »</a[^<]+</li>', re.DOTALL).findall(nextpage)
if matches:
if "go.php?" in matches[0]:
url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(item.clone(title="Pagina Siguiente", url=url))
return itemlist
def play(item):
logger.info()
itemlist = []
data = get_data(item.url)
patron = '<source src="([^"]+)" type=\'video/([^\']+)\' label=\'[^\']+\' res=\'([^\']+)\' />'
url, type, res = re.compile(patron, re.DOTALL).findall(data)[0]
if "go.php?" in url:
url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
elif not url.startswith("http"):
url = "http:" + url.replace("&amp;", "&")
itemlist.append(
Item(channel='cumlouder', action="play", title='Video' + res, fulltitle=type.upper() + ' ' + res, url=url,
server="directo", folder=False))
return itemlist
def get_data(url_orig):
try:
if config.get_setting("url_error", "cumlouder"):
raise Exception
response = httptools.downloadpage(url_orig)
if not response.data or "urlopen error [Errno 1]" in str(response.code):
raise Exception
except:
config.set_setting("url_error", True, "cumlouder")
import random
server_random = ['nl', 'de', 'us']
server = server_random[random.randint(0, 2)]
url = "https://%s.hideproxy.me/includes/process.php?action=update" % server
post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \
% (urllib.quote(url_orig), server)
while True:
response = httptools.downloadpage(url, post, follow_redirects=False)
if response.headers.get("location"):
url = response.headers["location"]
post = ""
else:
break
return response.data
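# Usage sketch (an assumption mirroring the calls above): every scraper in
# this channel goes through get_data(), so after the first direct request
# fails the "url_error" flag stays set and all later requests are tunnelled
# through a randomly chosen hideproxy.me mirror (nl/de/us), following its
# redirects until the proxied page body is returned.
#
#   data = get_data("https://www.cumlouder.com/categories/")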

15
channels/czechvideo.json Normal file

@@ -0,0 +1,15 @@
{
"id": "czechvideo",
"name": "Czechvideo",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://czechvideo.org/templates/Default/images/black75.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

87
channels/czechvideo.py Normal file
View File

@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host = 'http://czechvideo.org'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/tags/%s/" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data,'<div class="category">(.*?)</ul>')
patron = '<li><a href="(.*?)".*?>(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="short-story">.*?'
patron += '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?'
patron += 'div class="short-time">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<del><a href="([^"]+)">Next</a></del>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

12
channels/datoporn.json Normal file

@@ -0,0 +1,12 @@
{
"id": "datoporn",
"name": "DatoPorn",
"language": ["*"],
"active": true,
"adult": true,
"thumbnail": "http://i.imgur.com/tBSWudd.png?1",
"banner": "datoporn.png",
"categories": [
"adult"
]
}

73
channels/datoporn.py Normal file

@@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="categorias", title="Categorías", url="http://dato.porn/categories_all", contentType="movie", viewmode="movie"))
itemlist.append(item.clone(title="Buscar...", action="search", contentType="movie", viewmode="movie"))
return itemlist
def search(item, texto):
logger.info()
item.url = "http://dato.porn/?k=%s&op=search" % texto.replace(" ", "+")
return lista(item)
def lista(item):
logger.info()
itemlist = []
# Download the page
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
# Extract the entries
patron = '<div class="videobox">\s*<a href="([^"]+)".*?url\(\'([^\']+)\'.*?<span>(.*?)<\/span><\/div><\/a>.*?class="title">(.*?)<\/a><span class="views">.*?<\/a><\/span><\/div> '
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, duration, scrapedtitle in matches:
if "/embed-" not in scrapedurl:
#scrapedurl = scrapedurl.replace("dato.porn/", "dato.porn/embed-") + ".html"
scrapedurl = scrapedurl.replace("datoporn.co/", "datoporn.co/embed-") + ".html"
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
scrapedtitle += ' gb'
scrapedtitle = scrapedtitle.replace(":", "'")
#logger.debug(scrapedurl + ' / ' + scrapedthumbnail + ' / ' + duration + ' / ' + scrapedtitle)
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg")))
# Extract the next-page marker
#next_page = scrapertools.find_single_match(data, '<a href=["|\']([^["|\']+)["|\']>Next')
next_page = scrapertools.find_single_match(data, '<a class=["|\']page-link["|\'] href=["|\']([^["|\']+)["|\']>Next')
if next_page and itemlist:
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
return itemlist
def categorias(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries (folders)
patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\((.*?)\).*?<span>(.*?)</span>.*?<b>(.*?)</b>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, numero, scrapedtitle in matches:
if numero:
scrapedtitle = "%s (%s)" % (scrapedtitle, numero)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail))
return itemlist

36
channels/documentaristreamingda.json Normal file

@@ -0,0 +1,36 @@
{
"id": "documentaristreamingda",
"name": "DocumentariStreamingDa",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "documentaristreamingda.png",
"banner": "documentaristreamingda.png",
"categories": ["documentary"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Includi in Novità - Documentari",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

261
channels/documentaristreamingda.py Normal file

@@ -0,0 +1,261 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for documentaristreamingda
# ------------------------------------------------------------
import re, urlparse
from platformcode import logger, config
from core import httptools, scrapertools, servertools
from core.item import Item
host = "https://documentari-streaming-da.com"
def mainlist(item):
logger.info("kod.documentaristreamingda mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Aggiornamenti[/COLOR]",
action="peliculas",
url=host + "/?searchtype=movie&post_type=movie&sl=lasts&s=",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Categorie[/COLOR]",
action="categorias",
url=host + "/documentari-streaming-dataarchive/",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def newest(categoria):
logger.info("kod.documentaristreamingda newest" + categoria)
itemlist = []
item = Item()
try:
if categoria == "documentales":
item.url = host + "/?searchtype=movie&post_type=movie&sl=lasts&s="
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continue the search in case of error
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def categorias(item):
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'Categorie</a></li>(.*?)</ul>')
# Extract the contents
patron = '<a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Documentari ", ""))
html = httptools.downloadpage(scrapedurl).data
patron = '>Ultime uscite[^<]+<\/h3><a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(html)
for url in matches:
url = url.replace("&#038;", "&")
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=url,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
folder=True))
return itemlist
def search(item, texto):
logger.info("kod.documentaristreamingda " + item.url + " search " + texto)
item.url = host + "/?searchtype=movie&post_type=movie&s=" + texto
try:
return peliculas(item)
# Continue the search in case of error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
logger.info("kod.documentaristreamingda peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
# Extract the contents
patron = '<div class="movie-poster">\s*<img[^s]+src="([^"]+)"[^=]+=[^=]+="([^"]+)"[^>]+>[^<]+<a[^h]+href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
# html = httptools.downloadpage(scrapedurl)
# start = html.find("</div><h2>")
# end = html.find("<p><strong>", start)
# scrapedplot = html[start:end]
# scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
# scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("streaming", "")
scrapedtitle = scrapedtitle.replace("_", " ")
scrapedtitle = scrapedtitle.replace("-", " ")
scrapedtitle = scrapedtitle.title()
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
viewmode="movie_with_plot",
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
# Pagination
patronvideos = '<a class="next page-numbers" href="(.*?)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
scrapedurl = scrapedurl.replace("&#038;", "&")
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def findvideos(item):
logger.info("kod.documentaristreamingda findvideos")
data = httptools.downloadpage(item.url).data
links = []
begin = data.find('<div class="moview-details-text">')
if begin != -1:
end = data.find('<!-- //movie-details -->', begin)
mdiv = data[begin:end]
items = [[m.end(), m.group(1)] for m in re.finditer('<b style="color:#333333;">(.*?)<\/b>', mdiv)]
if items:
for idx, val in enumerate(items):
if idx == len(items) - 1:
_data = mdiv[val[0]:-1]
else:
_data = mdiv[val[0]:items[idx + 1][0]]
for link in re.findall('<a.*?href="([^"]+)"[^>]+>.*?<b>(.*?)<\/b><\/a>+', _data):
if not link[0].strip() in [l[1] for l in links]: links.append(
[val[1], link[0].strip(), link[1].strip()])
items = [[m.end(), m.group(1)] for m in re.finditer('<p><strong>(.*?)<\/strong><\/p>', mdiv)]
if items:
_title = ''
for idx, val in enumerate(items):
if idx == len(items) - 1:
_data = mdiv[val[0]:-1]
else:
_data = mdiv[val[0]:items[idx + 1][0]]
for link in re.findall('<a\s.*?href="([^"]+)".*?>(?:<span[^>]+>)*(?:<strong>)*([^<]+)', _data):
if not link[0].strip() in [l[1] for l in links]:
if not link[1].strip() in link[0]: _title = link[1].strip()
links.append([_title, link[0].strip(), 'unknown'])
items = [[m.start(), m.group(1)] for m in re.finditer('<li><strong>([^<]+)<', mdiv)]
if items:
for idx, val in enumerate(items):
if idx == len(items) - 1:
_data = mdiv[val[0]:-1]
else:
_data = mdiv[val[0]:items[idx + 1][0]]
for link in re.findall('<a\s.*?href="([^"]+)".*?>(?:<span[^>]+>)*(?:<strong>)*([^<]+)', _data):
if not link[0].strip() in [l[1] for l in links]: links.append(
[val[1], link[0].strip(), link[1].strip()])
itemlist = []
if links:
for l in links:
title = unicode(l[0], 'utf8', 'ignore')
title = title.replace(u'\xa0', ' ').replace('Documentario ', '').replace(' doc ', ' ').replace(' streaming',
'').replace(
' Streaming', '')
url = l[1]
action = "play"
server = "unknown"
folder = False
if url == '#' or not title: continue
logger.info('server: %s' % l[2])
if l[2] != 'unknown':
server = unicode(l[2], 'utf8', 'ignore')
else:
logger.info(url)
match = re.search('https?:\/\/(?:www\.)*([^\.]+)\.', url)
if match:
server = match.group(1)
if server == "documentari-streaming-db":
action = "findvideos"
folder = True
logger.info('server: %s, action: %s' % (server, action))
logger.info(title + ' - [COLOR blue]' + server + '[/COLOR]')
itemlist.append(Item(
channel=item.channel,
title=title + ' - [COLOR blue]' + server + '[/COLOR]',
action=action,
server=server, # servertools.get_server_from_url(url),
url=url,
thumbnail=item.thumbnail,
fulltitle=title,
show=item.show,
plot=item.plot,
parentContent=item,
folder=folder)
)
else:
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

36
channels/doramasmp4.json Normal file

@@ -0,0 +1,36 @@
{
"id": "doramasmp4",
"name": "DoramasMP4",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://s14.postimg.cc/ibh4znkox/doramasmp4.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE",
"VO"
]
}
]
}

235
channels/doramasmp4.py Normal file

@@ -0,0 +1,235 @@
# -*- coding: utf-8 -*-
# -*- Channel DoramasMP4 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www4.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Variedades", action="list_all",
url=host + 'catalogue?format%5B%5D=varieties&sort=latest',
thumbnail='', type='dorama'))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
url=host + 'catalogue?format%5B%5D=movie&sort=latest',
thumbnail=get_thumb('movies', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def doramas_menu(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all",
url=host + 'catalogue?format%5B%5D=drama&sort=latest', thumbnail=get_thumb('all', auto=True),
type='dorama'))
itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'))
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
patron += 'txt-size-12">(\d{4})<.*?text-truncate">([^<]+)<.*?description">([^<]+)<.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
media_type = item.type
for scrapedurl, scrapedthumbnail, year, scrapedtitle, scrapedplot in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
thumbnail=thumbnail, type=media_type, infoLabels={'year':year})
if media_type != 'dorama':
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
new_item.type = item.type
else:
new_item.contentSerieName=scrapedtitle
new_item.action = 'episodios'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" aria-label="Netx">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=host+'catalogue'+next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
type=item.type))
return itemlist
def latest_episodes(item):
logger.info()
itemlist = []
infoLabels = dict()
data = get_source(item.url)
patron = 'shadow-lg rounded" href="([^"]+)".*?src="([^"]+)".*?style="">([^<]+)<.*?>Capítulo (\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
title = '%s %s' % (scrapedtitle, scrapedep)
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
title=title, contentSerieName=contentSerieName, type='episode'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a itemprop="url".*?href="([^"]+)".*?title="(.*?) Cap.*?".*?>Capítulo (\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedtitle, scrapedep in matches:
url = scrapedurl
contentEpisodeNumber = scrapedep
infoLabels['season'] = 1
infoLabels['episode'] = contentEpisodeNumber
if scrapedtitle != '':
title = '%sx%s - %s' % ('1',scrapedep, scrapedtitle)
else:
title = 'episodio %s' % scrapedep
infoLabels = item.infoLabels
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
contentEpisodeNumber=contentEpisodeNumber, type='episode', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", text_color='yellow'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
new_dom = scrapertools.find_single_match(data, "var web = { domain: '(.*?)'")
patron = 'link="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data:
language = IDIOMAS['VO']
else:
language = IDIOMAS['sub']
#if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
# if item.type !='episode' and item.type != 'movie':
# item.type = 'dorama'
# item.contentSerieName = item.contentTitle
# item.contentTitle = ''
# return episodios(item)
# else:
for video_url in matches:
headers = {'referer': video_url}
token = scrapertools.find_single_match(video_url, 'token=(.*)')
if 'fast.php' in video_url:
video_url = 'https://player.rldev.in/fast.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers).data
url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
else:
video_url = new_dom+'api/redirect.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')
new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
if len(itemlist) == 0 and item.type == 'search':
item.contentSerieName = item.contentTitle
item.contentTitle = ''
return episodios(item)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
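# Note (an assumption based only on the regex above): the Location header
# returned by api/redirect.php is expected to embed the real stream URL
# between '@@@' markers after a numeric id, e.g.
#   '...token=123@@@https://some-server/video.mp4@@@'
# which find_single_match('\d+@@@(.*?)@@@') unpacks.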
def search(item, texto):
logger.info()
import urllib
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.type = 'search'
if texto != '':
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist

174
channels/downloads.json Normal file

@@ -0,0 +1,174 @@
{
"id": "downloads",
"name": "Descargas",
"active": false,
"adult": false,
"language": ["*"],
"categories": [
"movie"
],
"settings": [
{
"type": "label",
"label": "@70229",
"enabled": true,
"visible": true
},
{
"id": "library_add",
"type": "bool",
"label": "@70230",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "library_move",
"type": "bool",
"label": "@70231",
"default": false,
"enabled": "eq(-1,true)",
"visible": true
},
{
"id": "browser",
"type": "bool",
"label": "@70232",
"default": true,
"enabled": true,
"visible": true
},
{
"type": "label",
"label": "@70243",
"enabled": true,
"visible": true
},
{
"id": "block_size",
"type": "list",
"label": "@70233",
"lvalues": [
"128 KB",
"256 KB",
"512 KB",
"1 MB",
"2 MB"
],
"default": 1,
"enabled": true,
"visible": true
},
{
"id": "part_size",
"type": "list",
"label": "@70234",
"lvalues": [
"1 MB",
"2 MB",
"4 MB",
"8 MB",
"16 MB",
"32 MB"
],
"default": 1,
"enabled": true,
"visible": true
},
{
"id": "max_connections",
"type": "list",
"label": "@70235",
"lvalues": [
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
],
"default": 4,
"enabled": true,
"visible": true
},
{
"id": "max_buffer",
"type": "list",
"label": "@70236",
"lvalues": [
"0",
"2",
"4",
"6",
"8",
"10",
"12",
"14",
"16",
"18",
"20"
],
"default": 5,
"enabled": true,
"visible": true
},
{
"type": "label",
"label": "@70237",
"enabled": true,
"visible": true
},
{
"id": "server_reorder",
"type": "list",
"label": "@70238",
"lvalues": [
"@70244",
"Reordenar"
],
"default": 1,
"enabled": true,
"visible": true
},
{
"id": "language",
"type": "list",
"label": "@70246",
"lvalues": [
"Esp, Lat, Sub, Eng, Vose",
"Esp, Sub, Lat, Eng, Vose",
"Eng, Sub, Vose, Esp, Lat",
"Vose, Eng, Sub, Esp, Lat"
],
"default": 0,
"enabled": "eq(-1,'Reordenar')",
"visible": true
},
{
"id": "quality",
"type": "list",
"label": "@70240",
"lvalues": [
"@70241",
"HD 1080",
"HD 720",
"SD"
],
"default": 0,
"enabled": "eq(-2,'Reordenar')",
"visible": true
},
{
"id": "server_speed",
"type": "bool",
"label": "@70242",
"default": true,
"enabled": "eq(-3,'Reordenar')",
"visible": true
}
]
}

871
channels/downloads.py Normal file

@@ -0,0 +1,871 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Gestor de descargas
# ------------------------------------------------------------
import os
import re
import time
import unicodedata
from core import filetools
from core import jsontools
from core import scraper
from core import scrapertools
from core import servertools
from core import videolibrarytools
from core.downloader import Downloader
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
STATUS_COLORS = {0: "orange", 1: "orange", 2: "green", 3: "red"}
STATUS_CODES = type("StatusCode", (), {"stoped": 0, "canceled": 1, "completed": 2, "error": 3})
DOWNLOAD_LIST_PATH = config.get_setting("downloadlistpath")
DOWNLOAD_PATH = config.get_setting("downloadpath")
STATS_FILE = os.path.join(config.get_data_path(), "servers.json")
TITLE_FILE = "[COLOR %s][%i%%][/COLOR] %s"
TITLE_TVSHOW = "[COLOR %s][%i%%][/COLOR] %s [%s]"
def mainlist(item):
logger.info()
itemlist = []
# List of files
for file in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)):
# Skip everything that is not JSON
if not file.endswith(".json"): continue
# load the item
file = os.path.join(DOWNLOAD_LIST_PATH, file)
i = Item(path=file).fromjson(filetools.read(file))
i.thumbnail = i.contentThumbnail
# Main listing
if not item.contentType == "tvshow":
# Series
if i.contentType == "episode":
# Make sure the show is not already in the itemlist
if not filter(
lambda x: x.contentSerieName == i.contentSerieName and x.contentChannel == i.contentChannel,
itemlist):
title = TITLE_TVSHOW % (
STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentSerieName, i.contentChannel)
itemlist.append(Item(title=title, channel="downloads", action="mainlist", contentType="tvshow",
contentSerieName=i.contentSerieName, contentChannel=i.contentChannel,
downloadStatus=i.downloadStatus, downloadProgress=[i.downloadProgress],
fanart=i.fanart, thumbnail=i.thumbnail))
else:
s = \
filter(
lambda x: x.contentSerieName == i.contentSerieName and x.contentChannel == i.contentChannel,
itemlist)[0]
s.downloadProgress.append(i.downloadProgress)
downloadProgress = sum(s.downloadProgress) / len(s.downloadProgress)
if not s.downloadStatus in [STATUS_CODES.error, STATUS_CODES.canceled] and not i.downloadStatus in [
STATUS_CODES.completed, STATUS_CODES.stoped]:
s.downloadStatus = i.downloadStatus
s.title = TITLE_TVSHOW % (
STATUS_COLORS[s.downloadStatus], downloadProgress, i.contentSerieName, i.contentChannel)
# Movies
elif i.contentType == "movie" or i.contentType == "video":
i.title = TITLE_FILE % (STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentTitle)
itemlist.append(i)
# Listing inside a TV show
else:
if i.contentType == "episode" and i.contentSerieName == item.contentSerieName and i.contentChannel == item.contentChannel:
i.title = TITLE_FILE % (STATUS_COLORS[i.downloadStatus], i.downloadProgress,
"%dx%0.2d: %s" % (i.contentSeason, i.contentEpisodeNumber, i.contentTitle))
itemlist.append(i)
estados = [i.downloadStatus for i in itemlist]
# If any download is completed
if 2 in estados:
itemlist.insert(0, Item(channel=item.channel, action="clean_ready", title=config.get_localized_string(70218),
contentType=item.contentType, contentChannel=item.contentChannel,
contentSerieName=item.contentSerieName, text_color="sandybrown"))
# If any download has an error
if 3 in estados:
itemlist.insert(0, Item(channel=item.channel, action="restart_error", title=config.get_localized_string(70219),
contentType=item.contentType, contentChannel=item.contentChannel,
contentSerieName=item.contentSerieName, text_color="orange"))
# If any download is pending
if 1 in estados or 0 in estados:
itemlist.insert(0, Item(channel=item.channel, action="download_all", title=config.get_localized_string(70220),
contentType=item.contentType, contentChannel=item.contentChannel,
contentSerieName=item.contentSerieName, text_color="green"))
if len(itemlist):
itemlist.insert(0, Item(channel=item.channel, action="clean_all", title=config.get_localized_string(70221),
contentType=item.contentType, contentChannel=item.contentChannel,
contentSerieName=item.contentSerieName, text_color="red"))
if not item.contentType == "tvshow" and config.get_setting("browser", "downloads") == True:
itemlist.insert(0, Item(channel=item.channel, action="browser", title=config.get_localized_string(70222),
url=DOWNLOAD_PATH, text_color="yellow"))
if not item.contentType == "tvshow":
itemlist.insert(0, Item(channel=item.channel, action="settings", title=config.get_localized_string(70223),
text_color="blue"))
return itemlist
def settings(item):
ret = platformtools.show_channel_settings(caption=config.get_localized_string(70224))
platformtools.itemlist_refresh()
return ret
def browser(item):
logger.info()
itemlist = []
for file in filetools.listdir(item.url):
if file == "list": continue
if filetools.isdir(filetools.join(item.url, file)):
itemlist.append(
Item(channel=item.channel, title=file, action=item.action, url=filetools.join(item.url, file)))
else:
itemlist.append(Item(channel=item.channel, title=file, action="play", url=filetools.join(item.url, file)))
return itemlist
def clean_all(item):
logger.info()
for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)):
if fichero.endswith(".json"):
download_item = Item().fromjson(filetools.read(os.path.join(DOWNLOAD_LIST_PATH, fichero)))
if not item.contentType == "tvshow" or (
item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel):
filetools.remove(os.path.join(DOWNLOAD_LIST_PATH, fichero))
platformtools.itemlist_refresh()
def clean_ready(item):
logger.info()
for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)):
if fichero.endswith(".json"):
download_item = Item().fromjson(filetools.read(os.path.join(DOWNLOAD_LIST_PATH, fichero)))
if not item.contentType == "tvshow" or (
item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel):
if download_item.downloadStatus == STATUS_CODES.completed:
filetools.remove(os.path.join(DOWNLOAD_LIST_PATH, fichero))
platformtools.itemlist_refresh()
def restart_error(item):
logger.info()
for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)):
if fichero.endswith(".json"):
download_item = Item().fromjson(filetools.read(os.path.join(DOWNLOAD_LIST_PATH, fichero)))
if not item.contentType == "tvshow" or (
item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel):
if download_item.downloadStatus == STATUS_CODES.error:
if filetools.isfile(
os.path.join(config.get_setting("downloadpath"), download_item.downloadFilename)):
filetools.remove(
os.path.join(config.get_setting("downloadpath"), download_item.downloadFilename))
update_json(os.path.join(DOWNLOAD_LIST_PATH, fichero),
{"downloadStatus": STATUS_CODES.stoped, "downloadComplete": 0, "downloadProgress": 0})
platformtools.itemlist_refresh()
def download_all(item):
time.sleep(0.5)
for fichero in sorted(filetools.listdir(DOWNLOAD_LIST_PATH)):
if fichero.endswith(".json"):
download_item = Item(path=os.path.join(DOWNLOAD_LIST_PATH, fichero)).fromjson(
filetools.read(os.path.join(DOWNLOAD_LIST_PATH, fichero)))
if not item.contentType == "tvshow" or (
item.contentSerieName == download_item.contentSerieName and item.contentChannel == download_item.contentChannel):
if download_item.downloadStatus in [STATUS_CODES.stoped, STATUS_CODES.canceled]:
res = start_download(download_item)
platformtools.itemlist_refresh()
# Stop if it was canceled
if res == STATUS_CODES.canceled: break
def menu(item):
logger.info()
if item.downloadServer:
servidor = item.downloadServer.get("server", "Auto")
else:
servidor = "Auto"
# Options available for the menu
op = [config.get_localized_string(70225), config.get_localized_string(70226), config.get_localized_string(70227),
"Modificar servidor: %s" % (servidor.capitalize())]
opciones = []
# Build the menu options
if item.downloadStatus == 0: # Not downloaded
opciones.append(op[0]) # Download
if not item.server: opciones.append(op[3]) # Choose server
opciones.append(op[1]) # Remove from the list
if item.downloadStatus == 1: # partial download
opciones.append(op[0]) # Download
if not item.server: opciones.append(op[3]) # Choose server
opciones.append(op[2]) # Restart download
opciones.append(op[1]) # Remove from the list
if item.downloadStatus == 2: # download completed
opciones.append(op[1]) # Remove from the list
opciones.append(op[2]) # Restart download
if item.downloadStatus == 3: # download with error
opciones.append(op[2]) # Restart download
opciones.append(op[1]) # Remove from the list
# Show the dialog
seleccion = platformtools.dialog_select(config.get_localized_string(30163), opciones)
# -1 means cancel
if seleccion == -1: return
logger.info("opcion=%s" % (opciones[seleccion]))
# Remove option
if opciones[seleccion] == op[1]:
filetools.remove(item.path)
# Start-download option
if opciones[seleccion] == op[0]:
start_download(item)
# Choose server
if opciones[seleccion] == op[3]:
select_server(item)
# Restart download
if opciones[seleccion] == op[2]:
if filetools.isfile(os.path.join(config.get_setting("downloadpath"), item.downloadFilename)):
filetools.remove(os.path.join(config.get_setting("downloadpath"), item.downloadFilename))
update_json(item.path, {"downloadStatus": STATUS_CODES.stoped, "downloadComplete": 0, "downloadProgress": 0,
"downloadServer": {}})
platformtools.itemlist_refresh()
def move_to_libray(item):
download_path = filetools.join(config.get_setting("downloadpath"), item.downloadFilename)
library_path = filetools.join(config.get_videolibrary_path(), *filetools.split(item.downloadFilename))
final_path = download_path
if config.get_setting("library_add", "downloads") == True and config.get_setting("library_move",
"downloads") == True:
if not filetools.isdir(filetools.dirname(library_path)):
filetools.mkdir(filetools.dirname(library_path))
if filetools.isfile(library_path) and filetools.isfile(download_path):
filetools.remove(library_path)
if filetools.isfile(download_path):
if filetools.move(download_path, library_path):
final_path = library_path
if len(filetools.listdir(filetools.dirname(download_path))) == 0:
filetools.rmdir(filetools.dirname(download_path))
if config.get_setting("library_add", "downloads") == True:
if filetools.isfile(final_path):
if item.contentType == "movie" and item.infoLabels["tmdb_id"]:
library_item = Item(title=config.get_localized_string(70228) % item.downloadFilename, channel="downloads",
action="findvideos", infoLabels=item.infoLabels, url=final_path)
videolibrarytools.save_movie(library_item)
elif item.contentType == "episode" and item.infoLabels["tmdb_id"]:
library_item = Item(title=config.get_localized_string(70228) % item.downloadFilename, channel="downloads",
action="findvideos", infoLabels=item.infoLabels, url=final_path)
tvshow = Item(channel="downloads", contentType="tvshow",
infoLabels={"tmdb_id": item.infoLabels["tmdb_id"]})
videolibrarytools.save_tvshow(tvshow, [library_item])
def update_json(path, params):
item = Item().fromjson(filetools.read(path))
item.__dict__.update(params)
filetools.write(path, item.tojson())
def save_server_statistics(server, speed, success):
if os.path.isfile(STATS_FILE):
servers = jsontools.load(open(STATS_FILE, "rb").read())
else:
servers = {}
if not server in servers:
servers[server] = {"success": [], "count": 0, "speeds": [], "last": 0}
servers[server]["count"] += 1
servers[server]["success"].append(bool(success))
servers[server]["success"] = servers[server]["success"][-5:]
servers[server]["last"] = time.time()
if success:
servers[server]["speeds"].append(speed)
servers[server]["speeds"] = servers[server]["speeds"][-5:]
open(STATS_FILE, "wb").write(jsontools.dump(servers))
return
def get_server_position(server):
if os.path.isfile(STATS_FILE):
servers = jsontools.load(open(STATS_FILE, "rb").read())
else:
servers = {}
if server in servers:
pos = [s for s in sorted(servers, key=lambda x: (sum(servers[x]["speeds"]) / (len(servers[x]["speeds"]) or 1),
float(sum(servers[x]["success"])) / (
len(servers[x]["success"]) or 1)), reverse=True)]
return pos.index(server) + 1
else:
return 0
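# Worked example of the ranking above: servers sort first by mean speed over
# their last five downloads, then by success ratio, best first. Given
#   serverA: speeds [500, 400], success [True, True]  -> (450.0, 1.0)
#   serverB: speeds [600],      success [True, False] -> (600.0, 0.5)
# serverB ranks first on speed, so get_server_position("serverB") returns 1
# and get_server_position("serverA") returns 2.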
def get_match_list(data, match_list, order_list=None, only_ascii=False, ignorecase=False):
"""
Busca coincidencias en una cadena de texto, con un diccionario de "ID" / "Listado de cadenas de busqueda":
{ "ID1" : ["Cadena 1", "Cadena 2", "Cadena 3"],
"ID2" : ["Cadena 4", "Cadena 5", "Cadena 6"]
}
El diccionario no pude contener una misma cadena de busqueda en varías IDs.
La busqueda se realiza por orden de tamaño de cadena de busqueda (de mas larga a mas corta) si una cadena coincide,
se elimina de la cadena a buscar para las siguientes, para que no se detecten dos categorias si una cadena es parte de otra:
por ejemplo: "Idioma Español" y "Español" si la primera aparece en la cadena "Pablo sabe hablar el Idioma Español"
coincidira con "Idioma Español" pero no con "Español" ya que la coincidencia mas larga tiene prioridad.
"""
match_dict = dict()
matches = []
# Convert the string to unicode
data = unicode(data, "utf8")
# Flatten the dictionary to {"String 1": "ID1", "String 2": "ID1", "String 4": "ID2"} and convert the keys to unicode
for key in match_list:
if order_list and not key in order_list:
raise Exception("key '%s' not in match_list" % key)
for value in match_list[key]:
if value in match_dict:
raise Exception("Duplicate word in list: '%s'" % value)
match_dict[unicode(value, "utf8")] = key
# If ignorecase=True, uppercase everything
if ignorecase:
data = data.upper()
match_dict = dict((key.upper(), match_dict[key]) for key in match_dict)
# If only_ascii=True, strip all accents and Ñ
if only_ascii:
data = ''.join((c for c in unicodedata.normalize('NFD', data) if unicodedata.category(c) != 'Mn'))
match_dict = dict((''.join((c for c in unicodedata.normalize('NFD', key) if unicodedata.category(c) != 'Mn')),
match_dict[key]) for key in match_dict)
# Sort the list from longest to shortest and search.
for match in sorted(match_dict, key=lambda x: len(x), reverse=True):
s = data
for a in matches:
s = s.replace(a, "")
if match in s:
matches.append(match)
if matches:
if order_list:
return type("Mtch_list", (),
{"key": match_dict[matches[-1]], "index": order_list.index(match_dict[matches[-1]])})
else:
return type("Mtch_list", (), {"key": match_dict[matches[-1]], "index": None})
else:
if order_list:
return type("Mtch_list", (), {"key": None, "index": len(order_list)})
else:
return type("Mtch_list", (), {"key": None, "index": None})
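# Illustrative call (hypothetical data, mirroring the docstring example):
#
#   m = get_match_list("Pablo sabe hablar el Idioma Español",
#                      {"ID1": ["Idioma Español"], "ID2": ["Español"]},
#                      order_list=["ID1", "ID2"])
#
# "Idioma Español" is tried first because it is longer and, once matched, is
# stripped from the text, so "Español" no longer matches: m.key == "ID1" and
# m.index == 0.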
def sort_method(item):
"""
Puntua cada item en funcion de varios parametros:
@type item: item
@param item: elemento que se va a valorar.
@return: puntuacion otenida
@rtype: int
"""
lang_orders = {}
lang_orders[0] = ["ES", "LAT", "SUB", "ENG", "VOSE"]
lang_orders[1] = ["ES", "SUB", "LAT", "ENG", "VOSE"]
lang_orders[2] = ["ENG", "SUB", "VOSE", "ESP", "LAT"]
lang_orders[3] = ["VOSE", "ENG", "SUB", "ESP", "LAT"]
quality_orders = {}
quality_orders[0] = ["BLURAY", "FULLHD", "HD", "480P", "360P", "240P"]
quality_orders[1] = ["FULLHD", "HD", "480P", "360P", "240P", "BLURAY"]
quality_orders[2] = ["HD", "480P", "360P", "240P", "FULLHD", "BLURAY"]
quality_orders[3] = ["480P", "360P", "240P", "BLURAY", "FULLHD", "HD"]
order_list_idiomas = lang_orders[int(config.get_setting("language", "downloads"))]
match_list_idimas = {"ES": ["CAST", "ESP", "Castellano", "Español", "Audio Español"],
"LAT": ["LAT", "Latino"],
"SUB": ["Subtitulo Español", "Subtitulado", "SUB"],
"ENG": ["EN", "ENG", "Inglés", "Ingles", "English"],
"VOSE": ["VOSE"]}
order_list_calidad = ["BLURAY", "FULLHD", "HD", "480P", "360P", "240P"]
order_list_calidad = quality_orders[int(config.get_setting("quality", "downloads"))]
match_list_calidad = {"BLURAY": ["BR", "BLURAY"],
"FULLHD": ["FULLHD", "FULL HD", "1080", "HD1080", "HD 1080"],
"HD": ["HD", "HD REAL", "HD 720", "720", "HDTV"],
"480P": ["SD", "480P"],
"360P": ["360P"],
"240P": ["240P"]}
value = (get_match_list(item.title, match_list_idiomas, order_list_idiomas, ignorecase=True, only_ascii=True).index, \
get_match_list(item.title, match_list_calidad, order_list_calidad, ignorecase=True, only_ascii=True).index)
if config.get_setting("server_speed", "downloads"):
value += tuple([get_server_position(item.server)])
return value
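# Usage sketch: sort_method is meant as a key function, e.g.
#   play_items.sort(key=sort_method)
# (see download_from_best_server below); tuples with lower preference
# indexes compare smaller, so preferred language/quality items come first.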
def download_from_url(url, item):
logger.info("Intentando descargar: %s" % (url))
if url.lower().endswith(".m3u8") or url.lower().startswith("rtmp"):
save_server_statistics(item.server, 0, False)
return {"downloadStatus": STATUS_CODES.error}
# Get the download path and the file name
item.downloadFilename = item.downloadFilename.replace('/','-')
download_path = filetools.dirname(filetools.join(DOWNLOAD_PATH, item.downloadFilename))
file_name = filetools.basename(filetools.join(DOWNLOAD_PATH, item.downloadFilename))
# Create the folder if it does not exist
if not filetools.exists(download_path):
filetools.mkdir(download_path)
# Start the download
d = Downloader(url, download_path, file_name,
max_connections=1 + int(config.get_setting("max_connections", "downloads")),
block_size=2 ** (17 + int(config.get_setting("block_size", "downloads"))),
part_size=2 ** (20 + int(config.get_setting("part_size", "downloads"))),
max_buffer=2 * int(config.get_setting("max_buffer", "downloads")))
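# Note: the numeric download settings are exponents, e.g. a block_size
# setting of 0 yields 2**17 = 128 KiB blocks and a part_size setting of 0
# yields 2**20 = 1 MiB parts.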
d.start_dialog(config.get_localized_string(60332))
# Download stopped. Check its final state:
# An error occurred during the download
if d.state == d.states.error:
logger.info("Error al intentar descargar %s" % (url))
status = STATUS_CODES.error
# The download was stopped
elif d.state == d.states.stopped:
logger.info("Descarga detenida")
status = STATUS_CODES.canceled
# The download finished
elif d.state == d.states.completed:
logger.info("Descargado correctamente")
status = STATUS_CODES.completed
if item.downloadSize and item.downloadSize != d.size[0]:
status = STATUS_CODES.error
save_server_statistics(item.server, d.speed[0], d.state != d.states.error)
dir = os.path.dirname(item.downloadFilename)
file = filetools.join(dir, d.filename)
if status == STATUS_CODES.completed:
move_to_libray(item.clone(downloadFilename=file))
return {"downloadUrl": d.download_url, "downloadStatus": status, "downloadSize": d.size[0],
"downloadProgress": d.progress, "downloadCompleted": d.downloaded[0], "downloadFilename": file}
def download_from_server(item):
logger.info(item.tostring())
unsupported_servers = ["torrent"]
progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70178) % item.server)
channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
if hasattr(channel, "play") and not item.play_menu:
progreso.update(50, config.get_localized_string(70178) % item.server, config.get_localized_string(60003) % item.contentChannel)
try:
itemlist = getattr(channel, "play")(item.clone(channel=item.contentChannel, action=item.contentAction))
except:
logger.error("Error en el canal %s" % item.contentChannel)
else:
if len(itemlist) and isinstance(itemlist[0], Item):
download_item = item.clone(**itemlist[0].__dict__)
download_item.contentAction = download_item.action
download_item.infoLabels = item.infoLabels
item = download_item
elif len(itemlist) and isinstance(itemlist[0], list):
item.video_urls = itemlist
if not item.server: item.server = "directo"
else:
logger.info("No hay nada que reproducir")
return {"downloadStatus": STATUS_CODES.error}
progreso.close()
logger.info("contentAction: %s | contentChannel: %s | server: %s | url: %s" % (
item.contentAction, item.contentChannel, item.server, item.url))
if not item.server or not item.url or not item.contentAction == "play" or item.server in unsupported_servers:
logger.error("El Item no contiene los parametros necesarios.")
return {"downloadStatus": STATUS_CODES.error}
if not item.video_urls:
video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(item.server, item.url, item.password,
True)
else:
video_urls, puedes, motivo = item.video_urls, True, ""
# If the video is not available, bail out
if not puedes:
logger.info("El vídeo **NO** está disponible")
return {"downloadStatus": STATUS_CODES.error}
else:
logger.info("El vídeo **SI** está disponible")
result = {}
# Try every option until one downloads successfully
for video_url in reversed(video_urls):
result = download_from_url(video_url[1], item)
if result["downloadStatus"] in [STATUS_CODES.canceled, STATUS_CODES.completed]:
break
# Download error, move on to the next option
if result["downloadStatus"] == STATUS_CODES.error:
continue
# Return the final status
return result
def download_from_best_server(item):
logger.info(
"contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
result = {"downloadStatus": STATUS_CODES.error}
progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70179))
channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
progreso.update(50, config.get_localized_string(70184), config.get_localized_string(70180) % item.contentChannel)
if hasattr(channel, item.contentAction):
play_items = getattr(channel, item.contentAction)(
item.clone(action=item.contentAction, channel=item.contentChannel))
else:
play_items = servertools.find_video_items(item.clone(action=item.contentAction, channel=item.contentChannel))
play_items = filter(lambda x: x.action == "play" and not "trailer" in x.title.lower(), play_items)
progreso.update(100, config.get_localized_string(70183), config.get_localized_string(70181) % len(play_items),
config.get_localized_string(70182))
if config.get_setting("server_reorder", "downloads") == 1:
play_items.sort(key=sort_method)
if progreso.iscanceled():
return {"downloadStatus": STATUS_CODES.canceled}
progreso.close()
# Go through the server list until one works
for play_item in play_items:
play_item = item.clone(**play_item.__dict__)
play_item.contentAction = play_item.action
play_item.infoLabels = item.infoLabels
result = download_from_server(play_item)
if progreso.iscanceled():
result["downloadStatus"] = STATUS_CODES.canceled
# Whether the download was canceled or completed, stop trying further options
if result["downloadStatus"] in [STATUS_CODES.canceled, STATUS_CODES.completed]:
result["downloadServer"] = {"url": play_item.url, "server": play_item.server}
break
return result
def select_server(item):
logger.info(
"contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70179))
channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
progreso.update(50, config.get_localized_string(70184), config.get_localized_string(70180) % item.contentChannel)
if hasattr(channel, item.contentAction):
play_items = getattr(channel, item.contentAction)(
item.clone(action=item.contentAction, channel=item.contentChannel))
else:
play_items = servertools.find_video_items(item.clone(action=item.contentAction, channel=item.contentChannel))
play_items = filter(lambda x: x.action == "play" and not "trailer" in x.title.lower(), play_items)
progreso.update(100, config.get_localized_string(70183), config.get_localized_string(70181) % len(play_items),
config.get_localized_string(70182))
for x, i in enumerate(play_items):
if not i.server and hasattr(channel, "play"):
play_items[x] = getattr(channel, "play")(i)
seleccion = platformtools.dialog_select(config.get_localized_string(70192), ["Auto"] + [s.title for s in play_items])
if seleccion >= 1:
update_json(item.path, {
"downloadServer": {"url": play_items[seleccion - 1].url, "server": play_items[seleccion - 1].server}})
elif seleccion == 0:
update_json(item.path, {"downloadServer": {}})
platformtools.itemlist_refresh()
def start_download(item):
logger.info(
"contentAction: %s | contentChannel: %s | url: %s" % (item.contentAction, item.contentChannel, item.url))
# We already have a server, just download
if item.contentAction == "play":
ret = download_from_server(item)
update_json(item.path, ret)
return ret["downloadStatus"]
elif item.downloadServer and item.downloadServer.get("server"):
ret = download_from_server(
item.clone(server=item.downloadServer.get("server"), url=item.downloadServer.get("url"),
contentAction="play"))
update_json(item.path, ret)
return ret["downloadStatus"]
# No server yet, look for the best one
else:
ret = download_from_best_server(item)
update_json(item.path, ret)
return ret["downloadStatus"]
def get_episodes(item):
logger.info("contentAction: %s | contentChannel: %s | contentType: %s" % (
item.contentAction, item.contentChannel, item.contentType))
# The item to download is ALREADY an episode
if item.contentType == "episode":
episodes = [item.clone()]
# The item is a show or a season
elif item.contentType in ["tvshow", "season"]:
# Import the channel
channel = __import__('channels.%s' % item.contentChannel, None, None, ["channels.%s" % item.contentChannel])
# Get the episode list
episodes = getattr(channel, item.contentAction)(item)
itemlist = []
# We have the list, now validate each entry
for episode in episodes:
# If we started from an item that was already an episode, these fields are already correct
if item.contentType != "episode":
episode.contentAction = episode.action
episode.contentChannel = episode.channel
# If the result is a season it is not enough; fetch the episodes of each season
if episode.contentType == "season":
itemlist.extend(get_episodes(episode))
# If the result is an episode it is what we need; prepare it for the download queue
if episode.contentType == "episode":
# Propagate the tmdb id to the episode
if not episode.infoLabels["tmdb_id"]:
episode.infoLabels["tmdb_id"] = item.infoLabels["tmdb_id"]
# Episode, season and title
if not episode.contentSeason or not episode.contentEpisodeNumber:
season_and_episode = scrapertools.get_season_and_episode(episode.title)
if season_and_episode:
episode.contentSeason = season_and_episode.split("x")[0]
episode.contentEpisodeNumber = season_and_episode.split("x")[1]
# Look it up on tmdb
if item.infoLabels["tmdb_id"]:
scraper.find_and_set_infoLabels(episode)
# Fill in the title if it is still missing
if not episode.contentTitle:
episode.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)|\d*x\d*\s*-", "", episode.title).strip()
episode.downloadFilename = filetools.validate_path(os.path.join(item.downloadFilename, "%dx%0.2d - %s" % (
episode.contentSeason, episode.contentEpisodeNumber, episode.contentTitle.strip())))
itemlist.append(episode)
# Any other result is of no use, skip it
else:
logger.info("Omitiendo item no válido: %s" % episode.tostring())
return itemlist
def write_json(item):
logger.info()
item.action = "menu"
item.channel = "downloads"
item.downloadStatus = STATUS_CODES.stoped
item.downloadProgress = 0
item.downloadSize = 0
item.downloadCompleted = 0
if not item.contentThumbnail:
item.contentThumbnail = item.thumbnail
for name in ["text_bold", "text_color", "text_italic", "context", "totalItems", "viewmode", "title", "fulltitle",
"thumbnail"]:
if item.__dict__.has_key(name):
item.__dict__.pop(name)
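# Only presentation attributes are stripped; everything the download
# needs to resume later stays in the JSON written below.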
path = os.path.join(config.get_setting("downloadlistpath"), str(time.time()) + ".json")
filetools.write(path, item.tojson())
item.path = path
time.sleep(0.1)
def save_download(item):
logger.info()
# Context menu
if item.from_action and item.from_channel:
item.channel = item.from_channel
item.action = item.from_action
del item.from_action
del item.from_channel
item.contentChannel = item.channel
item.contentAction = item.action
if item.contentType in ["tvshow", "episode", "season"]:
save_download_tvshow(item)
elif item.contentType == "movie":
save_download_movie(item)
else:
save_download_video(item)
def save_download_video(item):
logger.info("contentAction: %s | contentChannel: %s | contentTitle: %s" % (
item.contentAction, item.contentChannel, item.contentTitle))
set_movie_title(item)
item.downloadFilename = filetools.validate_path("%s [%s]" % (item.contentTitle.strip(), item.contentChannel))
write_json(item)
if not platformtools.dialog_yesno(config.get_localized_string(30101), config.get_localized_string(70189)):
platformtools.dialog_ok(config.get_localized_string(30101), item.contentTitle,
config.get_localized_string(30109))
else:
start_download(item)
def save_download_movie(item):
logger.info("contentAction: %s | contentChannel: %s | contentTitle: %s" % (
item.contentAction, item.contentChannel, item.contentTitle))
progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70191))
set_movie_title(item)
result = scraper.find_and_set_infoLabels(item)
if not result:
progreso.close()
return save_download_video(item)
progreso.update(0, config.get_localized_string(60062))
item.downloadFilename = filetools.validate_path("%s [%s]" % (item.contentTitle.strip(), item.contentChannel))
write_json(item)
progreso.close()
if not platformtools.dialog_yesno(config.get_localized_string(30101), config.get_localized_string(70189)):
platformtools.dialog_ok(config.get_localized_string(30101), item.contentTitle,
config.get_localized_string(30109))
else:
start_download(item)
def save_download_tvshow(item):
logger.info("contentAction: %s | contentChannel: %s | contentType: %s | contentSerieName: %s" % (
item.contentAction, item.contentChannel, item.contentType, item.contentSerieName))
progreso = platformtools.dialog_progress(config.get_localized_string(30101), config.get_localized_string(70188))
scraper.find_and_set_infoLabels(item)
item.downloadFilename = filetools.validate_path("%s [%s]" % (item.contentSerieName, item.contentChannel))
progreso.update(0, config.get_localized_string(70186), config.get_localized_string(70187) % item.contentChannel)
episodes = get_episodes(item)
progreso.update(0, config.get_localized_string(70190), " ")
for x, i in enumerate(episodes):
progreso.update(x * 100 / len(episodes),
"%dx%0.2d: %s" % (i.contentSeason, i.contentEpisodeNumber, i.contentTitle))
write_json(i)
progreso.close()
if not platformtools.dialog_yesno(config.get_localized_string(30101), config.get_localized_string(70189)):
platformtools.dialog_ok(config.get_localized_string(30101),
str(len(episodes)) + " capitulos de: " + item.contentSerieName,
config.get_localized_string(30109))
else:
for i in episodes:
res = start_download(i)
if res == STATUS_CODES.canceled:
break
def set_movie_title(item):
if not item.contentTitle:
item.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)", "", item.fulltitle).strip()
if not item.contentTitle:
item.contentTitle = re.sub("\[[^\]]+\]|\([^\)]+\)", "", item.title).strip()

46
channels/dramasjc.json Normal file

@@ -0,0 +1,46 @@
{
"id": "dramasjc",
"name": "DramasJC",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://www.dramasjc.com/wp-content/uploads/2018/03/logo.png",
"banner": "",
"version": 1,
"categories": [
"tvshow",
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE",
"VO"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
}
]
}

282
channels/dramasjc.py Normal file

@@ -0,0 +1,282 @@
# -*- coding: utf-8 -*-
# -*- Channel DramasJC -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://www.dramasjc.com/'
IDIOMAS = {'VOSE': 'VOSE', 'VO':'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['okru', 'mailru', 'openload']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Doramas", action="menu_doramas",
thumbnail=get_thumb('doramas', auto=True)))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all", url=host+'peliculas/',
type='movie', thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_doramas(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todos", action="list_all", url=host + 'series',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
thumbnail=get_thumb('genres', auto=True)))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
full_data = data
data = scrapertools.find_single_match(data, '<ul class="MovieList NoLmtxt.*?>(.*?)</ul>')
patron = '<article id="post-.*?<a href="([^"]+)">.*?(?:<img |-)src="([^"]+)".*?alt=".*?'
patron += '<h3 class="Title">([^<]+)<\/h3>.?(?:</a>|<span class="Year">(\d{4})<\/span>).*?'
patron += '(movie|TV)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
url = scrapedurl
if year == '':
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentname = scrapedtitle[0].strip()
else:
contentname = scrapedtitle
contentname = re.sub('\(.*?\)','', contentname)
title = '%s [%s]'%(contentname, year)
thumbnail = 'http:'+scrapedthumbnail
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'year':year}
)
if type == 'movie':
new_item.contentTitle = contentname
new_item.action = 'findvideos'
else:
new_item.contentSerieName = contentname
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
# Pagination
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def section(item):
logger.info()
itemlist = []
full_data = get_source(host)
data = scrapertools.find_single_match(full_data, '<a href="#">Dramas por Genero</a>(.*?)</ul>')
patron = '<a href="([^ ]+)">([^<]+)<'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
url = data_one
title = data_two
new_item = Item(channel=item.channel, title= title, url=url, action=action)
itemlist.append(new_item)
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'class="Title AA-Season On" data-tab="1">Temporada <span>([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for temporada in matches:
title = 'Temporada %s' % temporada
contentSeasonNumber = temporada
item.infoLabels['season'] = contentSeasonNumber
itemlist.append(item.clone(action='episodesxseason',
title=title,
contentSeasonNumber=contentSeasonNumber
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.contentSeasonNumber
data = get_source(item.url)
data = scrapertools.find_single_match(data, '>Temporada <span>%s</span>(.*?)</ul>' % season)
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
ep = 1
for scrapedurl, scrapedtitle in matches:
epi = str(ep)
title = season + 'x%s - Episodio %s' % (epi, epi)
url = scrapedurl
contentEpisodeNumber = epi
item.infoLabels['episode'] = contentEpisodeNumber
if 'próximamente' not in scrapedtitle.lower():
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
ep += 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.unescape(data)
data = scrapertools.decodeHtmlentities(data)
# patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
patron = 'id="(Opt\d+)">.*?src="(?!about:blank)([^"]+)" frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)"')
opt_data = scrapertools.find_single_match(data,'"%s"><span>.*?</span>.*?<span>([^<]+)</span>'%option).split('-')
language = opt_data[0].strip()
quality = opt_data[1].strip()
if 'sub' in language.lower():
language='VOSE'
else:
language = 'VO'
if url != '' and 'youtube' not in url:
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
action='play'))
elif 'youtube' in url:
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
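# 'trailer' only exists if a YouTube iframe was found above; the
# try/except silently skips appending it otherwise.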
try:
itemlist.append(trailer)
except:
pass
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host+'peliculas/'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

36
channels/dreamsub.json Normal file

@@ -0,0 +1,36 @@
{
"id": "dreamsub",
"name": "DreamSub",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://www.dreamsub.it/res/img/logo.png",
"banner": "http://www.dreamsub.it/res/img/logo.png",
"categories": ["anime","vosi"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

256
channels/dreamsub.py Normal file

@@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for dreamsub
# ------------------------------------------------------------
import re, urlparse
from core import scrapertools, httptools, servertools, tmdb
from core.item import Item
from platformcode import logger, config
host = "https://www.dreamsub.co"
def mainlist(item):
logger.info("kod.dreamsub mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Anime / Cartoni[/COLOR]",
action="serietv",
url="%s/anime" % host,
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=item.channel,
title="[COLOR azure]Categorie[/COLOR]",
action="categorie",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Ultimi episodi Anime[/COLOR]",
action="ultimiep",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def newest(categoria):
logger.info("kod.altadefinizione01 newest" + categoria)
itemlist = []
item = Item()
try:
if categoria == "anime":
item.url = "https://www.dreamsub.tv"
item.action = "ultimiep"
itemlist = ultimiep(item)
if itemlist[-1].action == "ultimiep":
itemlist.pop()
# Continue the search in case of error
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def serietv(item):
logger.info("kod.dreamsub peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data,
'<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">')
# Extract the entries
patron = 'Lingua[^<]+<br>\s*<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapedtitle.replace("Streaming", "")
scrapedtitle = scrapedtitle.replace("Lista episodi ", "")
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="tvshow",
title="[COLOR azure]%s[/COLOR]" % scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
show=scrapedtitle,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
patronvideos = '<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="serietv",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def ultimiep(item):
logger.info("kod.dreamsub ultimiep")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="last" id="recentAddedEpisodesAnimeDDM">(.*?)</ul>')
# Extract the entries
patron = '<li><a href="([^"]+)"[^>]+>([^<]+)<br>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
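# Zero-pad the trailing episode number (e.g. "Naruto 7" -> "Naruto 07")
# so episode titles sort naturally.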
ep = scrapertools.find_single_match(scrapedtitle, r'\d+$').zfill(2)
scrapedtitle = re.sub(r'\d+$', ep, scrapedtitle)
scrapedurl = host + scrapedurl
scrapedplot = ""
scrapedthumbnail = ""
cleantitle = re.sub(r'\d*-?\d+$', '', scrapedtitle).strip()
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="tvshow",
title=scrapedtitle,
fulltitle=cleantitle,
text_color="azure",
url=scrapedurl,
thumbnail=scrapedthumbnail,
show=cleantitle,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
for itm in itemlist:
itm.contentType = "episode"
return itemlist
def categorie(item):
logger.info("[dreamsub.py] categorie")
itemlist = []
data = httptools.downloadpage(item.url).data
blocco = scrapertools.find_single_match(data,
r'<select name="genere" id="genere" class="selectInput">(.*?)</select>')
patron = r'<option value="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for value in matches:
url = "%s/genere/%s" % (host, value)
itemlist.append(
Item(channel=item.channel,
action="serietv",
title="[COLOR azure]%s[/COLOR]" % value.capitalize(),
url=url,
extra="tv",
thumbnail=item.thumbnail,
folder=True))
return itemlist
def search(item, texto):
logger.info("[dreamsub.py] " + item.url + " search " + texto)
item.url = "%s/search/%s" % (host, texto)
try:
return serietv(item)
# Continue the search in case of error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def episodios(item):
logger.info("kod.channels.dreamsub episodios")
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<div class="seasonEp">(.*?)<div class="footer">')
patron = '<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, title1, title2, title3 in matches:
scrapedurl = host + scrapedurl
scrapedtitle = title1 + " " + title2 + title3
scrapedtitle = scrapedtitle.replace("Download", "")
scrapedtitle = scrapedtitle.replace("Streaming", "")
scrapedtitle = scrapedtitle.replace("& ", "")
scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
fulltitle=scrapedtitle,
show=item.show,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=item.thumbnail,
plot=item.plot,
folder=True))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
if 'keepem.online' in data:
urls = scrapertools.find_multiple_matches(data, r'(https://keepem\.online/f/[^"]+)"')
for url in urls:
url = httptools.downloadpage(url).url
itemlist += servertools.find_video_items(data=url)
for videoitem in itemlist:
server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
videoitem.title = "".join(
["[[COLOR orange]%s[/COLOR]] " % server.capitalize(), "[COLOR azure]%s[/COLOR]" % item.title])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
return itemlist

16
channels/elreyx.json Normal file

@@ -0,0 +1,16 @@
{
"id": "elreyx",
"name": "elreyx",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.elreyx.com/template/images/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

96
channels/elreyx.py Normal file

@@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://www.elreyx.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/peliculasporno.html") )
itemlist.append( Item(channel=item.channel, title="Escenas" , action="lista", url=host + "/index.html"))
itemlist.append( Item(channel=item.channel, title="Productora" , action="categorias", url=host + "/index.html") )
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html") )
itemlist.append( Item(channel=item.channel, title="Buscar", action="search") )
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search-%s" % texto + ".html"
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
if item.title == "Categorias" :
patron = '<td><a href="([^<]+)" title="Movies ([^<]+)">.*?</a>'
else:
patron = '<a href="([^<]+)" title="View Category ([^<]+)">.*?</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
url="https:" + scrapedurl
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if not "/peliculasporno" in item.url:
patron = '<div class="notice_image">.*?<a title="([^"]+)" href="([^"]+)">.*?<img src="(.*?)">'
else:
patron = '<div class="captura"><a title="([^"]+)" href="([^"]+)".*?><img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
scrapedplot = ""
url="https:" + scrapedurl
thumbnail="https:" + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
fanart=thumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="float-xs-right"><a href=\'([^\']+)\' title=\'Pagina \d+\'>')
if next_page == "":
next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
if next_page!= "":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, '<IFRAME SRC="(.*?)"')
if url == "":
url = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
data = httptools.downloadpage(url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

12
channels/eporner.json Normal file

@@ -0,0 +1,12 @@
{
"id": "eporner",
"name": "Eporner",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "eporner.png",
"banner": "eporner.png",
"categories": [
"adult"
]
}

145
channels/eporner.py Normal file

@@ -0,0 +1,145 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import jsontools
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Últimos videos", action="videos", url="http://www.eporner.com/0/"))
itemlist.append(item.clone(title="Categorias", action="categorias", url="http://www.eporner.com/categories/"))
itemlist.append(item.clone(title="Pornstars", action="pornstars_list", url="http://www.eporner.com/pornstars/"))
itemlist.append(item.clone(title="Buscar", action="search", url="http://www.eporner.com/search/%s/"))
return itemlist
def search(item, texto):
logger.info()
item.url = item.url % texto
item.action = "videos"
try:
return videos(item)
except:
import traceback
logger.error(traceback.format_exc())
return []
def pornstars_list(item):
logger.info()
itemlist = []
for letra in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
itemlist.append(item.clone(title=letra, url=urlparse.urljoin(item.url, letra), action="pornstars"))
return itemlist
def pornstars(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="mbtit" itemprop="name"><a href="([^"]+)" title="([^"]+)">[^<]+</a></div> '
patron += '<a href="[^"]+" title="[^"]+"> <img src="([^"]+)" alt="[^"]+" style="width:190px;height:152px;" /> </a> '
patron += '<div class="mbtim"><span>Videos: </span>([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, count in matches:
itemlist.append(
item.clone(title="%s (%s videos)" % (title, count), url=urlparse.urljoin(item.url, url), action="videos",
thumbnail=thumbnail))
# Pagination
patron = "<span style='color:#FFCC00;'>[^<]+</span></a> <a href='([^']+)' title='[^']+'><span>[^<]+</span></a>"
matches = re.compile(patron, re.DOTALL).findall(data)
if matches:
itemlist.append(item.clone(title="Pagina siguiente", url=urlparse.urljoin(item.url, matches[0])))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="categoriesbox" id="[^"]+"> <div class="ctbinner"> <a href="([^"]+)" title="[^"]+"> <img src="([^"]+)" alt="[^"]+"> <h2>([^"]+)</h2> </a> </div> </div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumbnail, title in matches:
itemlist.append(
item.clone(title=title, url=urlparse.urljoin(item.url, url), action="videos", thumbnail=thumbnail))
return sorted(itemlist, key=lambda i: i.title)
def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="([^"]+)" title="([^"]+)" id="[^"]+">.*?<img id="[^"]+" src="([^"]+)"[^>]+>.*?<div class="mbtim">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, duration in matches:
itemlist.append(item.clone(title="%s (%s)" % (title, duration), url=urlparse.urljoin(item.url, url),
action="play", thumbnail=thumbnail, contentThumbnail=thumbnail,
contentType="movie", contentTitle=title))
# Pagination
patron = "<span style='color:#FFCC00;'>[^<]+</span></a> <a href='([^']+)' title='[^']+'><span>[^<]+</span></a>"
matches = re.compile(patron, re.DOTALL).findall(data)
if matches:
itemlist.append(item.clone(title="Página siguiente", url=urlparse.urljoin(item.url, matches[0])))
return itemlist
def int_to_base36(num):
"""Converts a positive integer into a base36 string."""
assert num >= 0
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'.lower()
res = ''
while not res or num > 0:
num, i = divmod(num, 36)
res = digits[i] + res
return res
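# e.g. int_to_base36(0) == '0', int_to_base36(35) == 'z', int_to_base36(36) == '10'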
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = "EP: { vid: '([^']+)', hash: '([^']+)'"
vid, hash = re.compile(patron, re.DOTALL).findall(data)[0]
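# The 32-char hex hash is split into four 8-char chunks; each chunk is
# parsed as hex and re-encoded in base36, then the chunks are concatenated
# to form the hash expected by the /xhr/video endpoint.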
hash = int_to_base36(int(hash[0:8], 16)) + int_to_base36(int(hash[8:16], 16)) + int_to_base36(
int(hash[16:24], 16)) + int_to_base36(int(hash[24:32], 16))
url = "https://www.eporner.com/xhr/video/%s?hash=%s" % (vid, hash)
data = httptools.downloadpage(url).data
jsondata = jsontools.load(data)
for source in jsondata["sources"]["mp4"]:
url = jsondata["sources"]["mp4"][source]["src"]
title = source.split(" ")[0]
itemlist.append(["%s %s [directo]" % (title, url[-4:]), url])
return sorted(itemlist, key=lambda i: int(i[0].split("p")[0]))

16
channels/eroticage.json Normal file

@@ -0,0 +1,16 @@
{
"id": "eroticage",
"name": "eroticage",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://eroticage.net/wp-content/themes/oz-movie-v3/img/logo.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

80
channels/eroticage.py Normal file

@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host = 'http://www.eroticage.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data,'<h2>TAGS</h2>(.*?)<div class="sideitem"')
patron = '<a href="(.*?)".*?>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="titleFilm"><a href="([^"]+)">([^"]+)</a>.*?src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
contentTitle = scrapedtitle
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, fanart=scrapedthumbnail, contentTitle=contentTitle ))
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

17
channels/eroticasonlinetv.json Normal file

@@ -0,0 +1,17 @@
{
"id": "eroticasonlinetv",
"name": "eroticasonlinetv",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.peliculaseroticasonline.tv/wp-content/themes/wpeliculaseroticasonlinetv/favicon.png",
"banner": "",
"categories": [
"adult"
],
"settings": [
]
}

83
channels/eroticasonlinetv.py Normal file

@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
host = 'http://www.peliculaseroticasonline.tv'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)".*?>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="movie-poster"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
plot = ""
url = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url,
fanart=scrapedthumbnail, thumbnail=scrapedthumbnail, plot=plot, contentTitle = scrapedtitle) )
next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente &raquo;</a>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page ))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
url = urlparse.urljoin(item.url, url)
data = httptools.downloadpage(url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

22
channels/erotik.json Normal file

@@ -0,0 +1,22 @@
{
"id": "erotik",
"name": "Erotik",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://www.youfreeporntube.com/uploads/custom-logo.png",
"banner": "http://www.youfreeporntube.com/uploads/custom-logo.png",
"categories": [
"adult"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

Some files were not shown because too many files have changed in this diff.