Merge pull request #2 from alfa-addon/master

v2.7.29
This commit is contained in:
Intel1
2019-03-05 10:59:16 -05:00
committed by GitHub
22 changed files with 849 additions and 433 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.28" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.29" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,10 +19,13 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ maxipelis24 ¤ thevid ¤ gamovideo
¤ pack +18
Agradecimientos a @chivmalev por colaborar en ésta versión
¤ pack +18 ¤ cinehindi ¤ anonfile
¤ fembed ¤ doomtv ¤ vk
¤ vshare ¤ CineCalidad ¤ seriesblanco
¤ dospelis
[COLOR green][B]Novedades[/B][/COLOR]
¤ cineonline ¤ pelix
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -130,7 +130,7 @@ def anyos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="([^"]+)">([^<]+)</a><br'
patron = '<a href=([^>]+)>([^<]+)</a><br'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -171,8 +171,8 @@ def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li id="menu-item-.*?" class="menu-item menu-item-type-taxonomy menu-item-object-category ' \
'menu-item-.*?"><a href="([^"]+)">([^<]+)<\/a></li>'
patron = '<li id=menu-item-.*? class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-.*?'
patron +='"><a href=([^>]+)>([^<]+)<\/a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
@@ -206,8 +206,8 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="home_post_cont.*? post_box">.*?<a href="(.*?)".*?'
patron += 'src="(.*?)".*?title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
patron = '<div class="home_post_cont.*? post_box">.*?<a href=([^>]+)>.*?src=([^ ]+).*?'
patron += 'title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
@@ -232,7 +232,7 @@ def peliculas(item):
))
try:
patron = "<link rel='next' href='([^']+)' />"
patron = "<link rel=next href=([^>]+)>"
next_page = re.compile(patron, re.DOTALL).findall(data)
itemlist.append(Item(channel=item.channel,
action="peliculas",
@@ -298,7 +298,7 @@ def findvideos(item):
lang = 'latino'
data = httptools.downloadpage(item.url).data
patron = 'target="_blank".*? service=".*?" data="(.*?)"><li>(.*?)<\/li>'
patron = 'target=_blank.*? service=.*? data="(.*?)"><li>(.*?)<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
server_url = {'YourUpload': 'https://www.yourupload.com/embed/',
@@ -315,7 +315,6 @@ def findvideos(item):
if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
video_id = dec(video_cod, dec_value)
logger.debug('server_id %s' % server_id)
if server_id in server_url:
server = server_id.lower()
thumbnail = item.thumbnail

View File

@@ -3,7 +3,7 @@
"name": "CineHindi",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["vos"],
"thumbnail": "cinehindi.png",
"banner": "http://i.imgur.com/cau9TVe.png",
"categories": [

View File

@@ -27,8 +27,8 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host, thumbnail = get_thumb("genres", auto = True)))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
url=urlparse.urljoin(host, "proximamente")))
#itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
# url=urlparse.urljoin(host, "proximamente")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s="), thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -38,8 +38,8 @@ def genero(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(host).data
patron = 'level-0.*?value="([^"]+)"'
patron += '>([^<]+)'
patron = '<option class=.*? value=([^<]+)>'
patron += '([^<]+)<\/option>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
if 'Próximas Películas' in scrapedtitle:
@@ -94,28 +94,29 @@ def lista(item):
else:
url = httptools.downloadpage("%s?cat=%s" %(host, item.cat), follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage(url).data
bloque = scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
patron = 'class="item">.*?' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href="([^"]+).*?' # scrapedurl
patron += '<img src="([^"]+).*?' # scrapedthumbnail
patron += 'alt="([^"]+).*?' # scrapedtitle
patron += '<div class="fixyear">(.*?)</span></div><' # scrapedfixyear
bloque = data#scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
patron = '<div id=mt.+?>' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href=([^"]+)\/><div class=image>' # scrapedurl
patron += '<img src=([^"]+) alt=.*?' # scrapedthumbnail
patron += '<span class=tt>([^"]+)<\/span>' # scrapedtitle
patron += '<span class=ttx>([^"]+)<div class=degradado>.*?' # scrapedplot
patron += '<span class=year>([^"]+)<\/span><\/div><\/div>' # scrapedfixyear
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedfixyear in matches:
patron = '<span class="year">([^<]+)' # scrapedyear
scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedyear in matches:
#patron = '<span class="year">([^<]+)' # scrapedyear
#scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
scrapedtitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle,'\(\d{4}\)'),'').strip()
title = scrapedtitle
if scrapedyear:
title += ' (%s)' % (scrapedyear)
item.infoLabels['year'] = int(scrapedyear)
patron = '<span class="calidad2">([^<]+).*?' # scrapedquality
scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
if scrapedquality:
title += ' [%s]' % (scrapedquality)
#scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
#if scrapedquality:
# title += ' [%s]' % (scrapedquality)
itemlist.append(
item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, plot=scrapedplot, contentType="movie", context=["buscar_trailer"]))
tmdb.set_infoLabels(itemlist)
# Paginacion
patron = 'rel="next" href="([^"]+)'

View File

@@ -0,0 +1,63 @@
{
"id": "cineonline",
"name": "cineonline",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://www.cine-online.eu/wp-content/uploads/2015/04/CINE-logo-bueno.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re, urllib, urlparse
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'https://www.cine-online.eu'
# Site language labels -> canonical channel language codes used by filtertools.
IDIOMAS = {'Español': 'ESP', 'Cast': 'ESP', 'Latino': 'LAT', 'Lat': 'LAT', 'Subtitulado': 'VOSE', 'Sub': 'VOSE'}
list_language = IDIOMAS.values()
# Servers/qualities announced to autoplay.init() in mainlist().
list_servers = ['Streamango', 'Vidoza', 'Openload', 'Streamcherry', 'Netutv']
list_quality = []
__channel__='cineonline'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
# Best-effort read: falls back to True if the setting lookup raises.
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True
def mainlist(item):
    """Top-level channel menu: movies, series, search and settings."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    entries = [
        item.clone(title="Películas", action="mainlist_pelis"),
        item.clone(title="Series", action="lista", url=host + "/serie/"),
        item.clone(title="Buscar", action="search"),
        item.clone(title="Configurar canal...", text_color="gold",
                   action="configuracion", folder=False),
    ]
    autoplay.show_option(item.channel, entries)
    return entries
def mainlist_pelis(item):
    """Movies sub-menu: newest, per-language tags, categories, year, search."""
    logger.info()
    menu = (
        ("Novedades", "lista", host),
        ("Castellano", "lista", host + "/tag/castellano/"),
        ("Latino", "lista", host + "/tag/latino/"),
        ("Subtituladas", "lista", host + "/tag/subtitulado/"),
        ("Categorias", "categorias", host),
        ("Año", "categorias", host),
    )
    itemlist = [item.clone(title=t, action=a, url=u) for t, a, u in menu]
    itemlist.append(item.clone(title='Buscar', action='search', search_type='movie'))
    return itemlist
def configuracion(item):
    """Show the channel settings dialog, then refresh the current listing."""
    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
def search(item, texto):
    """Run a site search for *texto* and return the scraped result items.

    Any scraping error is logged and swallowed so the UI receives an
    empty list instead of crashing (convention for search entry points).
    """
    logger.info()
    query = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % query
    try:
        return lista(item)
    except:
        import sys
        for info in sys.exc_info():
            logger.error("%s" % info)
        return []
def categorias(item):
    """List category links: genres (with element counts) or release years.

    The "Año" menu entry reuses this action; that branch scrapes the
    year list, which carries no element count.

    Fix: the original year-branch pattern captured the closing tag
    letter ('</(\w)>' -> "a") as the count, so titles rendered "2018 a".
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if "Año" in item.title:
        data = scrapertools.get_match(data, '<h3>Año de estreno(.*?)</ul>')
        patron = '<li><a href="([^"]+)">(\d+)</\w>'
        # No count for years; pad with '' so both branches unpack alike.
        matches = [(u, t, '') for u, t in re.compile(patron, re.DOTALL).findall(data)]
    else:
        patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a> <span>(\d+)</span>'
        matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, cantidad in matches:
        title = scrapedtitle + " %s" % cantidad if cantidad else scrapedtitle
        itemlist.append(item.clone(channel=item.channel, action="lista", title=title,
                                   url=scrapedurl, thumbnail="", plot=""))
    return itemlist
def lista(item):
    """Scrape one listing page into movie/series items.

    Entries whose URL contains '/serie/' become series (action
    'temporadas'); everything else is treated as a movie ('findvideos').
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: url, thumbnail, title, year.
    patron = '<div id="mt-\d+".*?<a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)".*?'
    patron += '<span class="year">(\d+)</span>.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        # Titles come as "Ver <name> online"; strip the boilerplate words.
        scrapedtitle = scrapedtitle.replace("Ver", "").replace("online", "")
        title = '%s (%s)' % (scrapedtitle, scrapedyear)
        url = scrapedurl
        new_item = Item(channel=item.channel,
                        title=title,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        infoLabels={'year': scrapedyear})
        if '/serie/' in url:
            new_item.action = 'temporadas'
            new_item.contentSerieName = scrapedtitle
        else:
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        itemlist.append(new_item)
    # Enrich all items with TMDB metadata in one batch.
    tmdb.set_infoLabels(itemlist, True)
    next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente</a>')
    if next_page_url != "":
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(item.clone(channel=item.channel, action="lista", title="Next page >>",
                                   text_color="blue", url=next_page_url))
    return itemlist
def temporadas(item):
    """List the seasons found on a series page; optionally append the
    'add series to videolibrary' entry."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    season_numbers = re.findall('<span class="se-t">(\d+)</span>', data, re.DOTALL)
    itemlist = [
        item.clone(action='episodesxseason', title='Temporada %s' % n, url=item.url,
                   contentType='season', contentSeason=n)
        for n in season_numbers
    ]
    if itemlist and config.get_videolibrary_support():
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Flatten every season of the series into one list of episode items."""
    logger.info()
    episodes = []
    for season_item in temporadas(item):
        episodes.extend(episodesxseason(season_item))
    return episodes
def episodesxseason(item):
    """List the episodes of a single season (item.contentSeason)."""
    logger.info()
    data = httptools.downloadpage(item.url).data
    patron = ('<div class="numerando">%s x (\d+)</div>.*?' % item.contentSeason
              + '<a href="([^"]+)">([^"]+)</a>')
    itemlist = [
        item.clone(action='findvideos', url=ep_url,
                   title='%sx%s %s' % (item.contentSeason, ep_num, ep_title),
                   contentType='episode', contentEpisodeNumber=ep_num)
        for ep_num, ep_url, ep_title in re.compile(patron, re.DOTALL).findall(data)
    ]
    tmdb.set_infoLabels(itemlist)
    return itemlist
def findvideos(item):
    """Resolve the playable links of a movie/episode page.

    Each player tab ("plays-N") is matched to its language label, the
    real URL is obtained by POSTing the player name to /ecrypt, and the
    resulting items are run through the servertools/filtertools/autoplay
    pipeline (the order of those calls matters).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = 'id="plays-(\d+)">\s*([^<]+)</div'
    matches = scrapertools.find_multiple_matches(data, patron)
    for xnumber, xname in matches:
        # Episodes and movies place the language label in different anchors.
        if "/episodios/" in item.url:
            lang = scrapertools.find_single_match(data, '#player2%s">([^<]+)</a>' % xnumber)
        else:
            lang = scrapertools.find_single_match(data, '#div%s">([^<]+)<' % xnumber)
        # Normalise free-form labels to the IDIOMAS keys.
        if "lat" in lang.lower(): lang= "Lat"
        if 'cast' in lang.lower(): lang= "Cast"
        if 'sub' in lang.lower(): lang= "Sub"
        if lang in IDIOMAS:
            lang = IDIOMAS[lang]
        # The site hides the embed URL behind a POST endpoint.
        post= {"nombre":xname}
        url= httptools.downloadpage("https://www.cine-online.eu/ecrypt", post=urllib.urlencode(post)).data
        url = scrapertools.find_single_match(url,'<(?:IFRAME SRC|iframe src)="([^"]+)"')
        if not config.get_setting('unify'):
            title = ' (%s)' % (lang)
        else:
            title = ''
        if url != '':
            # '%s' placeholder is later filled with the server name below.
            itemlist.append(item.clone(action="play", title='%s'+title, url=url, language=lang ))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required for link filtering.
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required for FilterTools.
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay.
    autoplay.start(itemlist, item)
    if not "/episodios/" in item.url:
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos':
            itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
                                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                                 extra="findvideos", contentTitle=item.contentTitle))
    return itemlist

View File

@@ -15,12 +15,10 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'latino': 'Latino'}
IDIOMAS = {'Latino': 'Latino'}
list_language = IDIOMAS.values()
CALIDADES = {'1080p': '1080p', '720p': '720p', '480p': '480p', '360p': '360p'}
list_quality = CALIDADES.values()
list_servers = ['directo', 'openload']
list_quality = []
list_servers = ['dostream', 'openload']
host = 'http://doomtv.net/'
@@ -28,6 +26,8 @@ host = 'http://doomtv.net/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
@@ -65,6 +65,8 @@ def mainlist(item):
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -75,7 +77,6 @@ def get_source(url, referer=None):
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
return data
def lista(item):
@@ -98,9 +99,9 @@ def lista(item):
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]:
url = 'http:'+scrapedurl
thumbnail = scrapedthumbnail
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
url = host+scrapedurl
thumbnail = 'https:'+scrapedthumbnail.strip()
filtro_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb.strip()}
filtro_list = filtro_list.items()
title = scrapedtitle
@@ -144,7 +145,7 @@ def seccion(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = 'http:'+ scrapedurl
url = host+scrapedurl
title = scrapedtitle
thumbnail = ''
if url not in duplicado:
@@ -196,22 +197,36 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = 'id="(tab\d+)"><div class="movieplay">.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
language = 'Latino'
if 'http' not in urls:
urls = 'https:'+urls
if not config.get_setting('unify'):
title = ' [%s]' % language
else:
title = '%s'
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
title= '%s'+ title,
contentTitle=item.title,
action='play',
language = IDIOMAS[language],
infoLabels = item.infoLabels
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
@@ -223,4 +238,5 @@ def findvideos(item):
contentTitle=item.contentTitle,
))
return itemlist

View File

@@ -4,7 +4,7 @@
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://www.dospelis.com/wp-content/uploads/2018/07/dospelislogo.png",
"thumbnail": "https://www.dospelis.net/wp-content/uploads/2019/02/logodospelisamor.png",
"banner": "",
"categories": [
"movie",

View File

@@ -90,11 +90,11 @@ def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host+'/'+item.type)
data = get_source(host+item.type)
if 'Genero' in item.title:
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >(.*?)/i>'
patron = '<liclass="cat-item cat-item-\d+"><ahref=([^ ]+) .*?>(.*?)/i>'
elif 'Año' in item.title:
patron = '<li><a href="(.*?release.*?)">([^<]+)</a>'
patron = '<li><ahref=(.*?release.*?)>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -102,7 +102,7 @@ def section(item):
title = scrapedtitle
plot=''
if 'Genero' in item.title:
quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>(.*?)<')
quantity = scrapertools.find_single_match(scrapedtitle,'<i>(.*?)<')
title = scrapertools.find_single_match(scrapedtitle,'(.*?)</')
title = title
plot = '%s elementos' % quantity.replace('.','')
@@ -123,9 +123,8 @@ def list_all(item):
data = get_source(item.url)
if item.type == 'movies':
patron = '<article id="post-\d+" class="item movies"><div class="poster">.?<img src="([^"]+)" alt="([^"]+)">.*?'
patron +='"quality">([^<]+)</span><\/div>.?<a href="([^"]+)">.*?'
patron +='<\/h3>.?<span>([^"]+)<\/span><\/div>.*?"flags"(.*?)metadata'
patron = '<articleid=post-\d+ class="item movies"><divclass=poster>.?<imgsrc=([^ ]+) alt="([^"]+)">.*?'
patron += 'quality>([^<]+)<.*?<ahref=([^>]+)>.*?<\/h3><span>([^<]+)<.*?flags(.*?)metadata'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -148,8 +147,8 @@ def list_all(item):
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<article id="post-\d+" class="item tvshows">.?<div class="poster">.?<img src="([^"]+)"'
patron += ' alt="([^"]+)">.*?<a href="([^"]+)">.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
patron = '<articleid=post-\d+ class="item tvshows">.?<divclass=poster>.?<imgsrc=([^ ]+)'
patron += ' alt="([^"]+)">.*?<ahref=([^>]+)>.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
@@ -168,7 +167,7 @@ def list_all(item):
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
url_next_page = scrapertools.find_single_match(data,'<linkrel=next href=([^>]+)>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
@@ -180,7 +179,7 @@ def seasons(item):
itemlist=[]
data=get_source(item.url)
patron='Temporada.?\d+'
patron='title>Temporada.?(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -214,7 +213,7 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron='class="numerando">%s - (\d+)</div>.?<div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
patron='class=numerando>%s - (\d+)</div>.?<divclass=episodiotitle>.?<ahref=([^>]+)>([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -236,12 +235,15 @@ def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id="option-(\d+)".*?rptss" src="([^"]+)" frameborder'
patron = 'id=option-(\d+).*?src=([^ ]+) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
lang=''
for option, scrapedurl in matches:
lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option)
quality = ''
if 'goo.gl' in scrapedurl:
new_data = httptools.downloadpage(scrapedurl, follow_redirects=False).headers
scrapedurl = new_data['location']
if lang not in IDIOMAS:
lang = 'en'
title = '%s %s'
@@ -291,8 +293,7 @@ def search_results(item):
itemlist=[]
data=get_source(item.url)
patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" \/>.*?meta.*?'
patron += '"year">([^<]+)<(.*?)<p>([^<]+)<\/p>'
patron = '<article>.*?<ahref=([^>]+)><imgsrc=([^ ]+) alt="([^"]+)">.*?year>([^<]+)<(.*?)<p>([^<]+)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

View File

@@ -1,33 +0,0 @@
{
"id": "mastorrents",
"name": "MasTorrents",
"active": true,
"adult": false,
"language": ["cast","lat"],
"thumbnail": "https://s33.postimg.cc/3y8720l9b/mastorrents.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"tvshow",
"torrent"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,323 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel MasTorrents -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from platformcode import logger
from platformcode import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://www.mastorrents.com/'  # base URL for every request in this channel
def mainlist(item):
    """Root menu of the channel: movies and series sections."""
    logger.info()
    return [
        item.clone(title="Peliculas", action="movie_list",
                   thumbnail=get_thumb("channels_movie.png")),
        item.clone(title="Series", action="series_list",
                   thumbnail=get_thumb("channels_tvshow.png")),
    ]
def movie_list(item):
    """Movies sub-menu: full listing, genres and search."""
    logger.info()
    entries = []
    entries.append(item.clone(title="Todas", action="lista", url=host + 'peliculas',
                              extra='movie', thumbnail=get_thumb('all', auto=True)))
    entries.append(item.clone(title="Generos", action="genres", url=host,
                              extra='movie', thumbnail=get_thumb('genres', auto=True)))
    entries.append(item.clone(title="Buscar", action="search", url=host + '?pTit=',
                              thumbnail=get_thumb('search', auto=True), extra='movie'))
    return entries
def series_list(item):
    """Series sub-menu: full listing, genres and search."""
    logger.info()
    return [
        item.clone(title="Todas", action="lista", url=host + 'series',
                   extra='serie', thumbnail=get_thumb('all', auto=True)),
        item.clone(title="Generos", action="genres", url=host + 'series/',
                   extra='serie', thumbnail=get_thumb('genres', auto=True)),
        item.clone(title="Buscar", action="search", url=host + 'series/?pTit=',
                   extra='serie', thumbnail=get_thumb('search', auto=True)),
    ]
def get_source(url):
    """Download *url* and strip quotes, control chars, &nbsp;, <br> and
    whitespace runs.

    NOTE: double quotes are removed too, which is why every regex in
    this channel matches unquoted HTML attributes.
    """
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def lista(item):
    """Scrape a listing page into movie/series items.

    The fourth capture group is either a year (movies) or an "SxE"
    marker (series); the presence of an 'x' decides which.

    Fixes: the original built ONE infoLabels dict outside the loop and
    passed the same reference to every Item, so all entries aliased the
    last item's metadata; also removed a dead `import inspect` and the
    unused `actual_page_url` local.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = "<div class=moviesbox>.*?</div><a href=(.*?)><div class=moviesbox_img style=background-image:url\('("
    patron += ".*?)'\)>.*?tooltipbox>(.*?)(?: <i>| <br /><i>)(.*?)<"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, extra_data in matches:
        extra_data = extra_data.replace('(', '').replace(')', '')
        contentTitle = scrapedtitle.decode('latin1').encode('utf8')
        infoLabels = dict()  # fresh dict per item (see docstring)
        tvshow = 'x' in extra_data
        if tvshow:
            # Use the poster path as a TMDB search filter for series.
            filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
            infoLabels['filtro'] = {"poster_path": filtro_thumb}.items()
        else:
            infoLabels['year'] = extra_data
        new_item = Item(channel=item.channel,
                        action='findvideos',
                        title=contentTitle,
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        infoLabels=infoLabels,
                        extra=item.extra)
        if tvshow:
            new_item.contentSerieName = contentTitle
            new_item.action = 'seasons'
        else:
            new_item.contentTitle = contentTitle
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    if itemlist:
        next_page = scrapertools.find_single_match(data, '<span class=pagination_next><a href=(.*?)>')
        if next_page != '':
            itemlist.append(item.clone(action="lista", title='Siguiente >>>', url=next_page))
    return itemlist
def genres(item):
    """Build one listing item per genre from the site's <select> options."""
    logger.info()
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, 'G&eacute;neros</option>(.+)</select></div>')
    pairs = re.compile('<option value=(.*?)>(.*?)</option>', re.DOTALL).findall(data)
    return [Item(channel=item.channel,
                 title=label.decode('latin1').encode('utf8'),
                 url=item.url + value,
                 action='lista')
            for value, label in pairs]
def search(item, texto):
    """Append the query to the prepared search URL and list the results."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    if not query:
        return []
    return lista(item)
def seasons(item):
    """List the seasons of a series (reversed scrape order), plus the
    'add to videolibrary' entry.

    NOTE(review): a single dict (item.infoLabels) is mutated and passed
    to every Item below — if Item stores the reference rather than
    copying, all seasons end up sharing the last season number; verify
    against core.item.Item.
    """
    logger.info()
    itemlist=[]
    infoLabels = item.infoLabels
    data=get_source(item.url)
    patron ='href=javascript:showSeasson\(.*?\); id=.*?>Temporada (.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for season in matches:
        title='Temporada %s' % season
        infoLabels['season'] = season
        itemlist.append(Item(channel=item.channel,
                             title= title,
                             url=item.url,
                             action='episodesxseasons',
                             contentSeasonNumber=season,
                             contentSerieName=item.contentSerieName,
                             infoLabels=infoLabels
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Present seasons in reverse of the scraped order.
    itemlist = itemlist[::-1]
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName))
    return itemlist
def all_episodes(item):
    """Expand every season into its episodes (used by the videolibrary)."""
    logger.info()
    result = []
    for season_item in seasons(item):
        result += episodesxseasons(season_item)
    return result
def episodesxseasons(item):
    """List the episodes of one season, in reverse of the scraped order.

    NOTE(review): item.infoLabels is mutated and handed to every Item;
    if Item keeps the reference rather than copying, all episodes share
    the last episode number — verify against core.item.Item.
    """
    logger.info()
    itemlist=[]
    data=get_source(item.url)
    # Captures: episode number, url, thumbnail; title is anchored to the
    # exact series name so other shows on the page don't match.
    patron = "<div class=corner-episode>%sx(.\d+)<\/div><a href=(.*?)>.*?" % item.contentSeasonNumber
    patron += "image:url\('(.*?)'.*?href.*?>(%s)<" % item.contentSerieName
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels=item.infoLabels
    for episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        contentEpisodeNumber=episode
        season = item.contentSeasonNumber
        url=scrapedurl
        thumbnail=scrapedthumbnail
        infoLabels['episode']=episode
        title = '%sx%s - %s' % (season, episode, item.contentSerieName)
        itemlist.append(Item(channel=item.channel,
                             action='findvideos',
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             contentSerieName=item.contentSerieName,
                             contentEpisodeNumber=contentEpisodeNumber,
                             infoLabels=infoLabels
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist[::-1]
def findvideos(item):
    """Extract torrent links from a detail page.

    The page exposes download data through showDownload(...) javascript
    calls; the argument list is split on ",'" and the torrent URL taken
    from a fixed position (movies: index 2, series: index 3).
    """
    logger.info()
    itemlist=[]
    data = get_source(item.url)
    patron = "showDownload\(([^\)]+)\);.*?alt=.*?torrent (.*?) "
    matches = re.compile(patron, re.DOTALL).findall(data)
    for extra_info, quality in matches:
        extra_info= extra_info.replace(",'",'|')
        extra_info= extra_info.split('|')
        title = '%s [%s]' % ('Torrent', quality.strip())
        if item.extra == 'movie':
            url = extra_info[2].strip("'")
        else:
            url = extra_info[3].strip("'")
        server = 'torrent'
        if not '.torrent' in url:
            # Some hosts redirect to the real .torrent; follow one redirect
            # manually ('tvsinpagar' additionally needs the www. prefix).
            if 'tvsinpagar' in url:
                url = url.replace('http://','http://www.')
            try:
                from_web = httptools.downloadpage(url, follow_redirects=False)
                url = from_web.headers['location']
            except:
                pass  # best-effort: the '.torrent' check below drops it
        if '.torrent' in url:
            itemlist.append(Item(channel=item.channel,
                                 title=title,
                                 contentTitle= item.title,
                                 url=url,
                                 action='play',
                                 quality=quality,
                                 server=server,
                                 thumbnail = item.infoLabels['thumbnail'],
                                 infoLabels=item.infoLabels
                                 ))
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_pelicula_to_library",
                             extra="findvideos",
                             contentTitle=item.contentTitle
                             ))
    return itemlist
def newest(category):
    """Entry point for the global "Novedades" (newest) feature.

    Returns the latest movies for 'peliculas'/'torrent' and additionally
    the latest series for 'torrent'. Scraping errors are logged and an
    empty list is returned.

    Fix: `itemlist` is now initialised up front — the original left it
    unbound for unhandled categories, raising NameError at the final
    `return itemlist` (which sits OUTSIDE the try/except).
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if category in ['peliculas', 'torrent']:
            item.url = host + 'estrenos-de-cine'
            item.extra = 'movie'
            itemlist = lista(item)
            # Drop the pagination pseudo-item; newest listings are flat.
            if itemlist[-1].title == 'Siguiente >>>':
                itemlist.pop()
        if category == 'torrent':
            item.url = host + 'series'
            item.extra = 'serie'
            itemlist.extend(lista(item))
            if itemlist[-1].title == 'Siguiente >>>':
                itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist

View File

@@ -348,7 +348,7 @@ def listado(item):
title = re.sub(r'- $', '', title)
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)
title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title)
#Terminamos de limpiar el título
title = re.sub(r'\??\s?\d*?\&.*', '', title)

View File

@@ -0,0 +1,78 @@
{
"id": "pelix",
"name": "Pelix",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://pelix.tv/build/images/logo.png",
"banner": "",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,352 @@
# -*- coding: utf-8 -*-
# -*- Channel Pelix -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
# Site language ids mapped to display names (unknown ids fall back to '6')
IDIOMAS = {'6': 'Latino', '7': 'Castellano'}
list_language = IDIOMAS.values()
# Site quality ids mapped to display labels ('3' and '4' both render 720p)
CALIDADES = {'1': '1080p', '3': '720p', '4':'720p'}
list_quality = CALIDADES.values()
# Servers offered to AutoPlay
list_servers = [
    'openload',
    'streamango',
    'fastplay',
    'rapidvideo',
    'netutv'
]
# User settings: whether to verify links before listing, and how many
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'pelix')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'pelix')
host = 'https://pelix.tv/'
def mainlist(item):
    """Root menu of the channel: movies, series, search; registers AutoPlay."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    menu = []
    menu.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
                     thumbnail=get_thumb('movies', auto=True), page=0))
    menu.append(Item(channel=item.channel, title='Series', url=host + 'home/genero/5', action='list_all',
                     type='tvshows', thumbnail=get_thumb('tvshows', auto=True), page=0))
    menu.append(item.clone(title="Buscar", action="search", url=host + 'movies/headserach',
                           thumbnail=get_thumb("search", auto=True), extra='movie'))
    autoplay.show_option(item.channel, menu)
    return menu
def menu_movies(item):
    """Sub-menu for the movie section: latest, by genre, by year."""
    logger.info()
    entries = [
        Item(channel=item.channel, title='Ultimas', url=host, path='home/newest?show=', action='list_all',
             thumbnail=get_thumb('last', auto=True), type='movies', page=0),
        Item(channel=item.channel, title='Genero', action='section',
             thumbnail=get_thumb('genres', auto=True), type='movies'),
        Item(channel=item.channel, title='Por Año', action='section',
             thumbnail=get_thumb('year', auto=True), type='movies'),
    ]
    return entries
def get_source(url):
    """Download *url* and return its HTML with layout whitespace collapsed."""
    logger.info()
    raw = httptools.downloadpage(url).data
    # Strip newlines, tabs, &nbsp;, <br> and runs of spaces so the scraping
    # regexes can match across what the browser renders as one line.
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def get_language(lang_data):
    """Extract the unique language codes from flag-image URLs in *lang_data*.

    The site uses flag images named /flags/<code>.png; 'en' is presented to
    the user as 'vose'. Order of first appearance is preserved.
    """
    logger.info()
    language = []
    for code in scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)'):
        code = 'vose' if code == 'en' else code
        if code not in language:
            language.append(code)
    return language
def section(item):
    """List the genre or year filters scraped from the site's main menu."""
    logger.info()
    itemlist = []
    data = get_source(host)
    # Narrow the HTML to the relevant dropdown before extracting links
    if 'Genero' in item.title:
        data = scrapertools.find_single_match(data, '<a href="#">Género</a>(.*?)</ul>')
    elif 'Año' in item.title:
        data = scrapertools.find_single_match(data, '<a href="#">Año</a>(.*?)</ul>')
    for link, label in re.findall('<a href="([^"]+)">([^<]+)</a>', data, re.DOTALL):
        itemlist.append(Item(channel=item.channel, url=link, title=label, action='list_all',
                             type=item.type, page=0))
    return itemlist
def list_all(item):
    """List movies or series from a section page, with AJAX-based paging.

    item.page == 0 loads the plain HTML page at item.url + item.path; later
    pages are fetched from the site's POST endpoint. item.type decides
    whether entries point to findvideos (movies) or seasons (series).
    """
    logger.info()
    itemlist = []
    if item.page == 0:
        data = get_source(item.url + item.path)
    else:
        # urllib is already imported at module level; the original
        # function-local "import urllib" was redundant and has been removed.
        post = urllib.urlencode({'page': str(item.page)})
        data = httptools.downloadpage(host + 'home/%sAjax/%s' % ('newest', str(item.page)), post=post).data
        data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = '<div class="base-used">.*?<a href="([^"]+)">.*?<img class="img-thumbnail" src="([^"]+)".*?'
    patron += '<h2>([^<]+)</h2><p class="year">(\d{4})</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
        new_item = Item(channel=item.channel,
                        title='%s [%s]' % (scrapedtitle, year),
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        infoLabels={'year': year})
        if item.type == 'movies':
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        else:
            new_item.action = 'seasons'
            new_item.contentSerieName = scrapedtitle
        itemlist.append(new_item)
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination: the AJAX endpoint serves entries in batches of 30
    next_page = item.page + 30
    itemlist.append(item.clone(title="Siguiente >>", url=item.url, action='list_all', page=next_page, path=item.path))
    return itemlist
def seasons(item):
    """List the seasons of a series; fall back to findvideos for movies.

    The site marks seasons with data-type="host" anchors; when none are
    present the page is a single title, so the item is handed straight to
    findvideos. Adds the 'add to videolibrary' entry when supported.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'data-type="host">(Temporada \d+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # findall returns [] (never None) when nothing matches; the original
    # "if matches is None" check was dead and the fallback never fired.
    if not matches:
        return findvideos(item)
    for season in matches:
        season = season.lower().replace('temporada', '')
        # Copy infoLabels per iteration so every season item keeps its own
        # season number instead of all sharing the last one.
        infoLabels = dict(item.infoLabels)
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Flatten every season into one list of episodes (videolibrary hook)."""
    logger.info()
    all_episodes = []
    for season_item in seasons(item):
        all_episodes.extend(episodesxseasons(season_item))
    return all_episodes
def episodesxseasons(item):
    """List the episodes of one season (item.infoLabels['season'])."""
    logger.info()
    itemlist = []
    duplicados = []
    data = get_source(item.url)
    patron = 'data-id="(\d+)" season="%s" id_lang="(\d+)" id_movies_types="\d".*?' \
             'block;">([^<]+)</a>' % item.infoLabels['season']
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedepisode, lang, scrapedtitle in matches:
        # Copy infoLabels per iteration so every episode item keeps its own
        # episode number instead of all sharing the last one.
        infoLabels = dict(item.infoLabels)
        infoLabels['episode'] = scrapedepisode
        title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
        # The same episode appears once per language/host; keep the first
        if scrapedepisode not in duplicados:
            itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='findvideos',
                                 infoLabels=infoLabels))
            duplicados.append(scrapedepisode)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Collect playable links for a movie or episode.

    If the page lacks the episode="0" season="0" marker and the item is not
    already an episode, the title is actually a series: the item is rebuilt
    as a tvshow and re-routed through seasons(). Otherwise links are scraped
    with language/quality tags, resolved to servers, optionally verified,
    filtered and handed to AutoPlay.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    if 'episode="0" season="0"' not in data and item.contentType != 'episode':
        # Movie item that turned out to be a series: convert and delegate
        item.contentSerieName = item.contentTitle
        item.contentTitle = None
        item.contentType = None
        item.infoLabels = None
        tmdb.set_infoLabels_item(item, seekTmdb=True)
        return seasons(item)
    if 'episode="0" season="0"' not in data:
        season = item.infoLabels['season']
        episode = item.infoLabels['episode']
    else:
        # Movies are encoded on the site as season 0 / episode 0
        season = '0'
        episode = '0'
    patron = '<span class="movie-online-list" id_movies_types="(\d)".*?'
    patron += 'episode="%s" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)" link-id="\d+">' % (episode, season)
    matches = re.compile(patron, re.DOTALL).findall(data)
    for quality_value, lang_value, scrapedurl in matches:
        # Unknown site codes fall back to Latino ('6') / 720p ('3')
        if lang_value not in IDIOMAS:
            lang_value = '6'
        if quality_value not in CALIDADES:
            quality_value = '3'
        language = IDIOMAS[lang_value]
        quality = CALIDADES[quality_value]
        if not config.get_setting("unify"):
            title = ' [%s] [%s]' % (quality, language)
        else:
            title = ''
        # The leading '%s' placeholder is filled with the server name below
        itemlist.append(Item(channel=item.channel, url=scrapedurl, title='%s'+title, action='play',
                             language=language, quality=quality, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required for link verification (user setting)
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
    autoplay.start(itemlist, item)
    itemlist = sorted(itemlist, key=lambda it: it.language)
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Entry point for the channel search; *texto* is the user's query.

    Stores the POST payload on the item and delegates to search_results;
    an empty query yields an empty list. (The no-op ``item.url = item.url``
    of the original has been removed.)
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.post = 'search=%s' % texto
    if texto != '':
        return search_results(item)
    return []
def search_results(item):
    """Scrape the site's AJAX search response into movie items."""
    logger.info()
    itemlist = []
    headers = {'Referer': host, 'X-Requested-With': 'XMLHttpRequest'}
    data = httptools.downloadpage(item.url, headers=headers, post=item.post).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = 'class="results\d+".*?<a href="([^"]+)"><img src="([^"]+)".*?#\w+">([^<]+)<'
    for found_url, found_thumb, found_title in re.findall(patron, data, re.DOTALL):
        # Titles may embed the year as "Name (2017)"; split it out
        if '(' in found_title:
            title = scrapertools.find_single_match(found_title, '(.*?)\(').strip()
            year = scrapertools.find_single_match(found_title, '\((\d+)\)')
        else:
            title = found_title
            year = '-'
        itemlist.append(Item(channel=item.channel, title=title, url=found_url, thumbnail=found_thumb,
                             action='findvideos', infoLabels={'year': year}))
    return itemlist
def newest(categoria):
    """Feed the global 'novedades' section; returns [] on any failure."""
    logger.info()
    itemlist = []
    item = Item()
    item.type = 'movies'
    item.page = 0
    category_urls = {
        'peliculas': host + 'home/newest?show=',
        'infantiles': host + 'home/genero/54',
        'terror': host + 'home/genero/49',
    }
    try:
        if categoria in category_urls:
            item.url = category_urls[categoria]
        itemlist = list_all(item)
        # Drop the pagination pseudo-item so it is not shown as content
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        # Deliberate best-effort: log and return [] so 'novedades' survives
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -336,8 +336,8 @@ def listado(item):
item_local.season_colapse = True #Muestra las series agrupadas por temporadas
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online', '', title, flags=re.IGNORECASE).strip()
item_local.quality = re.sub(r'proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality, flags=re.IGNORECASE).strip()
title = re.sub(r'(?i)TV|Online', '', title).strip()
item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip()
#Analizamos el año. Si no está claro ponemos '-'
try:
@@ -472,7 +472,7 @@ def findvideos(item):
item_local.quality = ''
title = title.replace('.', ' ')
item_local.quality = item_local.quality.replace('.', ' ')
item_local.quality = re.sub(r'proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality, flags=re.IGNORECASE).strip()
item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip()
#Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent
size = scrapedsize

View File

@@ -114,6 +114,49 @@ def list_all(item):
))
return itemlist
def list_from_genre(item):
    """List the series of a genre page, appending a 'Next' page item."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)"'
    for show_url, show_thumb in re.findall(patron, data, re.DOTALL):
        # The series name only appears in the URL slug; prettify it
        name = scrapertools.find_single_match(show_url, 'https://seriesblanco.org/capitulos/([^/]+)/')
        name = name.replace('-', ' ').capitalize()
        itemlist.append(Item(channel=item.channel,
                             action='seasons',
                             title=name,
                             url=show_url,
                             thumbnail=show_thumb,
                             contentSerieName=name,
                             context=filtertools.context(item, list_language, list_quality),
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination: only offered when the page yielded results
    if itemlist:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" ><i class="Next')
        if next_page:
            itemlist.append(Item(channel=item.channel,
                                 action="list_from_genre",
                                 title='Siguiente >>>',
                                 url=next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 ))
    return itemlist
def section(item):
logger.info()
@@ -121,8 +164,10 @@ def section(item):
data = get_source(item.url)
if item.title == 'Generos':
patron = '<li><a href="([^ ]+)"><i class="fa fa-bookmark-o"></i> ([^<]+)</a></li>'
action = 'list_from_genre'
elif item.title == 'A - Z':
patron = '<a dir="ltr" href="([^"]+)" class="label label-primary">([^<]+)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -130,7 +175,7 @@ def section(item):
url = scrapedurl
title = scrapedtitle
itemlist.append(Item(channel=item.channel,
action='list_all',
action=action,
title=title,
url=url
))

View File

@@ -294,7 +294,7 @@ def listado(item):
title = re.sub(r'\d+[M|m|G|g][B|b]', '', title)
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren|\(iso\)|\(dvd.*?\)|(?:\d+\s*)?\d{3,4}p.*?$|extended|(?:\d+\s*)?bdrip.*?$|\(.*?\).*?$|iso$|unrated|\[.*?$|\d{4}$', '', title, flags=re.IGNORECASE)
title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren|\(iso\)|\(dvd.*?\)|(?:\d+\s*)?\d{3,4}p.*?$|extended|(?:\d+\s*)?bdrip.*?$|\(.*?\).*?$|iso$|unrated|\[.*?$|\d{4}$', '', title)
#Obtenemos temporada y episodio si se trata de Episodios
if item_local.contentType == "episode":

View File

@@ -274,7 +274,7 @@ def listado(item):
#Limpiamos el título de la basura innecesaria
title = re.sub(r'- $', '', title)
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)
title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title)
#Terminamos de limpiar el título
title = re.sub(r'\??\s?\d*?\&.*', '', title)

View File

@@ -79,7 +79,7 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'--more--></p>(.*?)/a></p>')
data = scrapertools.get_match(data,'--more-->(.*?)/a>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)".*?class="external">(.*?)<'
matches = re.compile(patron,re.DOTALL).findall(data)

View File

@@ -38,9 +38,8 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h4>Trending Categories</h4>(.*?)</ul>')
data = scrapertools.get_match(data,'<h4>Trending(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
itemlist.append( Item(channel=item.channel, action="lista", title="big tits", url= host + "/search/big-tits-1.html?") )
patron = '<li><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:

View File

@@ -353,7 +353,7 @@ def listado(item):
item_local.quality += " 3D"
else:
item_local.quality = "3D"
title = re.sub('3D', '', title, flags=re.IGNORECASE)
title = re.sub('(?i)3D', '', title)
title = title.replace('[]', '')
if item_local.quality:
item_local.quality += ' %s' % scrapertools.find_single_match(title, '\[(.*?)\]')
@@ -418,7 +418,7 @@ def listado(item):
title = re.sub(r'- $', '', title)
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)
title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title)
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "").replace("LATINO", "").replace("Spanish", "").replace("Trailer", "").replace("Audio", "")
title = title.replace("HDTV-Screener", "").replace("DVDSCR", "").replace("TS ALTA", "").replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("HDRip", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "").replace(" 480p", "").replace(" 480P", "").replace(" 720p", "").replace(" 720P", "").replace(" 1080p", "").replace(" 1080P", "").replace("DVDRip", "").replace(" Dvd", "").replace(" DVD", "").replace(" V.O", "").replace(" Unrated", "").replace(" UNRATED", "").replace(" unrated", "").replace("screener", "").replace("TS-SCREENER", "").replace("TSScreener", "").replace("HQ", "").replace("AC3 5.1", "").replace("Telesync", "").replace("Line Dubbed", "").replace("line Dubbed", "").replace("LineDuB", "").replace("Line", "").replace("XviD", "").replace("xvid", "").replace("XVID", "").replace("Mic Dubbed", "").replace("HD", "").replace("V2", "").replace("CAM", "").replace("VHS.SCR", "").replace("Dvd5", "").replace("DVD5", "").replace("Iso", "").replace("ISO", "").replace("Reparado", "").replace("reparado", "").replace("DVD9", "").replace("Dvd9", "")

View File

@@ -280,7 +280,9 @@ def post_tmdb_listado(item, itemlist):
item.category_new = ''
for item_local in itemlist: #Recorremos el Itemlist generado por el canal
title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip()
#item_local.title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
title = item_local.title
#logger.debug(item_local)
item_local.last_page = 0
@@ -375,11 +377,13 @@ def post_tmdb_listado(item, itemlist):
item_local.contentSerieName = item_local.from_title
if item_local.contentType == 'season':
item_local.title = item_local.from_title
title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip()
title = item_local.title
#Limpiamos calidad de títulos originales que se hayan podido colar
if item_local.infoLabels['originaltitle'].lower() in item_local.quality.lower():
item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality)
#item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
# Preparamos el título para series, con los núm. de temporadas, si las hay
if item_local.contentType in ['season', 'tvshow', 'episode']:
@@ -775,7 +779,7 @@ def post_tmdb_episodios(item, itemlist):
del item_local.totalItems
item_local.unify = 'xyz'
del item_local.unify
item_local.title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip()
#logger.debug(item_local)
@@ -851,7 +855,8 @@ def post_tmdb_episodios(item, itemlist):
#Limpiamos calidad de títulos originales que se hayan podido colar
if item_local.infoLabels['originaltitle'].lower() in item_local.quality.lower():
item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality)
#item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
#Si no está el título del episodio, pero sí está en "title", lo rescatamos
if not item_local.infoLabels['episodio_titulo'] and item_local.infoLabels['title'].lower() != item_local.infoLabels['tvshowtitle'].lower():