Removed last Spanish channels
@@ -1,48 +0,0 @@
{
    "id": "bloghorror",
    "name": "BlogHorror",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "https://i.postimg.cc/gcgQhKTL/2018-10-10_20_34_57-_Peliculas_de_Terror_BLOGHORROR.png",
    "banner": "",
    "categories": [
        "movie",
        "vo",
        "torrent"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
            "type": "bool",
            "label": "Incluir en Novedades - Torrent",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,234 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel BlogHorror -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import os
import re

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = 'http://bloghorror.com/'
fanart = 'http://bloghorror.com/wp-content/uploads/2015/04/bloghorror-2017-x.jpg'


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def mainlist(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all",
                         url=host + 'category/terror', thumbnail=get_thumb('all', auto=True)))

    itemlist.append(Item(channel=item.channel, fanart=fanart, title="Asiaticas", action="list_all",
                         url=host + 'category/asiatico', thumbnail=get_thumb('asiaticas', auto=True)))

    itemlist.append(Item(channel=item.channel, fanart=fanart, title='Buscar', action="search",
                         url=host + '?s=', pages=3, thumbnail=get_thumb('search', auto=True)))

    return itemlist


def list_all(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    patron = '<article id="post-\d+".*?data-background="([^"]+)".*?href="([^"]+)".*?<h3.*?internal">([^<]+)'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        title = scrapertools.find_single_match(scrapedtitle, '(.*?)(?:\(|\| )?\d{4}').strip()
        year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel, fanart=fanart, title=title, url=url, action='findvideos',
                        thumbnail=thumbnail, infoLabels={'year': year})
        new_item.contentTitle = title
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    if itemlist:
        next_page = scrapertools.find_single_match(data, 'page-numbers current.*?<a class="page-numbers" href="([^"]+)"')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all",
                                 title='Siguiente >>>', url=next_page))

    return itemlist


def section(item):
    logger.info()

    itemlist = []
    data = get_source(host)
    if item.title == 'Generos':
        data = scrapertools.find_single_match(data, 'tabindex="0">Generos<.*?</ul>')
    elif 'Años' in item.title:
        data = scrapertools.find_single_match(data, 'tabindex="0">Año<.*?</ul>')

    patron = 'href="([^"]+)">([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, title in matches:
        itemlist.append(Item(channel=item.channel, fanart=fanart, title=title, url=url, action='list_all', pages=3))

    return itemlist


def findvideos(item):
    logger.info()

    itemlist = []
    full_data = get_source(item.url)
    data = scrapertools.find_single_match(full_data, '>FICHA TECNICA:<.*?</ul>')
    patron = '(?:<em>|<br/><em>|/> )(DVD|720|1080)(?:</em>|<br/>|</span>).*?="(magnet[^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Fallback: some posts list bare magnet links without a quality tag
    if len(matches) == 0:
        patron = '<a href="(magnet[^"]+)"'
        matches = re.compile(patron, re.DOTALL).findall(full_data)

    patron_sub = 'href="(http://www.subdivx.com/bajar.php[^"]+)"'
    sub_url = scrapertools.find_single_match(full_data, patron_sub)
    sub_num = scrapertools.find_single_match(sub_url, 'u=(\d+)')

    if sub_url == '':
        sub = ''
        lang = 'VO'
    else:
        try:
            sub = get_sub_from_subdivx(sub_url, sub_num)
        except:
            sub = ''
        lang = 'VOSE'

    try:
        # Matches are (quality, magnet) tuples when the detailed pattern hit
        for quality, scrapedurl in matches:
            if quality.strip() not in ['DVD', '720', '1080']:
                quality = 'DVD'
            url = scrapedurl
            if not config.get_setting('unify'):
                title = ' [Torrent] [%s] [%s]' % (quality, lang)
            else:
                title = 'Torrent'

            itemlist.append(Item(channel=item.channel, fanart=fanart, title=title, url=url, action='play',
                                 server='torrent', quality=quality, language=lang, infoLabels=item.infoLabels,
                                 subtitle=sub))

    except ValueError:
        # Matches are plain magnet strings from the fallback pattern
        for scrapedurl in matches:
            quality = 'DVD'
            url = scrapedurl
            if not config.get_setting('unify'):
                title = ' [Torrent] [%s] [%s]' % (quality, lang)
            else:
                title = 'Torrent'
            itemlist.append(Item(channel=item.channel, fanart=fanart, title=title, url=url, action='play',
                                 server='torrent', quality=quality, language=lang, infoLabels=item.infoLabels,
                                 subtitle=sub))

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_pelicula_to_library",
                             extra="findvideos",
                             contentTitle=item.contentTitle))

    return itemlist
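
# findvideos() tells the two regex result shapes apart with try/except: (quality,
# magnet) tuples from the detailed pattern, or bare magnet strings from the
# fallback. A sketch that normalizes both shapes up front instead (normalize_matches
# is an illustrative helper, not part of the original channel):
def normalize_matches(matches):
    # Return (quality, url) pairs whatever the input shape
    normalized = []
    for match in matches:
        if isinstance(match, tuple):
            quality, url = match
        else:
            quality, url = 'DVD', match
        normalized.append((quality.strip(), url))
    return normalized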

def search(item, texto):
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        try:
            return list_all(item)
        except:
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
            return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'terror', 'torrent']:
            item.url = host
            itemlist = list_all(item)
            if itemlist[-1].title == 'Siguiente >>>':
                itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def get_sub_from_subdivx(sub_url, sub_num):
    logger.info()

    import xbmc
    from time import sleep
    sub_dir = os.path.join(config.get_data_path(), 'temp_subs')

    # Clear any subtitle left over from a previous playback
    if os.path.exists(sub_dir):
        for sub_file in os.listdir(sub_dir):
            old_sub = os.path.join(sub_dir, sub_file)
            os.remove(old_sub)

    sub_data = httptools.downloadpage(sub_url, follow_redirects=False)

    if 'x-frame-options' not in sub_data.headers:
        sub_url = 'http://subdivx.com/sub%s/%s' % (sub_num, sub_data.headers['location'])
        sub_url = sub_url.replace('http:///', '')
        sub_data = httptools.downloadpage(sub_url).data

        fichero_rar = os.path.join(config.get_data_path(), "subtitle.rar")
        outfile = open(fichero_rar, 'wb')
        outfile.write(sub_data)
        outfile.close()
        xbmc.executebuiltin("XBMC.Extract(%s, %s/temp_subs)" % (fichero_rar, config.get_data_path()))
        sleep(1)
        if len(os.listdir(sub_dir)) > 0:
            sub = os.path.join(sub_dir, os.listdir(sub_dir)[0])
        else:
            sub = ''
    else:
        logger.info('invalid subtitle')
        sub = ''
    return sub
@@ -1,13 +0,0 @@
{
    "id": "cinehindi",
    "name": "CineHindi",
    "active": true,
    "adult": false,
    "language": ["vos"],
    "thumbnail": "cinehindi.png",
    "banner": "http://i.imgur.com/cau9TVe.png",
    "categories": [
        "movie",
        "vos"
    ]
}
@@ -1,163 +0,0 @@
# -*- coding: UTF-8 -*-

import re
import urlparse

from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

IDIOMAS = {'Hindi': 'Hindi'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['verystream', 'openload', 'netutv']

host = "http://www.cinehindi.com/"


def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host,
                         thumbnail=get_thumb("genres", auto=True)))
    itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host,
                         thumbnail=get_thumb("newest", auto=True)))
    # itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
    #                      url=urlparse.urljoin(host, "proximamente")))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
                         url=urlparse.urljoin(host, "?s="), thumbnail=get_thumb("search", auto=True)))
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def genero(item):
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(host).data
    patron = '<option class=.*? value=([^<]+)>'
    patron += '([^<]+)<\/option>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle in matches:
        if 'Próximas Películas' in scrapedtitle:
            continue
        itemlist.append(item.clone(action='lista', title=scrapedtitle, cat=scrapedurl))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        return lista(item)


def proximas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)  # Strip tabs, double spaces, line breaks, etc.
    patron = 'class="item">.*?'  # every movie item on this site starts like this
    patron += '<a href="([^"]+).*?'  # scrapedurl
    patron += '<img src="([^"]+).*?'  # scrapedthumbnail
    patron += 'alt="([^"]+).*?'  # scrapedtitle
    patron += '<span class="player">.+?<span class="year">([^"]+)<\/span>'  # scrapedyear
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        if "ver" in scrapedurl:
            scrapedtitle = scrapedtitle + " [" + scrapedyear + "]"
        else:
            scrapedtitle = scrapedtitle + " [" + scrapedyear + "]" + '(Proximamente)'
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="findvideos", extra=scrapedtitle,
                                   show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie",
                                   context=["buscar_trailer"]))
    # Pagination
    patron_pag = '<a rel=.+?nofollow.+? class=.+?page larger.+? href=.+?(.+?)proximamente.+?>([^"]+)<\/a>'
    pagina = scrapertools.find_multiple_matches(data, patron_pag)
    for next_page_url, i in pagina:
        if int(i) == 2:
            item.url = next_page_url + 'proximamente/page/' + str(i) + '/'
            itemlist.append(Item(channel=item.channel, action="proximas", title=">> Página siguiente", url=item.url,
                                 thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    if not item.cat:
        data = httptools.downloadpage(item.url).data
    else:
        # Resolve the category id to its URL from the redirect's Location header
        url = httptools.downloadpage("%s?cat=%s" % (host, item.cat),
                                     follow_redirects=False, only_headers=True).headers.get("location", "")
        data = httptools.downloadpage(url).data
    bloque = data
    patron = '<div id=mt.+?>'  # every movie item on this site starts like this
    patron += '<a href=([^"]+)\/><div class=image>'  # scrapedurl
    patron += '<img src=([^"]+) alt=.*?'  # scrapedthumbnail
    patron += '<span class=tt>([^"]+)<\/span>'  # scrapedtitle
    patron += '<span class=ttx>([^"]+)<div class=degradado>.*?'  # scrapedplot
    patron += '<span class=year>([^"]+)<\/span><\/div><\/div>'  # scrapedyear
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedyear in matches:
        scrapedtitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle, '\(\d{4}\)'), '').strip()
        title = scrapedtitle
        if scrapedyear:
            title += ' (%s)' % scrapedyear
            item.infoLabels['year'] = int(scrapedyear)
        itemlist.append(
            item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
                       contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, plot=scrapedplot,
                       contentType="movie", context=["buscar_trailer"]))
    tmdb.set_infoLabels(itemlist)
    # Pagination
    patron = 'rel="next" href="([^"]+)'
    next_page_url = scrapertools.find_single_match(data, patron)
    if next_page_url != "":
        item.url = next_page_url
        itemlist.append(Item(channel=item.channel, action="lista",
                             title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
                             thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
    return itemlist
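
# lista() resolves a category id by requesting "?cat=<id>" with redirects disabled
# and reading the Location header instead of downloading a body. The same step in
# isolation (sketch; resolve_category_url is an illustrative name):
def resolve_category_url(cat_id):
    # Ask the server where the category lives without fetching the page body
    response = httptools.downloadpage("%s?cat=%s" % (host, cat_id),
                                      follow_redirects=False, only_headers=True)
    return response.headers.get("location", "")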

def findvideos(item):
    logger.info()
    itemlist = []
    itemlist1 = []
    data = httptools.downloadpage(item.url).data
    itemlist1.extend(servertools.find_video_items(data=data))
    patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
    show = scrapertools.find_single_match(data, patron_show)
    for videoitem in itemlist1:
        videoitem.channel = item.channel
        videoitem.infoLabels = item.infoLabels
    # Keep everything except trailer links
    for videoitem in itemlist1:
        if 'youtube' not in videoitem.title:
            itemlist.append(videoitem)
    tmdb.set_infoLabels(itemlist, True)
    # Required by FilterTools
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel != 'videolibrary':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
    return itemlist


def play(item):
    logger.info()
    item.thumbnail = item.contentThumbnail
    return [item]
@@ -1,31 +0,0 @@
{
    "id": "cuevana2",
    "name": "Cuevana2",
    "active": true,
    "adult": false,
    "language": ["en"],
    "thumbnail": "cuevana2.png",
    "categories": [
        "movie",
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,320 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from channelselector import get_thumb

from core.item import Item
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from platformcode import config, logger
from channels import autoplay

host = "http://www.cuevana2.com/"
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'directo', 'yourupload', 'openload', 'dostream']


### MENUS ###

def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    # Movies
    itemlist.append(Item(channel=item.channel, title="--- Peliculas ---", folder=False, text_bold=True))

    itemlist.append(Item(channel=item.channel, title="Novedades", action="movies",
                         url=host + "pelicula", thumbnail=get_thumb("newest", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Por género", action="genre",
                         url=host + "pelicula", thumbnail=get_thumb("genres", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Por año", action="age",
                         url=host + "pelicula", thumbnail=get_thumb("year", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Favoritas", action="movies",
                         url=host + "peliculas-destacadas", thumbnail=get_thumb("favorites", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search",
                         url=host + "pelicula/?s=", thumbnail=get_thumb("search", auto=True)))

    # TV shows
    itemlist.append(Item(channel=item.channel, title="--- Series ---", folder=False, text_bold=True))

    itemlist.append(Item(channel=item.channel, title="Todas las Series", action="shows",
                         url=host + "listar-series", thumbnail=get_thumb("tvshows", auto=True)))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", extra='1',
                         url=host + "listar-series", thumbnail=get_thumb("search", auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


### END MENUS ###

def inArray(arr, arr2):
    # True when every word of arr also appears in arr2
    for word in arr:
        if word not in arr2:
            return False

    return True
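
# inArray() is a plain subset test; a minimal set-based equivalent (in_array_set is
# an illustrative name, not used by the channel — duplicates are ignored either way):
def in_array_set(arr, arr2):
    # True when every element of arr also appears in arr2
    return set(arr).issubset(arr2)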

def load_data(url):
    data = httptools.downloadpage(url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)

    return data


def redirect_url(url, parameters=None):
    data = httptools.downloadpage(url, post=parameters)
    logger.info(data.url)
    return data.url


def put_movies(itemlist, item, data, pattern):
    matches = scrapertools.find_multiple_matches(data, pattern)
    for link, img, title, rating, plot in matches:
        if 'pelicula' in link:
            itemTitle = "%s [COLOR yellow](%s/100)[/COLOR]" % (title, rating)
            itemlist.append(Item(channel=item.channel, title=itemTitle, fulltitle=title, thumbnail=img,
                                 url=link, plot=plot, action="findvideos"))
            logger.info(link)

    return itemlist


def put_episodes(itemlist, item, text):
    pattern = '<li>.*?ref="([^"]+).*?"tit">(.*?)</span>'

    matches = scrapertools.find_multiple_matches(text, pattern)
    for link, title in matches:
        itemlist.append(item.clone(title=title, fulltitle=item.title, url=link, action='findvideos', extra=1))


def episodes(item):
    logger.info()
    itemlist = []

    data = load_data(item.url)
    seasonsPattern = '"#episodios(\d+)".*?>(.*?)</a>'
    episodesPattern = 'id="episodios%s">(.*?)</ul>'

    matches = scrapertools.find_multiple_matches(data, seasonsPattern)
    for season, title in matches:
        itemlist.append(Item(channel=item.channel, title="[COLOR blue]%s[/COLOR]" % title,
                             folder=False, text_bold=True))
        episodeMatches = scrapertools.find_single_match(data, episodesPattern % season)
        put_episodes(itemlist, item, episodeMatches)

    return itemlist


def shows(item):
    logger.info()
    itemlist = []

    data = load_data(item.url)
    pattern = '"in"><a href="([^"]+)">(.*?)</a>'

    matches = scrapertools.find_multiple_matches(data, pattern)
    for link, title in matches:
        itemlist.append(Item(channel=item.channel, title=title, url=host + link, action="episodes"))

    return itemlist


def movies(item):
    logger.info()
    itemlist = []

    # Download the HTML page
    data = load_data(item.url)

    # Pattern to find the movies
    pattern = '<a href="([^"]+)"><div class="img">'  # link
    pattern += '<img width="120" height="160" src="([^"]+)" class="attachment-thumbnail wp-post-image" alt="([^"]+)".*?'  # img and title
    pattern += '<span style="width:([0-9]+)%">.*?'  # rating
    pattern += '"txt">(.*?)</div>'  # plot

    put_movies(itemlist, item, data, pattern)

    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
    if next_page:
        itemlist.append(Item(channel=item.channel, title='Siguiente Pagina', url=next_page, action="movies"))

    return itemlist


def searchShows(itemlist, item, texto):
    texto = texto.lower().split()
    data = load_data(item.url)

    pattern = '"in"><a href="([^"]+)">(.*?)</a>'

    matches = scrapertools.find_multiple_matches(data, pattern)
    for link, title in matches:
        keywords = title.lower().split()
        logger.info(keywords)
        logger.info(texto)

        if inArray(texto, keywords):
            itemlist.append(Item(channel=item.channel, title=title, url=host + link, action="episodes"))


def searchMovies(itemlist, item, texto):
    data = load_data(item.url + texto)
    # Pattern to find the movies
    pattern = '<a href="([^"]+)"><div class="img">'  # link
    pattern += '<img width="120" height="160" src="([^"]+)" class="attachment-thumbnail wp-post-image" alt="([^"]+)".*?'  # img and title
    pattern += '<span style="width:([0-9]+)%">.*?'  # rating
    pattern += '"txt">(.*?)</div>'  # plot

    put_movies(itemlist, item, data, pattern)
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')

    if next_page:
        itemlist.append(Item(channel=item.channel, title='Siguiente Pagina', url=next_page, action="movies"))


def search(item, texto):
    itemlist = []

    if item.extra:
        searchShows(itemlist, item, texto)
    else:
        searchMovies(itemlist, item, texto)

    return itemlist


def by(item, pattern):
    logger.info()
    itemlist = []

    # Download the HTML page
    data = load_data(item.url)

    # Pattern to search the page; '&&' is replaced by the caller's capture group
    pattern = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >&&</a>'.replace('&&', pattern)

    matches = scrapertools.find_multiple_matches(data, pattern)
    for link, genre in matches:
        itemlist.append(Item(channel=item.channel, title=genre, url=link, action="movies"))

    return itemlist


def genre(item):
    return by(item, '(\D+)')


def age(item):
    return by(item, '(\d+)')


def GKPluginLink(hash):
    hashdata = urllib.urlencode({r'link': hash})
    try:
        json = httptools.downloadpage('https://player4.cuevana2.com/plugins/gkpluginsphp.php', post=hashdata).data
    except:
        return None

    data = jsontools.load(json) if json else None
    logger.info(data)
    if data and 'link' in data:
        return data['link']
    return None


def RedirectLink(hash):
    hashdata = urllib.urlencode({r'url': hash})
    return redirect_url('https://player4.cuevana2.com/r.php', hashdata)


def OpenloadLink(hash):
    hashdata = urllib.urlencode({r'h': hash})
    json = httptools.downloadpage('https://api.cuevana2.com/openload/api.php', post=hashdata).data
    logger.info("CUEVANA OL JSON %s" % json)
    data = jsontools.load(json) if json else False

    return data['url'] if data and data.get('status') == 1 else None


# The patterns below look odd because they have to drop duplicated matches; the
# lookahead checks the markup much like a language parser validating syntax
def getContentMovie(data, item):
    item.infoLabels["year"] = scrapertools.find_single_match(data, 'rel="tag">(\d+)</a>')
    genre = ''
    for found_genre in scrapertools.find_multiple_matches(data, 'genero/.*?">(.*?)</a>(?=.*?</p>)'):
        genre += found_genre + ', '
    item.infoLabels["genre"] = genre.strip(', ')

    director = ''
    for found_director in scrapertools.find_multiple_matches(data, 'director/.*?">(.*?)</a>(?=.*?</p>)'):
        director += found_director + ', '
    item.infoLabels["director"] = director.strip(', ')

    item.infoLabels["cast"] = tuple(found_cast for found_cast in scrapertools.find_multiple_matches(
        data, 'reparto/.*?">(.*?)</a>(?=.*?</p>)'))


def getContentShow(data, item):
    item.thumbnail = scrapertools.find_single_match(data, 'width="120" height="160" src="([^"]+)"')
    item.infoLabels['genre'] = scrapertools.find_single_match(data, '-4px;">(.*?)</div>')


def findvideos(item):
    logger.info()
    itemlist = []

    data = load_data(item.url)
    if item.extra:
        getContentShow(data, item)
    else:
        getContentMovie(data, item)
    pattern = '<iframe width="650" height="450" scrolling="no" src="([^"]+)'
    subtitles = scrapertools.find_single_match(data, '<iframe width="650" height="450" scrolling="no" src=".*?sub=([^"]+)"')

    title = "[COLOR blue]Servidor [%s][/COLOR]"
    for link in scrapertools.find_multiple_matches(data, pattern):
        if 'player4' in link:
            # In case both methods are present; the short-circuit keeps this cheap
            if r'ir.php' in link:
                link = scrapertools.find_single_match(link, 'php\?url=(.*?)&').replace('%3A', ':').replace('%2F', '/')
                logger.info("CUEVANA IR %s" % link)
            elif r'irgoto.php' in link:
                link = scrapertools.find_single_match(link, 'php\?url=(.*?)&').replace('%3A', ':').replace('%2F', '/')
                link = RedirectLink(link)
                logger.info("CUEVANA IRGOTO %s" % link)
            elif r'gdv.php' in link:
                # Google Drive slows down link discovery, is a poor option anyway,
                # and is the first source to get taken down
                continue
            else:
                link = scrapertools.find_single_match(link, 'php.*?=(\w+)&')
                link = GKPluginLink(link)

        elif 'openload' in link:
            link = scrapertools.find_single_match(link, '\?h=(\w+)&')
            logger.info("CUEVANA OL HASH %s" % link)
            link = OpenloadLink(link)
            logger.info("CUEVANA OL %s" % link)

        elif 'youtube' in link:
            title = "[COLOR yellow]Ver Trailer (%s)[/COLOR]"
        else:
            # Anything else is not implemented yet; report it if a movie shows no links
            continue

        if not link:
            continue

        # GKPlugin can return several links with different qualities; for now only
        # the first one is used (see the sketch after this function)
        if type(link) is list:
            link = link[0]['link']
        if r'chomikuj.pl' in link:
            # Some users get a 401 error from the CH option without this referer
            link += "|Referer=https://player4.cuevana2.com/plugins/gkpluginsphp.php"
        elif r'vidcache.net' in link:
            # Avoids a 500 error
            link += '|Referer=https://player4.cuevana2.com/yourupload.com.php'

        itemlist.append(
            item.clone(
                channel=item.channel,
                title=title,
                url=link, action='play',
                subtitle=subtitles))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist):
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                             action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                             fulltitle=item.fulltitle))
    return itemlist
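
# GKPluginLink() can return a list of dicts with several qualities; findvideos()
# above simply keeps the first entry. A preference-based pick would look roughly
# like this (pick_link is an illustrative helper, and the 'label' key is an
# assumption — the gkpluginsphp payload may name its quality field differently):
def pick_link(links, preferred=('1080', '720', '480')):
    # links: list of {'link': ..., 'label': ...} dicts
    for quality in preferred:
        for entry in links:
            if quality in str(entry.get('label', '')):
                return entry['link']
    return links[0]['link']  # fall back to the first entry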
@@ -1,36 +0,0 @@
{
    "id": "doramasmp4",
    "name": "DoramasMP4",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "https://s14.postimg.cc/ibh4znkox/doramasmp4.png",
    "banner": "",
    "categories": [
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "VOSE",
                "VO"
            ]
        }
    ]
}
@@ -1,235 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel DoramasMP4 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = 'https://www4.doramasmp4.com/'

IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['verystream', 'openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']


def get_source(url, referer=None):
    logger.info()
    if referer is None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer': referer}).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Doramas", action="doramas_menu",
                         thumbnail=get_thumb('doramas', auto=True), type='dorama'))

    itemlist.append(Item(channel=item.channel, title="Variedades", action="list_all",
                         url=host + 'catalogue?format%5B%5D=varieties&sort=latest',
                         thumbnail='', type='dorama'))

    itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
                         url=host + 'catalogue?format%5B%5D=movie&sort=latest',
                         thumbnail=get_thumb('movies', auto=True), type='movie'))
    itemlist.append(Item(channel=item.channel, title='Buscar', action="search", url=host + 'search?s=',
                         thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def doramas_menu(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Todas", action="list_all",
                         url=host + 'catalogue?format%5B%5D=drama&sort=latest',
                         thumbnail=get_thumb('all', auto=True), type='dorama'))
    itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
                         url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'))
    return itemlist


def list_all(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)

    patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
    patron += 'txt-size-12">(\d{4})<.*?text-truncate">([^<]+)<.*?description">([^<]+)<.*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    media_type = item.type
    for scrapedurl, scrapedthumbnail, year, scrapedtitle, scrapedplot in matches:
        new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
                        thumbnail=scrapedthumbnail, type=media_type, infoLabels={'year': year})
        if media_type != 'dorama':
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        else:
            new_item.contentSerieName = scrapedtitle
            new_item.action = 'episodios'
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    if itemlist:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" aria-label="Netx">')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
                                 url=host + 'catalogue' + next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 type=item.type))
    return itemlist


def latest_episodes(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'shadow-lg rounded" href="([^"]+)".*?src="([^"]+)".*?style="">([^<]+)<.*?>Capítulo (\d+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
        title = '%s %s' % (scrapedtitle, scrapedep)
        contentSerieName = scrapedtitle
        itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
                             title=title, contentSerieName=contentSerieName, type='episode'))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<a itemprop="url".*?href="([^"]+)".*?title="(.*?) Cap.*?".*?>Capítulo (\d+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedep in matches:
        url = scrapedurl
        contentEpisodeNumber = scrapedep

        # Give each episode its own copy of the labels; sharing one dict would leave
        # every Item tagged with the last episode number
        infoLabels = dict(item.infoLabels)
        infoLabels['season'] = 1
        infoLabels['episode'] = contentEpisodeNumber

        if scrapedtitle != '':
            title = '1x%s - %s' % (scrapedep, scrapedtitle)
        else:
            title = 'episodio %s' % scrapedep

        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
                             contentEpisodeNumber=contentEpisodeNumber, type='episode', infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library",
                       extra="episodios", text_color='yellow'))
    return itemlist


def findvideos(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    new_dom = scrapertools.find_single_match(data, "var web = { domain: '(.*?)'")

    patron = 'link="([^"]+)"'

    matches = re.compile(patron, re.DOTALL).findall(data)

    if '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data:
        language = IDIOMAS['VO']
    else:
        language = IDIOMAS['sub']

    for video_url in matches:
        headers = {'referer': video_url}
        token = scrapertools.find_single_match(video_url, 'token=(.*)')
        if 'fast.php' in video_url:
            video_url = 'https://player.rldev.in/fast.php?token=%s' % token
            video_data = httptools.downloadpage(video_url, headers=headers).data
            url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
        else:
            # The redirector hides the real URL inside the Location header
            video_url = new_dom + 'api/redirect.php?token=%s' % token
            video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
            url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')

        new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language=language)
        itemlist.append(new_item)

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    if len(itemlist) == 0 and item.type == 'search':
        # Search results may point at a dorama page rather than a single video
        item.contentSerieName = item.contentTitle
        item.contentTitle = ''
        return episodios(item)

    # Required by FilterTools
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
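
# The else branch above recovers the final video URL from a redirector: it requests
# api/redirect.php?token=... without following redirects and pulls the target out of
# a Location header shaped like "<id>@@@<url>@@@". The same step in isolation
# (sketch; url_from_location is an illustrative name):
def url_from_location(location_header):
    # e.g. "...123@@@https://server/video.mp4@@@" -> "https://server/video.mp4"
    return scrapertools.find_single_match(location_header, '\d+@@@(.*?)@@@')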

def search(item, texto):
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.type = 'search'
    if texto != '':
        try:
            return list_all(item)
        except:
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
            return itemlist
@@ -1,46 +0,0 @@
{
    "id": "dramasjc",
    "name": "DramasJC",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "https://www.dramasjc.com/wp-content/uploads/2018/03/logo.png",
    "banner": "",
    "version": 1,
    "categories": [
        "tvshow",
        "movie",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "VOSE",
                "VO"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,282 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel DramasJC -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools

host = 'https://www.dramasjc.com/'

IDIOMAS = {'VOSE': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['okru', 'mailru', 'openload']


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = list()
    itemlist.append(Item(channel=item.channel, title="Doramas", action="menu_doramas",
                         thumbnail=get_thumb('doramas', auto=True)))

    itemlist.append(Item(channel=item.channel, title="Películas", action="list_all", url=host + 'peliculas/',
                         type='movie', thumbnail=get_thumb('movies', auto=True)))

    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
                         thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def menu_doramas(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Todos", action="list_all", url=host + 'series',
                         thumbnail=get_thumb('all', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Generos", action="section",
                         thumbnail=get_thumb('genres', auto=True)))

    return itemlist


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    full_data = data
    data = scrapertools.find_single_match(data, '<ul class="MovieList NoLmtxt.*?>(.*?)</ul>')

    patron = '<article id="post-.*?<a href="([^"]+)">.*?(?:<img |-)src="([^"]+)".*?alt=".*?'
    patron += '<h3 class="Title">([^<]+)<\/h3>.?(?:</a>|<span class="Year">(\d{4})<\/span>).*?'
    patron += '(movie|TV)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:

        url = scrapedurl
        if year == '':
            year = '-'
        if "|" in scrapedtitle:
            scrapedtitle = scrapedtitle.split("|")
            contentname = scrapedtitle[0].strip()
        else:
            contentname = scrapedtitle

        contentname = re.sub('\(.*?\)', '', contentname)

        title = '%s [%s]' % (contentname, year)
        thumbnail = 'http:' + scrapedthumbnail
        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        infoLabels={'year': year})

        if type == 'movie':
            new_item.contentTitle = contentname
            new_item.action = 'findvideos'
        else:
            new_item.contentSerieName = contentname
            new_item.action = 'seasons'
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, True)

    # Pagination
    url_next_page = scrapertools.find_single_match(full_data, '<a class="next.*?href="([^"]+)">')
    if url_next_page:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
    return itemlist


def section(item):
    logger.info()
    itemlist = []

    full_data = get_source(host)
    data = scrapertools.find_single_match(full_data, '<a href="#">Dramas por Genero</a>(.*?)</ul>')
    patron = '<a href="([^ ]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, title in matches:
        itemlist.append(Item(channel=item.channel, title=title, url=url, action='list_all'))

    return itemlist


def seasons(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'class="Title AA-Season On" data-tab="1">Temporada <span>([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for temporada in matches:
        title = 'Temporada %s' % temporada
        contentSeasonNumber = temporada
        item.infoLabels['season'] = contentSeasonNumber
        itemlist.append(item.clone(action='episodesxseason',
                                   title=title,
                                   contentSeasonNumber=contentSeasonNumber))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             contentSerieName=item.contentSerieName,
                             contentSeasonNumber=contentSeasonNumber))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseason(tempitem)

    return itemlist


def episodesxseason(item):
    logger.info()

    itemlist = []
    season = item.contentSeasonNumber
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '>Temporada <span>%s</span>(.*?)</ul>' % season)
    patron = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    ep = 1
    for scrapedurl, scrapedtitle in matches:
        epi = str(ep)
        title = season + 'x%s - Episodio %s' % (epi, epi)
        url = scrapedurl
        contentEpisodeNumber = epi
        item.infoLabels['episode'] = contentEpisodeNumber
        if 'próximamente' not in scrapedtitle.lower():
            itemlist.append(item.clone(action='findvideos',
                                       title=title,
                                       url=url,
                                       contentEpisodeNumber=contentEpisodeNumber))
        ep += 1
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
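
# episodesxseason() numbers episodes by position and skips "próximamente" entries
# without reusing their numbers. enumerate() expresses the same bookkeeping more
# directly (sketch; numbered_episodes is an illustrative helper, not used above):
def numbered_episodes(matches):
    # Yield (episode_number, url, title) for entries that are already available
    for ep, (url, title) in enumerate(matches, 1):
        if 'próximamente' in title.lower():
            continue
        yield ep, url, title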

def findvideos(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    data = scrapertools.unescape(data)
    data = scrapertools.decodeHtmlentities(data)

    # "about:blank" iframes are placeholders, so the pattern skips them
    patron = 'id="(Opt\d+)">.*?src="(?!about:blank)([^"]+)" frameborder.*?</iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    trailer = None
    for option, scrapedurl in matches:
        scrapedurl = scrapedurl.replace('"', '').replace('&amp;', '&')
        data_video = get_source(scrapedurl)
        url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)"')
        opt_data = scrapertools.find_single_match(data, '"%s"><span>.*?</span>.*?<span>([^<]+)</span>' % option).split('-')
        language = opt_data[0].strip()
        quality = opt_data[1].strip()
        if 'sub' in language.lower():
            language = 'VOSE'
        else:
            language = 'VO'
        if url != '' and 'youtube' not in url:
            itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language],
                                 quality=quality, action='play'))
        elif 'youtube' in url:
            trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]' % (i.server.capitalize(),
                                                                                                i.language, i.quality))
    if trailer:
        itemlist.append(trailer)

    # Required by FilterTools
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto

    if texto != '':
        return list_all(item)
    else:
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'peliculas/'

            itemlist = list_all(item)
            if itemlist[-1].title == 'Siguiente >>':
                itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
@@ -1,38 +0,0 @@
{
    "id": "estrenosdoramas",
    "name": "Estrenos Doramas",
    "active": true,
    "adult": false,
    "language": ["VOSE", "LAT"],
    "thumbnail": "https://www.estrenosdoramas.net/wp-content/uploads/2016/08/estrenos-doramasss-net3.png",
    "banner": "",
    "categories": [
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "VOSE",
                "VO",
                "LAT"
            ]
        }
    ]
}
@@ -1,296 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel Estreno Doramas -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the BDamian (Based on channels from Alfa Develop Group) -*-
|
||||
|
||||
import re
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core import jsontools
|
||||
from core import tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from channelselector import get_thumb
|
||||
import ast
|
||||
|
||||
host = 'https://www.estrenosdoramas.net/'
|
||||
|
||||
IDIOMAS = {'Latino': 'LAT', 'Vo':'VO', 'Vose': 'VOSE'}
|
||||
IDIOMA = "no filtrar"
|
||||
list_language = IDIOMAS.values()
|
||||
list_quality = []
|
||||
list_servers = ['verystream', 'openload', 'streamango', 'netutv', 'okru', 'mp4upload']
|
||||
|
||||
def get_source(url, referer=None):
|
||||
logger.info()
|
||||
if referer is None:
|
||||
data = httptools.downloadpage(url).data
|
||||
else:
|
||||
data = httptools.downloadpage(url, headers={'Referer':referer}).data
|
||||
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
|
||||
return data
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
itemlist = []
|
||||
|
||||
itemlist.append(Item(channel= item.channel, title="Doramas", action="list_all",
|
||||
url=host + 'category/doramas-online',
|
||||
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
|
||||
url=host + 'category/peliculas',
|
||||
thumbnail=get_thumb('movies', auto=True), type='movie'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Últimos capítulos", action="list_all",
|
||||
url=host + 'category/ultimos-capitulos-online',
|
||||
thumbnail=get_thumb('doramas', auto=True), type='movie'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Por Genero", action="menu_generos",
|
||||
url=host,
|
||||
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title="Doblado Latino", action="list_all",
|
||||
url=host + 'category/latino',
|
||||
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
|
||||
|
||||
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search/',
|
||||
thumbnail=get_thumb('search', auto=True)))
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def menu_generos(item):
|
||||
logger.info()
|
||||
|
||||
data = get_source(item.url)
|
||||
data = scrapertools.find_single_match(data, '<div id="genuno">(.*?)</div>')
|
||||
|
||||
itemlist = []
|
||||
|
||||
patron = '<li><a.*?href="(.*?)">(.*?)</a>.*?</li>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
media_type = item.type
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=item.thumbnail, type=item.type, action="list_all")
|
||||
itemlist.append(new_item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def list_all(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = get_source(item.url)
|
||||
data = scrapertools.find_single_match(data, '<h3 class="widgetitulo">Resultados</h3>.*?<div id="sidebar-wrapper">')
|
||||
|
||||
patron = '<div.*?<a href="(.*?)"><img src="(.*?)" alt="(.*?)".*?</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail)
|
||||
if scrapedtitle.startswith("Pelicula") or item.type == "movie":
|
||||
new_item.action = 'findvideos'
|
||||
new_item.contentTitle = scrapedtitle
|
||||
else:
|
||||
new_item.contentSerieName=scrapedtitle
|
||||
new_item.action = 'episodios'
|
||||
itemlist.append(new_item)
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginacion
|
||||
patron = '<a class="nextpostslink" rel="next" href="(.*?)">'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
if matches:
|
||||
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
|
||||
url=matches[0], type=item.type))
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)

    plot = ''
    plot_regex = '(<span class="clms"><b>Nombre.*?)<\/div>'
    plot_match = re.compile(plot_regex, re.DOTALL).findall(data)
    if plot_match:
        plot = scrapertools.htmlclean(plot_match[0].replace('<br />', '\n'))

    data = scrapertools.find_single_match(data, '<ul class="lcp_catlist".*?</ul>')
    patron = '<li.*?<a href="(.*?)" title="(.*?)">.*?(\d*?)<\/a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedep in matches:
        if item.url == scrapedurl:
            continue
        url = scrapedurl
        contentEpisodeNumber = scrapedep
        if contentEpisodeNumber == "":
            title = '1xEE - ' + scrapedtitle
        else:
            # ("0" + n)[-2:] left-pads the episode number to two digits
            title = '1x' + ("0" + contentEpisodeNumber)[-2:] + " - " + scrapedtitle

        # Copy infoLabels per episode so every Item carries its own season/episode
        infoLabels = dict(item.infoLabels)
        infoLabels['season'] = 1
        infoLabels['episode'] = contentEpisodeNumber

        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, plot=plot,
                             contentEpisodeNumber=contentEpisodeNumber, type='episode', infoLabels=infoLabels))

    itemlist.sort(key=lambda x: x.title)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library",
                       extra="episodios", text_color='yellow'))
    return itemlist

def findvideos(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '<div id="marco-post">.*?<div id="sidebar">')
    data = scrapertools.unescape(data)
    data = scrapertools.decodeHtmlentities(data)

    options_regex = '<a href="#tab.*?">.*?<b>(.*?)</b>'
    option_matches = re.compile(options_regex, re.DOTALL).findall(data)

    video_regex = '<iframe.*?src="(.*?)".*?</iframe>'
    video_matches = re.compile(video_regex, re.DOTALL).findall(data)

    # map(None, ...) is the Python 2 zip-with-padding idiom: the shorter list is padded with None
    for option, scrapedurl in map(None, option_matches, video_matches):
        if scrapedurl is None:
            continue

        scrapedurl = scrapedurl.replace('"', '').replace('&amp;', '&')
        logger.info(scrapedurl)
        try:
            # Probe the intermediate page; the result only validates that the url responds
            data_video = get_source(scrapedurl)
        except Exception:
            logger.info('Error en url: ' + scrapedurl)
            continue

        # This site chains several intermediate pages, each with its own rules.
        source_headers = dict()
        source_headers["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"
        source_headers["X-Requested-With"] = "XMLHttpRequest"

        if scrapedurl.find("https://repro") != 0:
            logger.info("Caso 0: url externa")
            url = scrapedurl
            itemlist.append(Item(channel=item.channel, title=option, url=url, action='play', language=IDIOMA))
        elif scrapedurl.find("pi76823.php") > 0:
            logger.info("Caso 1")
            source_data = get_source(scrapedurl)
            source_regex = 'post\( "(.*?)", { acc: "(.*?)", id: \'(.*?)\', tk: \'(.*?)\' }'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc, source_id, source_tk in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("pi76823.php")] + source_page
                source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
                                                       source_id + '&tk=' + source_tk, source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'],
                                         action='play', language=IDIOMA))
        elif scrapedurl.find("pi7.php") > 0:
            logger.info("Caso 2")
            source_data = get_source(scrapedurl)
            source_regex = 'post\( "(.*?)", { acc: "(.*?)", id: \'(.*?)\', tk: \'(.*?)\' }'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc, source_id, source_tk in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("pi7.php")] + source_page
                source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
                                                       source_id + '&tk=' + source_tk, source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'],
                                         action='play', language=IDIOMA))
        elif scrapedurl.find("reproducir120.php") > 0:
            logger.info("Caso 3")
            source_data = get_source(scrapedurl)

            videoidn = scrapertools.find_single_match(source_data, 'var videoidn = \'(.*?)\';')
            tokensn = scrapertools.find_single_match(source_data, 'var tokensn = \'(.*?)\';')

            source_regex = 'post\( "(.*?)", { acc: "(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("reproducir120.php")] + source_page
                source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
                                                       videoidn + '&tk=' + tokensn, source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    urlremoto_regex = "file:'(.*?)'"
                    urlremoto_matches = re.compile(urlremoto_regex, re.DOTALL).findall(source_json['urlremoto'])
                    if len(urlremoto_matches) == 1:
                        itemlist.append(Item(channel=item.channel, title=option, url=urlremoto_matches[0],
                                             action='play', language=IDIOMA))
        elif scrapedurl.find("reproducir14.php") > 0:
            logger.info("Caso 4")
            source_data = get_source(scrapedurl)

            source_regex = '<div id="player-contenido" vid="(.*?)" name="(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            videoidn = source_matches[0][0]
            tokensn = source_matches[0][1]

            source_regex = 'post\( "(.*?)", { acc: "(.*?)"'
            source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
            for source_page, source_acc in source_matches:
                source_url = scrapedurl[0:scrapedurl.find("reproducir14.php")] + source_page
                source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
                                                       videoidn + '&tk=' + tokensn, source_headers)
                if source_result.code == 200:
                    source_json = jsontools.load(source_result.data)
                    itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'],
                                         action='play', language=IDIOMA))
        else:
            logger.info("Caso nuevo")

    itemlist = servertools.get_servers_itemlist(itemlist)

    # Required by FilterTools
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
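
# "Caso 1" and "Caso 2" above differ only in the intermediate page name
# ('pi76823.php' vs 'pi7.php'); a minimal sketch of how that duplicated
# POST-and-parse step could be factored out. The helper name and its
# list-of-urls return value are illustrative assumptions, not part of the
# original channel code.
def resolve_intermediate_page(scrapedurl, marker, source_headers):
    urls = []
    source_data = get_source(scrapedurl)
    source_regex = 'post\( "(.*?)", { acc: "(.*?)", id: \'(.*?)\', tk: \'(.*?)\' }'
    for source_page, source_acc, source_id, source_tk in re.compile(source_regex, re.DOTALL).findall(source_data):
        # Rebuild the endpoint relative to the intermediate page and replay the POST
        source_url = scrapedurl[0:scrapedurl.find(marker)] + source_page
        source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
                                               source_id + '&tk=' + source_tk, source_headers)
        if source_result.code == 200:
            urls.append(jsontools.load(source_result.data)['urlremoto'])
    return urls
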
def search(item, texto):
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.type = 'search'
    if texto != '':
        try:
            return list_all(item)
        except:
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
            return itemlist

@@ -1,50 +0,0 @@
{
    "id": "mundopelis",
    "name": "mundopelis",
    "active": true,
    "adult": false,
    "language": ["vos"],
    "thumbnail": "https://mundopelis.xyz/images/logo.png",
    "banner": "",
    "categories": [
        "movie",
        "vos"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces",
            "type": "bool",
            "label": "Verificar si los enlaces existen",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces_num",
            "type": "list",
            "label": "Número de enlaces a verificar",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": [ "5", "10", "15", "20" ]
        }
    ]
}
@@ -1,145 +0,0 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re, urllib, urlparse

from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools

host = 'https://mundopelis.xyz'

list_language = []
list_servers = ['Rapidvideo', 'Vidoza', 'Openload', 'Youtube']
list_quality = []
__channel__ = 'mundopelis'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
    __modo_grafico__ = True

def mainlist(item):
    logger.info()
    itemlist = []
    autoplay.init(item.channel, list_servers, list_quality)

    itemlist.append(item.clone(title="Novedades", action="lista", url=host + "/todos-los-estrenos", first=0))
    itemlist.append(item.clone(title="Categorias", action="categorias", url=host))
    itemlist.append(item.clone(title="Buscar", action="search"))

    itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def configuracion(item):
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret

def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?option=com_spmoviedb&view=searchresults&searchword=%s&type=movies&Itemid=544" % texto
    item.first = 0
    try:
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<a class="btn btn-xs btn-primary" href="/index.php([^"]+)".*?</i> ([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle
        itemlist.append(item.clone(channel=item.channel, action="lista", title=title, url=url, first=0,
                                   thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist

def lista(item):
    logger.info()
    itemlist = []

    next = False
    data = httptools.downloadpage(item.url).data
    patron = '<div class="movie-poster">.*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<a href="/index.php([^"]+)"><h4 class="movie-title">([^<]+)</h4>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    first = item.first
    last = first + 20
    if last > len(matches):
        last = len(matches)
        next = True
    scrapertools.printMatches(matches)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches[first:last]:
        # Strip a trailing "(year)" from the title and keep the year for infoLabels
        scrapedyear = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)$')
        if not scrapedyear:
            scrapedyear = "-"
        title = re.sub(r'\s*\(\d{4}\)$', '', scrapedtitle)
        url = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(item.clone(channel=item.channel, action='findvideos', title=title, contentTitle=scrapedtitle,
                                   url=url, thumbnail=scrapedthumbnail, infoLabels={'year': scrapedyear}))
    tmdb.set_infoLabels(itemlist, True)

    # Pagination: show 20 results per call; only fetch the site's next page once this one is exhausted
    if not next:
        url_next_page = item.url
        first = last
    else:
        url_next_page = scrapertools.find_single_match(data, '<a title="Siguiente" href="([^"]+)"')
        url_next_page = urlparse.urljoin(item.url, url_next_page)
        first = 0
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
    return itemlist
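
# A standalone sketch of the windowing pattern lista() uses: each call shows a
# slice of up to 20 matches and only follows the site's "Siguiente" link once
# the current page's matches are exhausted. Names here are illustrative only.
def window(matches, first, size=20):
    last = min(first + size, len(matches))
    exhausted = (last == len(matches))  # True -> follow the next-page link, restart at 0
    return matches[first:last], last, exhausted
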
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<(?:iframe|IFRAME).*?(?:src|SRC)="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        lang = "VOSE"
        if not config.get_setting('unify'):
            title = ' (%s)' % lang
        else:
            title = ''
        if url != '':
            # '%s' is filled in later with the server name by get_servers_itemlist
            itemlist.append(item.clone(action="play", title='%s' + title, url=url, language=lang))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Required to filter dead links
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required by FilterTools
    # itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos' and "/episodios/" not in item.url:
        itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                             extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
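
# Note the deferred formatting above: each link title is stored as the template
# '%s (VOSE)' and get_servers_itemlist fills the '%s' with the detected server
# name, e.g. '%s (VOSE)' % 'Openload' -> 'Openload (VOSE)'.
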

@@ -1,65 +0,0 @@
{
    "id": "playview",
    "name": "Playview",
    "active": true,
    "adult": false,
    "language": [
        "esp", "lat", "cast"
    ],
    "thumbnail": "https://s15.postimg.cc/pkcz7kda3/playview.png",
    "banner": "",
    "version": 1,
    "categories": [
        "movie",
        "tvshow",
        "anime"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Cast",
                "Lat",
                "VOSE"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,317 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Playview -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb


IDIOMAS = {'Latino': 'Lat', 'Español': 'Cast', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['HD 1080p', 'HD 720p', 'DVDRIP', 'CAM']
list_servers = ['verystream', 'openload', 'vidoza', 'clipwatching', 'fastplay', 'flashx', 'gamovideo', 'powvideo',
                'streamango', 'streamcherry', 'rapidvideo']

host = 'https://playview.io/'

def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title='Películas', action='submenu', type='movie',
                         thumbnail=get_thumb('movies', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Series', action='submenu', type='tvshow',
                         thumbnail=get_thumb('tvshows', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Anime', action='list_all', url=host + 'anime-online',
                         type='tvshow', first=0, thumbnail=get_thumb('anime', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Buscar', action='search', url=host + 'search/',
                         thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist

def submenu(item):
    logger.info()

    itemlist = []
    if item.type == 'movie':
        itemlist.append(
            Item(channel=item.channel, title='Todas', action='list_all', url=host + 'peliculas-online', type='movie',
                 first=0, thumbnail=get_thumb('all', auto=True)))
        itemlist.append(
            Item(channel=item.channel, title='Generos', action='genres', thumbnail=get_thumb('genres', auto=True)))
    else:
        itemlist.append(
            Item(channel=item.channel, title='Todas', action='list_all', url=host + 'series-online', type='tvshow',
                 first=0, thumbnail=get_thumb('all', auto=True)))
        itemlist.append(
            Item(channel=item.channel, title='Series Animadas', action='list_all', url=host + 'series-animadas-online',
                 type='tvshow', first=0, thumbnail=get_thumb('animacion', auto=True)))

    return itemlist

def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data

def genres(item):
    logger.info()

    itemlist = []

    data = get_source(host)
    patron = '<li value=(\d+)><a href=(.*?)>(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for value, url, title in matches:
        if value not in ['1', '4', '22', '23', '24']:
            if value == '20':
                title = 'Familiar'
            itemlist.append(Item(channel=item.channel, title=title, action='list_all', url=url, type='Movie', first=0))

    return sorted(itemlist, key=lambda i: i.title)

def list_all(item):
    logger.info()

    itemlist = []
    next = False

    data = get_source(item.url)
    patron = 'spotlight_container>.*?image lazy data-original=(.*?)>.*?<div class=spotlight_title>(.*?)<'
    patron += '(.*?) sres>(\d{4})<.*?playLink href=(.*?)>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    first = item.first
    last = first + 19
    if last > len(matches):
        last = len(matches)
        next = True

    for scrapedthumbnail, scrapedtitle, type_data, year, scrapedurl in matches[first:last]:

        url = scrapedurl
        title = scrapedtitle
        season = scrapertools.find_single_match(type_data, 'class=title-season>Temporada<.*?> (\d+) <')
        episode = scrapertools.find_single_match(type_data, 'class=title-season>Episodio<.*?> (\d+) <')
        if season != '' or episode != '':
            item.type = 'tvshow'
        else:
            item.type = 'movie'

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail, type=item.type,
                        infoLabels={'year': year})

        if item.type == 'tvshow':
            new_item.action = 'episodios'
            new_item.contentSerieName = scrapedtitle
            season = season.strip()
            episode = episode.strip()
            if season == '':
                if 'Anime' in item.title:
                    season = 1
                else:
                    season = scrapertools.find_single_match(url, '.*?temp-(\d+)')
            new_item.contentSeasonNumber = season

            if episode != '':
                new_item.contentEpisodeNumber = episode

            if season != '' and episode != '':
                new_item.title = '%s %sx%s' % (new_item.title, season, episode)
            elif episode == '':
                new_item.title = '%s Temporada %s' % (new_item.title, season)
        else:
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    if not next:
        url_next_page = item.url
        first = last
    else:
        url_next_page = scrapertools.find_single_match(data, "<a href=([^ ]+) class=page-link aria-label=Next>")
        first = 0

    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all', first=first))
    return itemlist

def get_data(post):
    logger.info()

    post = urllib.urlencode(post)
    data = httptools.downloadpage(host + 'playview', post=post).data

    return data
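
# get_data drives every step of the site's option dialog over a single POST
# endpoint. An illustrative call (the field values are made up, not captured
# from the site) would be:
#     get_data({'set': 'LoadOptions', 'action': 'Step1', 'id': '1234', 'type': '1'})
# which returns the HTML fragment that the callers below scrape for the next step.
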
def episodios(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    try:
        id, type = scrapertools.find_single_match(data, 'data-id=(\d+) data-type=(.*?) ')
        post = {'set': 'LoadOptionsEpisode', 'action': 'EpisodeList', 'id': id, 'type': '1'}
        data = get_data(post)
        patron = 'data-episode="(\d+)".*?title="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        infoLabels = item.infoLabels
        for episode, title in matches:
            post = {'set': 'LoadOptionsEpisode', 'action': 'Step1', 'id': id, 'type': '1',
                    'episode': episode}
            season = scrapertools.find_single_match(item.url, '.*?temp-(\d+)')
            if season == '':
                season = 1
            infoLabels['season'] = season
            infoLabels['episode'] = episode
            if title[0].isdigit():
                title = '%sx%s' % (season, title)
            else:
                title = '%sx%s - %s' % (season, episode, title)
            itemlist.append(Item(channel=item.channel, title=title, contentSeasonNumber=season,
                                 contentEpisodeNumber=episode, action='findvideos', post=post, type=item.type,
                                 infoLabels=infoLabels))

        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                     action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    except:
        pass

    return itemlist
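
# Each episode Item built above carries its Step1 POST payload in item.post,
# so findvideos() can replay the request against the playview endpoint without
# re-scraping the episode page.
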
def findvideos(item):
    logger.info()
    itemlist = []
    set_mode = 'LoadOptions'
    episode = ''
    if item.type == 'tvshow':
        post = item.post
        id = post['id']
        episode = post['episode']
        type = post['type']
        set_mode = 'LoadOptionsEpisode'
    else:
        data = get_source(item.url)
        try:
            id, type = scrapertools.find_single_match(data, 'data-id=(\d+) data-type=(.*?) ')
            post = {'set': set_mode, 'action': 'Step1', 'id': id, 'type': type}
        except:
            pass

    try:
        data = get_data(post)
        patron = 'data-quality="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for quality in matches:
            post = {'set': set_mode, 'action': 'Step2', 'id': id, 'type': type, 'quality': quality, 'episode': episode}
            data = get_data(post)
            patron = 'getplayer" data-id="(\d+)"> <h4>(.*?)</h4>.*?title="(.*?)"'
            matches = re.compile(patron, re.DOTALL).findall(data)

            for video_id, language, server in matches:
                post = {'set': set_mode, 'action': 'Step3', 'id': video_id, 'type': type}
                data = get_data(post)
                url = scrapertools.find_single_match(data, '<iframe class="embed.*?src="(.*?)"')
                if 'clipwatching' in url:
                    url = url.replace('https://clipwatching.com/embed-', '')
                # '%s [%s] [%s]' is filled in later with server, quality and language
                title = '%s [%s] [%s]'
                quality = quality.replace('(', '').replace(')', '')
                if url != '':
                    itemlist.append(Item(channel=item.channel, title=title, language=IDIOMAS[language], url=url,
                                         action='play', quality=quality, infoLabels=item.infoLabels))
        itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server.capitalize(), i.quality,
                                                                                   i.language))

        itemlist = sorted(itemlist, key=lambda i: i.language)

        # Required by FilterTools
        # itemlist = filtertools.get_links(itemlist, item, list_language)

        # Required by AutoPlay
        autoplay.start(itemlist, item)

        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos' and type == 'Movie':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url, action="add_pelicula_to_library", extra="findvideos",
                     contentTitle=item.contentTitle))
        return itemlist
    except:
        return itemlist

def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.first = 0

    if texto != '':
        return list_all(item)

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    item.type = 'movie'
    item.first = 0
    try:
        if categoria == 'peliculas':
            item.url = host + 'peliculas-online'
        elif categoria == 'infantiles':
            item.url = host + 'peliculas-online/animacion'
        elif categoria == 'terror':
            item.url = host + 'peliculas-online/terror'

        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -1,77 +0,0 @@
{
    "id": "rarbg",
    "name": "Rarbg",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "https://dyncdn.me/static/20/img/logo_dark_nodomain2_optimized.png",
    "categories": [
        "torrent",
        "movie",
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "include_in_global_search",
            "label": "Incluir en busqueda global",
            "type": "bool",
            "visible": true
        },
        {
            "default": true,
            "enabled": true,
            "id": "modo_grafico",
            "label": "Buscar información extra (TMDB)",
            "type": "bool",
            "visible": true
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "CAST",
                "LAT",
                "VO",
                "VOS",
                "VOSE"
            ]
        },
        {
            "id": "timeout_downloadpage",
            "type": "list",
            "label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
            "default": 5,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "None",
                "1",
                "2",
                "3",
                "4",
                "5",
                "6",
                "7",
                "8",
                "9",
                "10"
            ]
        },
        {
            "id": "seleccionar_ult_temporadda_activa",
            "type": "bool",
            "label": "Seleccionar para Videoteca si estará activa solo la última Temporada",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,832 +0,0 @@
# -*- coding: utf-8 -*-

import re
import sys
import urllib
import urlparse
import time

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay


#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']

host = 'https://rarbgmirror.xyz/'
channel = 'rarbg'
categoria = channel.capitalize()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel)  # Update only the last season?
timeout = config.get_setting('timeout_downloadpage', channel)

def mainlist(item):
    logger.info()
    itemlist = []

    thumb_cartelera = get_thumb("now_playing.png")
    thumb_pelis = get_thumb("channels_movie.png")
    thumb_pelis_hd = get_thumb("channels_movie_hd.png")
    thumb_pelis_VO = get_thumb("channels_vos.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_series_hd = get_thumb("channels_tvshow_hd.png")
    thumb_series_VOD = get_thumb("videolibrary_tvshow.png")
    thumb_generos = get_thumb("genres.png")
    thumb_buscar = get_thumb("search.png")
    thumb_separador = get_thumb("next.png")
    thumb_settings = get_thumb("setting_0.png")

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]NOTA: Esta web puede considerar una intrusión[/COLOR]", folder=False, thumbnail=thumb_separador))
    itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]más de 1 usuario o 10 accesos por IP/Router.[/COLOR]", folder=False, thumbnail=thumb_separador))
    itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Si es bloqueado, renueve la IP en el Router[/COLOR]", folder=False, thumbnail=thumb_separador))

    itemlist.append(Item(channel=item.channel, title="Películas", action="listado", url=host + "torrents.php?category=movies&search=&order=data&by=DESC", thumbnail=thumb_pelis_VO, extra="peliculas"))
    itemlist.append(Item(channel=item.channel, title=" - Calidades", action="calidades", url=host + "torrents.php?category=movies&search=&order=data&by=DESC", thumbnail=thumb_pelis_hd, extra="peliculas"))
    #itemlist.append(Item(channel=item.channel, title=" - Géneros", action="generos", url=host + "catalog/movies/", thumbnail=thumb_generos, extra="peliculas"))

    itemlist.append(Item(channel=item.channel, title="Series", action="listado", url=host + "torrents.php?category=2;18;41;49&search=&order=data&by=DESC", thumbnail=thumb_series_VOD, extra="series"))
    itemlist.append(Item(channel=item.channel, title=" - Calidades", action="calidades", url=host + "torrents.php?category=2;18;41;49&search=&order=data&by=DESC", thumbnail=thumb_series_hd, extra="series"))
    #itemlist.append(Item(channel=item.channel, title=" - Géneros", action="generos", url=host + "catalog/tv/", thumbnail=thumb_generos, extra="series"))

    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "?s=%s", thumbnail=thumb_buscar, extra="search"))

    itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))

    itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))

    autoplay.show_option(item.channel, itemlist)  # Enable Autoplay

    return itemlist

def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret

def calidades(item):
    logger.info()
    itemlist = []

    data = ''
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:
        pass

    patron = '<div align="[^"]+"><div style="[^"]+" id="divadvsearch">(.*?)<\/a><\/div><\/div><\/form><\/div>'
    # Check that a page was downloaded and that it has the expected structure
    if not data or not scrapertools.find_single_match(data, patron):
        status, itemlist = check_blocked_IP(data, itemlist)  # Check whether the IP has been blocked
        if status:
            return itemlist  # IP blocked

        item = generictools.web_intervenida(item, data)  # Check that the site has not been shut down
        if item.intervencion:  # It has been shut down by court order
            for clone_inter, autoridad in item.intervencion:
                thumb_intervenido = get_thumb(autoridad)
                itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
            return itemlist  # Bail out

        logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + data)

        if not data:  # Nothing found, bail out
            itemlist.append(item.clone(action='', title=item.category + ': ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
            return itemlist  # If there is no more data something is broken; paint what we have

    data = scrapertools.find_single_match(data, patron)  # Select the block
    patron = '<div class="divadvscat"><input class="inputadvscat" type="checkbox" name="category\[.*?\]" value="[^"]+"\s*(?:.*?)?\/> <a href="([^"]+)">(.*?)<\/a>\s*<\/div>\s*'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if not matches:
        logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
        return itemlist  # If there is no more data something is broken; paint what we have

    #logger.debug(patron)
    #logger.debug(matches)
    #logger.debug(data)

    itemlist.append(item.clone(action="listado", title="ALL", extra2="calidades"))

    for scrapedurl, scrapedtitle in matches:
        if not "Mov" in scrapedtitle and item.extra == 'peliculas':
            continue
        if not "TV" in scrapedtitle and item.extra == 'series':
            continue

        title = scrapedtitle.strip().replace('Movs/', '').replace('Movies/', '')
        url = urlparse.urljoin(host, scrapedurl + "&search=&order=data&by=DESC")

        itemlist.append(item.clone(action="listado", title=title, url=url, extra2="calidades"))

    return itemlist

def listado(item):
    logger.info()
    itemlist = []
    item.category = categoria

    #logger.debug(item)

    curr_page = 1  # Initial page
    last_page = 99999  # Initial "last page" sentinel
    if item.curr_page:
        curr_page = int(item.curr_page)  # Coming from a previous pass, reuse it
        del item.curr_page  # ... and clear it
    if item.last_page:
        last_page = int(item.last_page)  # Coming from a previous pass, reuse it
        del item.last_page  # ... and clear it

    cnt_tot = 40  # Maximum number of items per page
    cnt_title = 0  # Counter of lines inserted into itemlist
    inicio = time.time()  # Keep the process from exceeding a reasonable time
    fin = inicio + 3  # After this time (seconds) we paint what we have
    timeout_search = timeout  # Timeout for downloads
    if item.extra == 'search':
        timeout_search = timeout * 2  # Slightly longer timeout for searches
        if timeout_search < 5:
            timeout_search = 5

    # Paging scheme to avoid empty or half-empty pages when a search hits shows with many episodes
    title_lista = []  # Keeps the list of shows already in itemlist, to avoid duplicate lines
    if item.title_lista:  # Coming from a previous pass, the list is already stored
        title_lista.extend(item.title_lista)  # Reuse the list from previous pages
        del item.title_lista  # ... and clean up

    if not item.extra2:  # Coming from Catalogue or Alphabet
        item.extra2 = ''

    next_page_url = item.url
    # Max lines allowed by TMDB. Max of 3 seconds per itemlist so performance does not degrade
    while cnt_title <= cnt_tot * 0.50 and curr_page <= last_page and fin > time.time():

        # Download the page
        data = ''
        try:
            data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout_search).data)
            data = unicode(data, "utf-8", errors="replace").encode("utf-8")
        except:
            pass

        curr_page += 1  # Point at the next page already
        if not data and not item.extra2:  # If the site is down, exit without raising
            logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
            break  # If there is no more data something is broken; paint what we have

        status, itemlist = check_blocked_IP(data, itemlist)  # Check whether the IP has been blocked
        if status:
            return itemlist  # IP blocked

        # One pattern for everything, including searches
        patron = '<tr class="lista2"><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*style="(?:[^"]+)?"><a href="[^"]+"><img src="([^"]+)?"\s*border="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*\/><\/a><\/td><td\s*align="(?:[^"]+)?"\s*class="(?:[^"]+)?"><a onmouseover="(?:[^"]+)?"\s*onmouseout="(?:[^"]+)?"\s*href="[^"]+" title="[^"]+">([^<]+)<\/a>\s*<a href="([^"]+)"><img src="[^"]+"\s*border="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*><\/a>\s*(?:<a href="([^"]+)"><img src="[^"]+"\s*border="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*><\/a>)?\s*<br><span.*?<\/span>\s*<\/td><td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">.*?<\/td><td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">([^<]+)?<\/td><td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">\s*<font color="(?:[^"]+)?">(\d+)?<\/font>'

        matches = re.compile(patron, re.DOTALL).findall(data)
        if not matches and item.extra != 'search':  # error
            item = generictools.web_intervenida(item, data)  # Check that the site has not been shut down
            if item.intervencion:  # Shut down by court order
                item, itemlist = generictools.post_tmdb_episodios(item, itemlist)  # Paint the error
                return itemlist  # Bail out

            logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
            break  # If there is no more data something is broken; paint what we have
        if not matches and item.extra == 'search':  # Empty search
            return itemlist  # Bail out

        #logger.debug("PATRON: " + patron)
        #logger.debug(matches)
        #logger.debug(data)

        # Look for the next and the last page
        patron_next = '<a href="([^"]+page=(\d+))" title="next page">'
        if item.extra == 'search':
            patron_last = '<a href="[^"]+"\s*title="page\s*\d+">\d+<\/a>\s*<b>(\d+)<\/b><\/div><\/div><\/td>'
        else:
            patron_last = 'title="previous page"[^<]+<\/a>\s*<a href="[^>]+>(\d+)<\/a>'

        try:
            next_page_url, next_page = scrapertools.find_single_match(data, patron_next)
            next_page = int(next_page)
            next_page_url = item.url + '&page=' + str(next_page)
        except:  # If not found, set it to 1
            next_page = 1
        #logger.debug('curr_page: ' + str(curr_page) + ' / next_page: ' + str(next_page) + ' / last_page: ' + str(last_page))

        if last_page == 99999:  # If it is still the sentinel, look it up
            if item.extra == 'search':
                last_page = 99
            try:
                data_last = ''
                data_last = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(item.url + '&page=%s' % last_page, timeout=timeout_search).data)
                data_last = unicode(data_last, "utf-8", errors="replace").encode("utf-8")
                last_page = int(scrapertools.find_single_match(data_last, patron_last))  # Load it as an integer
            except:  # If not found, fall back to the next page
                last_page = next_page
            #logger.debug('curr_page: ' + str(curr_page) + ' / next_page: ' + str(next_page) + ' / last_page: ' + str(last_page))

        # Process the matches
        for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedepisodes, scrapedsize, scrapedseeds in matches:

            title = scrapedtitle
            if scrapedepisodes:
                url = scrapedepisodes
            else:
                url = scrapedurl
            size = scrapedsize
            title = title.replace("&aacute;", "a").replace("&eacute;", "e").replace("&iacute;", "i").replace("&oacute;", "o").replace("&uacute;", "u").replace("&uuml;", "u").replace("&Ntilde;", "Ñ").replace("&ntilde;", "ñ").replace("&atilde;", "a").replace("&etilde;", "e").replace("&itilde;", "i").replace("&otilde;", "o").replace("&utilde;", "u").replace("&rsquo;", "'")

            if scrapedurl in title_lista:  # Skip titles we have already processed
                continue
            else:
                title_lista += [scrapedurl]  # Add it to the list of seen titles

            #cnt_title += 1
            item_local = item.clone()  # Work on a copy of Item
            if item_local.tipo:  # ... and clean it up
                del item_local.tipo
            if item_local.totalItems:
                del item_local.totalItems
            if item_local.intervencion:
                del item_local.intervencion
            if item_local.viewmode:
                del item_local.viewmode
            item_local.extra2 = True
            del item_local.extra2
            item_local.text_bold = True
            del item_local.text_bold
            item_local.text_color = True
            del item_local.text_color

            title_subs = []  # List to store important title info
            item_local.language = ['VO']  # List for languages
            item_local.quality = ''  # Initialise quality
            item_local.thumbnail = scrapedthumbnail  # Initialise thumbnail

            if item.extra == 'search':
                if scrapedepisodes:
                    item_local.extra = 'series'
                else:
                    item_local.extra = 'peliculas'

            item_local.url = urlparse.urljoin(host, url)  # Store the final url
            if item_local.extra != 'series':
                item_local.url += '&order=size&by=ASC'  # Store the final url
            item_local.context = "['buscar_trailer']"

            item_local.contentType = "movie"  # Movies by default
            item_local.action = "findvideos"

            # Parse movie release-name formats
            if item_local.extra == 'peliculas':
                patron_title = '(.*?)\.([1|2][9|0]\d{2})?\.(.*?)(?:-.*?)?$'
                if not scrapertools.find_single_match(title, patron_title):
                    logger.error('ERROR tratando título PELI: ' + title)
                    continue
                try:
                    title, year, item_local.quality = scrapertools.find_single_match(title, patron_title)
                except:
                    title = scrapedtitle
                    year = ''
                    item_local.quality = ''
                title = title.replace('.', ' ')
                item_local.quality = item_local.quality.replace('.', ' ')

            # Parse show, season and episode formats
            elif item_local.extra == 'series':
                patron_title = '(.*?)(\.[1|2][9|0]\d{2})?\.S\d{2}.*?\.([\d|A-Z]{2}.*?)(?:-.*?)?$'
                if not scrapertools.find_single_match(title, patron_title):
                    patron_title = '(.*?)\.*([1|2][9|0]\d{2})?(?:\.\d{2}\.\d{2}).*?\.([\d|A-Z]{2}.*?)(?:-.*?)?$'
                    if not scrapertools.find_single_match(title, patron_title):
                        logger.error('ERROR tratando título SERIE: ' + title)
                        continue
                try:
                    title, year, item_local.quality = scrapertools.find_single_match(title, patron_title)
                except:
                    title = scrapedtitle
                    year = ''
                    item_local.quality = ''
                title = title.replace('.', ' ')
                item_local.quality = item_local.quality.replace('.', ' ')
                year = '-'

                item_local.contentType = "tvshow"
                item_local.action = "episodios"
                item_local.season_colapse = True  # Show series grouped by season

            # Clean unneeded junk out of the title
            title = re.sub(r'(?i)TV|Online', '', title).strip()
            item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip()

            # Parse the year. If unclear, use '-'
            try:
                year_int = int(year)
                if year_int >= 1940 and year_int <= 2050:
                    item_local.infoLabels["year"] = year_int
                else:
                    item_local.infoLabels["year"] = '-'
            except:
                item_local.infoLabels["year"] = '-'

            # Finish cleaning the title
            title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title)
            title = title.replace('()', '').replace('[]', '').strip().lower().title()
            item_local.from_title = title.strip().lower().title()  # Keep this label for possible title disambiguation

            # Store the title depending on content type
            if item_local.contentType == "movie":
                item_local.contentTitle = title
            else:
                item_local.contentSerieName = title.strip().lower().title()

            item_local.title = title.strip().lower().title()

            # Temporary variable holding extra title info, restored after TMDB
            item_local.title_subs = title_subs

            # Save and drop the season count because TMDB sometimes misbehaves. Pass it as a complete series
            if item_local.contentSeason and (item_local.contentType == "season" or item_local.contentType == "tvshow"):
                item_local.contentSeason_save = item_local.contentSeason
                del item_local.infoLabels['season']

            # Filter by language if requested, and paint what passes
            if config.get_setting('filter_languages', channel) > 0:  # If a language is selected, filter
                itemlist = filtertools.get_link(itemlist, item_local, list_language)
            else:
                itemlist.append(item_local.clone())  # Otherwise paint everything

            cnt_title = len(itemlist)  # Counter of lines added

            #logger.debug(item_local)

    # Pass the whole itemlist through TMDB
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    # Polish the titles that came back from TMDB
    item, itemlist = generictools.post_tmdb_listado(item, itemlist)

    # Add paging if needed
    if curr_page <= last_page:
        if last_page:
            title = '%s de %s' % (curr_page - 1, last_page)
        else:
            title = '%s' % (curr_page - 1)

        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, title_lista=title_lista, url=next_page_url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page)))

    return itemlist
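
# The paging loop in listado() accumulates results until roughly half the page
# quota is filled, the last page is reached, or the ~3 second budget runs out.
# A minimal sketch of that control pattern in isolation (fetch_page is an
# assumed callable returning a list of matches for a page number; time is
# already imported at module level):
def fill_page(fetch_page, quota=40, budget=3):
    results = []
    page = 1
    deadline = time.time() + budget
    while len(results) <= quota * 0.50 and time.time() < deadline:
        batch = fetch_page(page)
        if not batch:
            break  # no more pages
        results.extend(batch)
        page += 1
    return results, page
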
def findvideos(item):
    logger.info()
    itemlist = []
    itemlist_t = []  # Itemlist with all links
    itemlist_f = []  # Itemlist with filtered links
    if not item.language:
        item.language = ['VO']  # VO by default
    matches = []
    item.category = categoria

    #logger.debug(item)

    # Download the page
    data = ''
    patron = '<tr class="lista2">\s*<td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*style="(?:[^"]+)?">\s*<a href="[^"]+">\s*<img src="([^"]+)?"\s*border="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*\/><\/a><\/td>\s*<td\s*align="(?:[^"]+)?"(?:\s*width="[^"]+")?\s*class="(?:[^"]+)?">\s*<a onmouseover="(?:[^"]+)?"\s*onmouseout="(?:[^"]+)?"\s*href="([^"]+)" title="[^"]+">(.*?)<\/a>\s*<a href="[^"]+">\s*<img src="[^"]+"\s*border="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*><\/a>(?:\s*<a.*?<\/a>)?\s*<br><span.*?<\/span>\s*<\/td>\s*<td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">.*?<\/td>\s*<td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">(.*?)?<\/td>\s*<td align="(?:[^"]+)?"\s*width="(?:[^"]+)?"\s*class="(?:[^"]+)?">\s*<font color="(?:[^"]+)?">(\d+)?<\/font>'

    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:
        pass

    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist  # If there is no more data something is broken; paint what we have

    status, itemlist = check_blocked_IP(data, itemlist)  # Check whether the IP has been blocked
    if status:
        return itemlist  # IP blocked

    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:  # error
        item = generictools.web_intervenida(item, data)  # Check that the site has not been shut down
        if item.intervencion:  # Shut down by court order
            item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)  # Paint the error
        else:
            logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
        return itemlist  # If there is no more data something is broken; paint what we have

    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)

    # Build the generic title of the video with everything TMDB returned
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    # Now handle the .torrent links with their different qualities
    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedsize, scrapedseeds in matches:
        # Work on a copy of Item
        item_local = item.clone()
        title = scrapedtitle

        # Parse movie and show release-name formats
        if item_local.contentType == 'movie':
            patron_title = '(.*?)\.([1|2][9|0]\d{2})?\.(.*?)(?:-.*?)?$'
            if not scrapertools.find_single_match(title, patron_title):
                continue
        else:
            patron_title = '(.*?)(\.[1|2][9|0]\d{2})?\.S\d{2}.*?\.([\d|A-Z]{2}.*?)(?:-.*?)?$'
            if not scrapertools.find_single_match(title, patron_title):
                patron_title = '(.*?)\.*([1|2][9|0]\d{2})?(?:\.\d{2}\.\d{2}).*?\.([\d|A-Z]{2}.*?)(?:-.*?)?$'
                if not scrapertools.find_single_match(title, patron_title):
                    continue

        try:
            title, year, item_local.quality = scrapertools.find_single_match(title, patron_title)
        except:
            title = scrapedtitle
            year = ''
            item_local.quality = ''
        title = title.replace('.', ' ')
        item_local.quality = item_local.quality.replace('.', ' ')
        item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip()

        # Use the size if listed; otherwise it would have to come from the .torrent file itself
        size = scrapedsize
        if size:
            item_local.title = '%s [%s]' % (item_local.title, size)  # Append size to the title
            size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
            item_local.quality = '%s [%s]' % (item_local.quality, size)  # Append size to the quality

        # Add the seed count to the quality, as extra information
        if scrapedseeds:
            item_local.quality = '%s [Seeds: %s]' % (item_local.quality, scrapedseeds)

        # Paint the torrent link
        item_local.url = urlparse.urljoin(host, scrapedurl)
        item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))

        # Tidy up title and quality, removing empty tags
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
        item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
        item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
        item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
        item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()

        item_local.alive = "??"  # Link quality not verified
        item_local.action = "play"  # Play the video
        item_local.server = "torrent"  # Torrent server

        itemlist_t.append(item_local.clone())  # Paint everything if languages are not filtered

        # Required by FilterTools
        if config.get_setting('filter_languages', channel) > 0:  # If a language is selected, filter
            itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)  # Paint if not empty

        #logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
        #logger.debug(item_local)

    if len(itemlist_f) > 0:  # If there are filtered entries...
        itemlist.extend(itemlist_f)  # ... paint the filtered list
    else:
        if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0:  # No filtered entries...
            thumb_separador = get_thumb("next.png")  # ... paint everything, with a warning
            itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
        itemlist.extend(itemlist_t)  # Paint everything if nothing was filtered

    # Required by AutoPlay
    autoplay.start(itemlist, item)  # Launch Autoplay

    return itemlist

def play(item):                                      # Prepares the download of the .torrent and of any external subtitles
    logger.info()
    itemlist = []
    headers = []
    data = ''                                        # Pre-set so a failed download below cannot leave it undefined
    import os
    import time                                      # Used by the unzip fallback below; assumed not guaranteed at module level
    from core import downloadtools
    from core import ziptools

    # Find the url of the .torrent file
    patron = '<tr><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*width="(?:[^"]+)?">\s*Torrent:<\/td><td class="(?:[^"]+)?">\s*<img src="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*border="(?:[^"]+)?"\s*\/>\s*<a onmouseover="(?:[^"]+)?"\s*onmouseout="(?:[^"]+)?" href="([^"]+)".*?<\/a>'
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:
        pass

    status, itemlist = check_blocked_IP(data, itemlist)          # Check whether the IP has been blocked by the site
    if status:
        return itemlist                                          # IP blocked

    if not scrapertools.find_single_match(data, patron):
        logger.error('ERROR 02: PLAY: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log: PATRON: ' + patron + ' / DATA: ' + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: PLAY: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
        return itemlist
    item.url = urlparse.urljoin(host, scrapertools.find_single_match(data, patron))

    # Look for Spanish subtitles
    patron = '<tr><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*>\s*Subs.*?<\/td><td class="(?:[^"]+)?"\s*>(.*?)(?:<br\/>)?<\/td><\/tr>'
    data_subt = scrapertools.find_single_match(data, patron)
    if data_subt:
        patron = '<a href="([^"]+)"\s*onmouseover="return overlib\('
        patron += "'Download Spanish subtitles'"
        patron += '\)"\s*onmouseout="(?:[^"]+)?"\s*><img src="(?:[^"]+)?"\s*><\/a>'
        subt = scrapertools.find_single_match(data_subt, patron)
        if subt:
            item.subtitle = urlparse.urljoin(host, subt)

    if item.subtitle:                                            # If there is a subtitles url, download the file
        headers.append(["User-Agent", httptools.get_user_agent()])    # Use the default User-Agent
        videolibrary_path = config.get_videolibrary_path()       # Absolute path derived from the video library
        if videolibrary_path.lower().startswith("smb://"):       # For SMB connections, use the local userdata instead
            videolibrary_path = config.get_data_path()           # Absolute path derived from Userdata
        videolibrary_path = os.path.join(videolibrary_path, "subtitles")

        # First delete the subtitles folder to clean it up, then recreate it
        if os.path.exists(videolibrary_path):
            import shutil
            shutil.rmtree(videolibrary_path, ignore_errors=True)
            time.sleep(1)
        if not os.path.exists(videolibrary_path):
            os.mkdir(videolibrary_path)

        subtitle_name = 'Rarbg-ES_SUBT.zip'                      # Name of the subtitles archive
        subtitle_folder_path = os.path.join(videolibrary_path, subtitle_name)    # Download path
        ret = downloadtools.downloadfile(item.subtitle, subtitle_folder_path, headers=headers, continuar=True, silent=True)
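        # 'ret' is not inspected here: the os.path.exists() check below is what
        # decides whether the zip actually arrived.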
        if os.path.exists(subtitle_folder_path):
            # Unzip inside the addon
            # ----------------------
            try:
                unzipper = ziptools.ziptools()
                unzipper.extract(subtitle_folder_path, videolibrary_path)
            except:
                import xbmc
                xbmc.executebuiltin('XBMC.Extract("%s", "%s")' % (subtitle_folder_path, videolibrary_path))
                time.sleep(1)

            # Delete the downloaded zip
            # -------------------------
            os.remove(subtitle_folder_path)

            # Take the first subtitles file as the default one
            for raiz, subcarpetas, ficheros in os.walk(videolibrary_path):
                for f in ficheros:
                    if f.endswith(".srt"):
                        #f_es = 'rarbg_subtitle.spa.srt'
                        f_es = scrapertools.find_single_match(item.url, '&f=(.*?).torrent$').replace('.', ' ').replace('-', ' ').lower() + '.spa.srt'
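                        # Example (hypothetical url): '...&f=Some.Movie.2018-RARBG.torrent'
                        # yields f_es = 'some movie 2018 rarbg.spa.srt'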
                        if not f_es:
                            f_es = item.infoLabels['originaltitle'] + '.spa.srt'
                            f_es = f_es.replace(':', '').lower()
                        os.rename(os.path.join(videolibrary_path, f), os.path.join(videolibrary_path, f_es))
                        item.subtitle = os.path.join(videolibrary_path, f_es)    # Subtitles file
                        break
                break

    itemlist.append(item.clone())                    # Normal playback

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    item.category = categoria

    #logger.debug(item)

    if item.from_title:
        item.title = item.from_title

    # Clean up the Season/Episode numbers that may be left over from Novedades
    season_display = 0
    if item.contentSeason:
        if item.season_colapse:                      # If we come from the Seasons menu...
            season_display = item.contentSeason      # ...save the season number to paint
            item.from_num_season_colapse = season_display
            del item.season_colapse
            item.contentType = "tvshow"
            if item.from_title_season_colapse:
                item.title = item.from_title_season_colapse
                del item.from_title_season_colapse
            if item.infoLabels['title']:
                del item.infoLabels['title']
        del item.infoLabels['season']
    if item.contentEpisodeNumber:
        del item.infoLabels['episode']
    if season_display == 0 and item.from_num_season_colapse:
        season_display = item.from_num_season_colapse

    # Fetch up-to-date info for the series. TMDB is essential for the video library
    if not item.infoLabels['tmdb_id']:
        tmdb.set_infoLabels(item, True)

    modo_ultima_temp_alt = modo_ultima_temp
    if item.ow_force == "1":                         # On a channel or url migration, refresh everything
        modo_ultima_temp_alt = False

    max_temp = 1
    if item.infoLabels['number_of_seasons']:
        max_temp = item.infoLabels['number_of_seasons']
    y = []
    if modo_ultima_temp_alt and item.library_playcounts:    # Find out how many seasons the video library holds
        patron = 'season (\d+)'
        matches = re.compile(patron, re.DOTALL).findall(str(item.library_playcounts))
        for x in matches:
            y += [int(x)]
        max_temp = max(y)
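    # Example (assumed shape): library_playcounts containing
    # {'season 1': 1, 'season 2': 0} -> matches = ['1', '2'] -> max_temp = 2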
    # Download the page
    data = ''                                        # The page number is inserted into the url
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)| ", "", httptools.downloadpage(item.url, timeout=timeout).data)
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:                                          # Some processing error: bail out
        pass

    if not data:
        logger.error("ERROR 01: EPISODIOS: La Web no responde o la URL es erronea" + item.url)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: EPISODIOS: La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist

    status, itemlist = check_blocked_IP(data, itemlist)          # Check whether the IP has been blocked
    if status:
        return itemlist                                          # IP blocked

    # Capture the seasons of episodes inside the series
    patron_temp = '<h1\s*class="[^"]+">Season\s*(\d+)<\/h1><div class="tvcontent"><div id="[^"]+"><\/div>(.*?<\/div><\/div>)(?:<script>.*?<\/script>)?<\/div>'
    temp_serie = re.compile(patron_temp, re.DOTALL).findall(data)
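    # temp_serie is expected to hold one ('<season number>', '<season markup>')
    # tuple per "Season N" block of the page.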
    for season_num, temporada in temp_serie:
        patron = '<div id="episode_(\d+)"><div class="[^"]+">\s*<a onclick="[^"]+"\s*class="[^"]+"><div class="[^"]+">.*?\s*(\d+)<\/div>\s*(.*?)\s*<'
        matches = re.compile(patron, re.DOTALL).findall(temporada)
        if not matches:                              # Error
            item = generictools.web_intervenida(item, data)      # Check whether the site has been taken down
            if item.intervencion:                    # It has been taken down by court order
                item, itemlist = generictools.post_tmdb_episodios(item, itemlist)    # Call the method that paints the error
                return itemlist                      # Exit

            logger.error("ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web. Reportar el error con el log'))
            return itemlist                          # If there is no more data, something is broken; paint what we have

        #logger.debug("PATRON: " + patron)
        #logger.debug(matches)
        #logger.debug(data)

        season = max_temp
        # Check whether we really know the maximum number of seasons
        if item.library_playcounts or (item.infoLabels['number_of_seasons'] and item.tmdb_stat):
            num_temporadas_flag = True
        else:
            num_temporadas_flag = False

        if modo_ultima_temp_alt and item.library_playcounts:     # If only the last season in the video library is updated
            if int(season_num) < max_temp:
                break                                # Leave the current FOR loop
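        # Note: num_temporadas_flag is set here but not read again in this function.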
        # Iterate over all the episodes, generating a local Item for each one in itemlist
        for epi_id, episode_num, scrapedtitle in matches:
            item_local = item.clone()
            item_local.action = "findvideos"
            item_local.contentType = "episode"
            item_local.extra = "episodios"
            if item_local.library_playcounts:
                del item_local.library_playcounts
            if item_local.library_urls:
                del item_local.library_urls
            if item_local.path:
                del item_local.path
            if item_local.update_last:
                del item_local.update_last
            if item_local.update_next:
                del item_local.update_next
            if item_local.channel_host:
                del item_local.channel_host
            if item_local.active:
                del item_local.active
            if item_local.contentTitle:
                del item_local.infoLabels['title']
            if item_local.season_colapse:
                del item_local.season_colapse

            item_local.title = ''
            item_local.context = "['buscar_trailer']"
            item_local.url = urlparse.urljoin(host, 'tv.php?ajax=1&tvepisode=%s' % epi_id)
            title = scrapedtitle
            item_local.language = ['VO']
            if not item_local.infoLabels['poster_path']:
                item_local.thumbnail = item_local.infoLabels['thumbnail']
            epi_rango = False

            try:
                item_local.contentSeason = int(season_num)
                if 'season pack' in title.lower():
                    item_local.contentEpisodeNumber = 1
                    epi_rango = True
                else:
                    item_local.contentEpisodeNumber = int(episode_num)
            except:
                logger.error('ERROR al extraer Temporada/Episodio: ' + title)
                item_local.contentSeason = 1
                item_local.contentEpisodeNumber = 0

            # If it is a multi-episode entry, flag the range
            if epi_rango:
                item_local.infoLabels['episodio_titulo'] = 'al 99'
                item_local.title = '%sx%s al 99 - Season Pack' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
            else:
                item_local.title = '%sx%s - ' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))

            if season_display > 0:
                if item_local.contentSeason > season_display:
                    continue
                elif item_local.contentSeason < season_display:
                    break

            itemlist.append(item_local.clone())
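            # Resulting titles look like '1x05 - ' (single episode) or
            # '1x01 al 99 - Season Pack' (multi-episode pack); the TMDB pass
            # below fills in the episode names.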
            #logger.debug(item_local)

    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))    # Sort the list

    if item.season_colapse and not item.add_videolibrary:        # Coming from a listing, show only the Seasons
        item, itemlist = generictools.post_tmdb_seasons(item, itemlist)

    if not item.season_colapse:                      # If this is not the Seasons screen, paint everything
        # Pass through TMDB and sort the list by season and episode
        tmdb.set_infoLabels(itemlist, True)

        # Call the method that polishes the titles obtained from TMDB
        item, itemlist = generictools.post_tmdb_episodios(item, itemlist)

    #logger.debug(item)

    return itemlist


def check_blocked_IP(data, itemlist):
    logger.info()
    thumb_separador = get_thumb("next.png")

    if 'Please wait while we try to verify your browser...' in data:
        logger.error("ERROR 99: La IP ha sido bloqueada por la Web" + " / DATA: " + data)
        itemlist.append(Item(channel=channel, url=host, title="[COLOR yellow]La IP ha sido bloqueada por la Web.[/COLOR]", folder=False, thumbnail=thumb_separador))
        itemlist.append(Item(channel=channel, url=host, title="[COLOR yellow]Fuerce la renovación de la IP en el Router[/COLOR]", folder=False, thumbnail=thumb_separador))
        from platformcode import platformtools
        platformtools.dialog_notification("IP bloqueada", "RARBG: Reiniciar ROUTER")

        return (True, itemlist)
    return (False, itemlist)

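# Typical call pattern, as used by play() and episodios() above:
#   status, itemlist = check_blocked_IP(data, itemlist)
#   if status:
#       return itemlist    # IP blocked

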
def actualizar_titulos(item):
    logger.info()

    item = generictools.update_title(item)           # Call the method that refreshes the title via tmdb.find_and_set_infoLabels

    # Return to the next action in the channel
    return item


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")

    try:
        item.url = host + 'torrents.php?category=2;18;41;49;14;48;17;44;45;47;50;51;52;42;46&search=%s' % texto
        item.extra = 'search'

        if texto != '':
            return listado(item)
    except:
        import sys
        import traceback
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        logger.error(traceback.format_exc())
        return []

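# Note: listado(), called from search() above, is presumably defined earlier
# in this channel file, outside this hunk.
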
@@ -1,23 +0,0 @@
{
    "id": "watchseries",
    "name": "WatchSeries",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "https://www2.watchmovie.io/img/icon/new-logo.png",
    "categories": [
        "movie",
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,268 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urllib
import base64
import urlparse

from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from channels import autoplay
from platformcode import config, logger


IDIOMAS = {'default': 'VO'}
title2 = {'Action': 'Action2', 'Xmas': 'Christmas', 'Kungfu': 'Martial%20Arts', 'Psychological': 'Genres', 'TV Show': 'TV', 'Sitcom': 'Genres', 'Costume': 'Genres', 'Mythological': 'Genres'}
list_language = IDIOMAS.values()
list_servers = ['directo', 'rapidvideo', 'streamango', 'openload', 'xstreamcdn']
list_quality = ['default']


host = "https://www2.watchmovie.io/"

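# title2 maps site genre names onto the file names used by the shared genre
# thumbnail repository, e.g. 'Kungfu' -> 'Martial%20Arts.png'; see section().
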
def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(item.clone(title="Películas", action='menu_movies', text_color="0xFFD4AF37", text_bold=True, thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"))
    itemlist.append(item.clone(title='Series', action='menu_series', thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png", text_color="0xFFD4AF37", text_bold=True))
    itemlist.append(item.clone(title="Buscar...", action="search", text_color="0xFF5AC0E0", text_bold=True, url=host + 'search.html?keyword=', thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Search.png"))

    autoplay.show_option(item.channel, itemlist)

    return itemlist

def menu_movies(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, title="Estrenos", fanart="http://i.imgur.com/c3HS8kj.png", action="novedades_cine", url=host, thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/New%20Releases.png"))
    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, title="Más Vistas", action="popular", url=host + "popular", extra="popular", thumbnail="https://github.com/master-1970/resources/raw/master/images/genres/0/All%20Movies%20by%20Watched.png"))
    itemlist.append(Item(channel=item.channel, text_color="0xFFD4AF37", text_bold=True, title="Géneros", action="section", url=host + "popular", thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Genres.png"))
    itemlist.append(Item(channel=item.channel, text_color="0xFFD4AF37", text_bold=True, title="Año", action="section", url=host + "popular", thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png"))

    return itemlist

def menu_series(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, text_color="0xFF399437", text_bold=True, action="novedades_episodios", title="Últimos Episodios de:", folder=False, thumbnail=item.thumbnail))
    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title="  Series Tv", url=host + "watch-series", extra="watch-series", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/New%20TV%20Episodes.png', type='tvshows'))
    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title="  Doramas", url=host + "drama", extra="drama", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Asian%20Movies.png', type='tvshows'))
    itemlist.append(Item(channel=item.channel, text_color="0xFF5AC0E0", text_bold=True, action="novedades_episodios", title="  Animes", url=host + "anime", extra="anime", thumbnail='https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Anime.png', type='anime'))

    return itemlist

def search(item, texto):
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        try:
            return popular(item)
        except:
            itemlist.append(item.clone(url='', title='No match found...', action=''))
    return itemlist

def section(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = []
    if 'Géneros' in item.title:
        patron = '<a href="([^"]+)" class="wpb_button wpb_btn-primary wpb_btn-small ">(.*?)</a>'
        action = 'popular'
        icono = ''
    elif 'Año' in item.title:
        patron = '<a href="([^"]+)" class="wpb_button wpb_btn-info wpb_btn-small ">(.*?)</a>'
        action = 'popular'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:

        url = host + scrapedurl
        title = scrapedtitle
        if 'Géneros' in item.title:
            if title in title2:
                title1 = title2[title]
            else:
                title1 = title
            icono = 'https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/' + title1 + '.png'
        else:
            icono = 'https://raw.githubusercontent.com/master-1970/resources/master/images/genres/0/Year.png'
        itemlist.append(Item(channel=item.channel,
                             action=action,
                             title=title,
                             url=url,
                             text_color="0xFF5AC0E0",
                             extra="popular",
                             thumbnail=icono
                             ))
    return itemlist

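# novedades_episodios() and novedades_cine() below share the same listing
# regex; they differ only in how the title is composed from its captures.
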
def novedades_episodios(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
    matches = re.compile('<div class="video_likes icon-tag"> (.*?)</div>[\s\S]+?<a href="(.*?)" class="view_more"></a>[\s\S]+?<img src="([^"]+)" alt="" class="imgHome" title="" alt="([^"]+)"[\s\S]+?</li>', re.DOTALL).findall(data)
    itemlist = []
    for episode, url, thumbnail, season in matches:

        if item.extra == "watch-series":
            scrapedinfo = season.split(' - ')
            scrapedtitle = scrapedinfo[0]
            season = scrapertools.find_single_match(scrapedinfo[1], 'Season (\d+)')
            episode = scrapertools.find_single_match(episode, 'Episode (\d+)')
            title = scrapedtitle + " %sx%s" % (season, episode)
        else:
            scrapedtitle = season
            title = scrapedtitle + ' - ' + episode
        url = urlparse.urljoin(host, url)

        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
                        contentSerieName=scrapedtitle)
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
    if url_pagination:
        url = urlparse.urljoin(host + item.extra, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(Item(channel=item.channel, action="novedades_episodios", title=title, url=url, extra=item.extra))
    return itemlist


def novedades_cine(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
    matches = re.compile('<div class="video_likes icon-tag"> (.*?)</div>[\s\S]+?<a href="(.*?)" class="view_more"></a>[\s\S]+?<img src="([^"]+)" alt="" class="imgHome" title="" alt="([^"]+)"[\s\S]+?</li>', re.DOTALL).findall(data)
    itemlist = []
    for episode, url, thumbnail, season in matches:
        scrapedyear = '-'
        title = "%s [%s]" % (season, episode)
        url = urlparse.urljoin(host, url)
        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, contentTitle=season, thumbnail=thumbnail, infoLabels={'year': scrapedyear})
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
    if url_pagination:
        url = urlparse.urljoin(host + item.extra, url_pagination)
        title = ">> Pagina Siguiente"
        itemlist.append(Item(channel=item.channel, action="novedades_cine", title=title, url=url))
    return itemlist

def popular(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    url_pagination = scrapertools.find_single_match(data, "<li class='next next page-numbers'><a href='(.*?)'")
    matches = re.compile('<div class="video_image_container sdimg">[\s\S]+?<a href="(.*?)" class="view_more" title="([^"]+)"></a>[\s\S]+?<img src="([^"]+)" alt=""', re.DOTALL).findall(data)
    itemlist = []
    for url, title, thumbnail in matches:
        scrapedyear = '-'
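        # Three url shapes are handled below: '... - Season N' titles go to that
        # season's episode list, '-info/' urls to the full episode list, and
        # anything else is treated as a movie (its '-episode-0' page).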
if "- Season " in title:
|
||||
scrapedinfo = title.split(' - Season ')
|
||||
title2 = scrapedinfo[0]
|
||||
season = scrapedinfo[1]
|
||||
url = urlparse.urljoin(host, url)+ "/season"
|
||||
new_item = Item(channel=item.channel, action="episodios",title=title, contentSerieName=title2, url=url, thumbnail=thumbnail,infoLabels={'season':season})
|
||||
elif "-info/" in url:
|
||||
url = urlparse.urljoin(host, url)
|
||||
url = url.replace("-info/", "/")+ "/all"
|
||||
new_item = Item(channel=item.channel, action="episodios",title=title, contentSerieName=title, url=url, thumbnail=thumbnail)
|
||||
else:
|
||||
url = urlparse.urljoin(host, url)+"-episode-0"
|
||||
extra = "film"
|
||||
new_item = Item(channel=item.channel, action="findvideos",title=title, url=url, extra=extra, contentTitle=title, thumbnail=thumbnail,infoLabels={'year':scrapedyear})
|
||||
itemlist.append(new_item)
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
|
||||
if url_pagination:
|
||||
url = urlparse.urljoin(host + item.extra, url_pagination)
|
||||
title = ">> Pagina Siguiente"
|
||||
itemlist.append(Item(channel=item.channel, action="popular", title=title, url=url))
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
    matches = re.compile('<div class="vid_info"><span><a href="(.*?)" title="(.*?)" class="videoHname"><b>Episode (\d+)', re.DOTALL).findall(data)
    for url, title, episode in matches:
        url = urlparse.urljoin(host, url)
        thumbnail = item.thumbnail
        title = title + " - Ep. " + episode
        if " Season " in title:
            scrapedinfo = title.split(' Season ')
            title = scrapedinfo[0] + " " + infoLabels['season'] + "x" + episode
        infoLabels['episode'] = episode
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             infoLabels=infoLabels
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='en')
    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []
    if "-episode-0" in item.url:
        data1 = httptools.downloadpage(item.url).data
        if "Page not found</h1>" in data1:
            item.url = item.url.replace("-episode-0", "-episode-1")

    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
    matches = scrapertools.find_multiple_matches(data, 'data-video="(.*?)"')
    url = ''
    urlsub = ''
    urlsub = scrapertools.find_single_match(data, "&sub=(.*?)&cover")
    if urlsub != '':
        urlsub = base64.b64decode(urlsub)
        urlsub = 'https://sub.movie-series.net' + urlsub
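    # Each data-video source is resolved below: '/streaming.php' embeds are
    # probed for a Google redirector file url (falling back to '/load.php'),
    # '/load.php' embeds for a cdnfile.info url; anything else is used as-is.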
    for source in matches:
        if '/streaming.php' in source:
            new_data = httptools.downloadpage("https:" + source).data
            url = scrapertools.find_single_match(new_data, "file: '(https://redirector.*?)'")
            thumbnail = "https://martechforum.com/wp-content/uploads/2015/07/drive-300x300.png"
            if url == "":
                source = source.replace("streaming.php", "load.php")
        elif '/load.php' in source:
            new_data = httptools.downloadpage("https:" + source).data
            url = scrapertools.find_single_match(new_data, "file: '(https://[A-Za-z0-9]+.cdnfile.info/.*?)'")
            thumbnail = "https://vidcloud.icu/img/logo_vid.png"
        else:
            url = source
            thumbnail = ""
        if "https://redirector." in url or "cdnfile.info" in url:
            url = url + "|referer=https://vidcloud.icu/"

        if url != "":
            itemlist.append(Item(channel=item.channel, url=url, title='%s', action='play', plot=item.plot, thumbnail=thumbnail, subtitle=urlsub))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra == 'film':
        itemlist.append(Item(channel=item.channel, title="Añadir a la Videoteca", text_color="yellow",
                             action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
                             contentTitle=item.contentTitle
                             ))

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist