Merge remote-tracking branch 'alfa-addon/master'
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.8" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.9" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
@@ -18,13 +18,18 @@
        <screenshot>resources/media/themes/ss/4.jpg</screenshot>
    </assets>
    <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
¤ cinehindi ¤ repelis
¤ rexpelis ¤ yape
¤ bloghorror ¤ pelkex
¤ documaniatv ¤ mejortorrent
[COLOR green][B]Arreglos[/B][/COLOR]
¤ cinetux ¤ asialiveaction
¤ dospelis ¤ pelisfox
¤ pelisplus ¤ pelisplusco
¤ poseidonhd ¤ yts

¤ arreglos internos
[COLOR green][B]Novedades[/B][/COLOR]
¤ peliculashd ¤ peliculonhd
¤ tikiwiki ¤ vidcloud
¤ dramasjc ¤ xms

¤ Agradecimientos a @diegotcba y @wrlopez por colaborar en esta versión

    </news>
    <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

@@ -49,7 +49,7 @@ def category(item):
        data = scrapertools.find_single_match(data, '<span>Año</span>.*?</ul>')
    elif item.cat == 'quality':
        data = scrapertools.find_single_match(data, '<span>Calidad</span>.*?</ul>')
    patron = "<li>([^<]+)<a href='([^']+)'>"
    patron = "<li.*?>([^<]+)<a href='([^']+)'>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl in matches:
        if scrapedtitle != 'Próximas Películas':
@@ -82,8 +82,10 @@ def search_results(item):

def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = item.url + texto

    if texto != '':
        return search_results(item)

@@ -113,40 +115,67 @@ def lista(item):
    logger.info()
    next = True
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    css_data = scrapertools.find_single_match(data, "<style id='page-skin-1' type='text/css'>(.*?)</style>")

    data = scrapertools.find_single_match(data, "itemprop='headline'>.*?</h2>.*?</ul>")
    patron = '<span class="([^"]+)">.*?<figure class="poster-bg"><header><span>(\d{4})</span></header><img src="([^"]+)" />'
    patron += '<footer>(.*?)</footer></figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'

    patron = '<span class="([^"]+)">.*?<figure class="poster-bg">(.*?)<img src="([^"]+)" />'
    patron += '(.*?)</figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'
    matches = scrapertools.find_multiple_matches(data, patron)

    first = int(item.first)
    last = first + 19
    if last > len(matches):
        last = len(matches)
        next = False
    for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle, scrapedurl in matches[first:last]:
        patron_quality = "<span>(.+?)</span>"
        quality = scrapertools.find_multiple_matches(scrapedquality, patron_quality)
        qual = ""
        for calidad in quality:
            qual = qual + "[" + calidad + "] "
        title = "%s [%s] %s" % (scrapedtitle, scrapedyear, qual)
        new_item = Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail,
                        type=scrapedtype, infoLabels={'year': scrapedyear})

    for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle, scrapedurl in matches[first:last]:
        year = scrapertools.find_single_match(scrapedyear, '<span>(\d{4})</span>')

        if not year:
            class_year = scrapertools.find_single_match(scrapedyear, 'class="([^\"]+)"')
            year = scrapertools.find_single_match(css_data, "\." + class_year + ":after {content:'(\d{4})';}")
            if not year:
                year = scrapertools.find_single_match(data, "headline'>(\d{4})</h2>")

        qual = ""
        if scrapedquality:
            patron_qualities = '<i class="([^"]+)"></i>'
            qualities = scrapertools.find_multiple_matches(scrapedquality, patron_qualities)

            for quality in qualities:
                patron_desc = "\." + quality + ":after {content:'([^\']+)';}"
                quality_desc = scrapertools.find_single_match(css_data, patron_desc)

                qual = qual + "[" + quality_desc + "] "

        title = "%s [%s] %s" % (scrapedtitle, year, qual)

        new_item = Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail,
                        type=scrapedtype, infoLabels={'year': year})

        if scrapedtype.strip() == 'sr':
            new_item.contentSerieName = scrapedtitle
            new_item.action = 'episodios'
        else:
            new_item.contentTitle = scrapedtitle
            new_item.action = 'findvideos'

        if scrapedtype == item.type or item.type == 'cat':
            itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    #pagination
    url_next_page = item.url
    first = last
    if next:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))

    return itemlist

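Editor's note: the interesting move in the new lista() above is that the Blogger template publishes the year and quality strings as CSS ":after {content:...}" rules in the page's inline stylesheet, keyed by class name, so the scraper resolves each class against css_data instead of reading the markup. A self-contained sketch of just that lookup (the sample stylesheet and class names are invented, not taken from the site):

import re

# Stylesheet in the shape the channel expects; the values here are made up.
CSS_SAMPLE = ".y2018:after {content:'2018';} .qhd:after {content:'HD 1080p';}"

def css_after_content(css_data, class_name):
    # Return the content:'...' text of a ".class:after" rule, or "" if absent.
    match = re.search(r"\." + re.escape(class_name) + r":after {content:'([^']+)';}", css_data)
    return match.group(1) if match else ""

print(css_after_content(CSS_SAMPLE, "y2018"))  # -> 2018
print(css_after_content(CSS_SAMPLE, "qhd"))    # -> HD 1080p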
@@ -3,8 +3,8 @@
    "name": "BlogHorror",
    "active": true,
    "adult": false,
    "language": [""],
    "thumbnail": "",
    "language": [],
    "thumbnail": "https://i.postimg.cc/gcgQhKTL/2018-10-10_20_34_57-_Peliculas_de_Terror_BLOGHORROR.png",
    "banner": "",
    "categories": [
        "movie",
@@ -29,17 +29,9 @@
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "id": "include_in_newest_torrent",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "label": "Incluir en Novedades - Torrent",
            "default": true,
            "enabled": true,
            "visible": true
@@ -51,18 +43,6 @@
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "LAT"
            ]
        }
    ]
}

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-

import re
from channels import autoplay
from channels import filtertools
from core import httptools
@@ -231,6 +232,7 @@ def findvideos(item):
    data = httptools.downloadpage(item.url).data
    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
        return
        if list_enlaces:
            itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1,
                                       text_bold=True))
@@ -260,103 +262,159 @@ def findvideos(item):
    return itemlist


def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
    logger.info()
    lista_enlaces = []
    matches = []
    if type == "online": t_tipo = "Ver Online"
    if type == "descarga": t_tipo = "Descargar"
    data = data.replace("\n", "")
    if type == "online":
        patron = '(?is)class="playex.*?sheader'
        bloque1 = scrapertools.find_single_match(data, patron)
        patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
        match = scrapertools.find_multiple_matches(data, patron)
        for scrapedoption, language in match:
            scrapedserver = ""
            lazy = ""
            if "lazy" in bloque1:
                lazy = "lazy-"
            patron = '(?s)id="%s".*?metaframe.*?%ssrc="([^"]+)' % (scrapedoption, lazy)
            url = scrapertools.find_single_match(bloque1, patron)
            if "goo.gl" in url:
                url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
            if "drive.php" in url:
                scrapedserver = "gvideo"
            if "player" in url:
                scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
                if "ok" in scrapedserver: scrapedserver = "okru"
            matches.append([url, scrapedserver, "", language.strip(), t_tipo])
    bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
    bloque2 = bloque2.replace("\t", "").replace("\r", "")
    patron = '(?s)optn" href="([^"]+)'
    patron += '.*?alt="([^\.]+)'
    patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
    patron += '.*?src="[^>]+"?/>([^<]+)'
    patron += '.*?/span>([^<]+)'
    matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
    filtrados = []
    for match in matches:
        scrapedurl = match[0]
        scrapedserver = match[1]
        scrapedcalidad = match[2]
        language = match[3]
        scrapedtipo = match[4]
        if t_tipo.upper() not in scrapedtipo.upper():
            continue
        title = " Mirror en %s (" + language + ")"
        if len(scrapedcalidad.strip()) > 0:
            title += " (Calidad " + scrapedcalidad.strip() + ")"
# def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
# logger.info()
# lista_enlaces = []
# matches = []
# if type == "online": t_tipo = "Ver Online"
# if type == "descarga": t_tipo = "Descargar"
# data = data.replace("\n", "")
# if type == "online":
# patron = '(?is)class="playex.*?sheader'
# bloque1 = scrapertools.find_single_match(data, patron)
# patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
# match = scrapertools.find_multiple_matches(data, patron)
# for scrapedoption, language in match:
# scrapedserver = ""
# lazy = ""
# if "lazy" in bloque1:
# lazy = "lazy-"
# patron = '(?s)id="%s".*?metaframe.*?%ssrc="([^"]+)' % (scrapedoption, lazy)
# url = scrapertools.find_single_match(bloque1, patron)
# if "goo.gl" in url:
# url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
# if "drive.php" in url:
# scrapedserver = "gvideo"
# if "player" in url:
# scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
# if "ok" in scrapedserver: scrapedserver = "okru"
# matches.append([url, scrapedserver, "", language.strip(), t_tipo])
# bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
# bloque2 = bloque2.replace("\t", "").replace("\r", "")
# patron = '(?s)optn" href="([^"]+)'
# patron += '.*?alt="([^\.]+)'
# patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
# patron += '.*?src="[^>]+"?/>([^<]+)'
# patron += '.*?/span>([^<]+)'
# matches.extend(scrapertools.find_multiple_matches(bloque2, patron))
# filtrados = []
# for match in matches:
# scrapedurl = match[0]
# scrapedserver = match[1]
# scrapedcalidad = match[2]
# language = match[3]
# scrapedtipo = match[4]
# if t_tipo.upper() not in scrapedtipo.upper():
# continue
# title = " Mirror en %s (" + language + ")"
# if len(scrapedcalidad.strip()) > 0:
# title += " (Calidad " + scrapedcalidad.strip() + ")"
#
# if filtro_idioma == 3 or item.filtro:
# lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
# url=scrapedurl, server=scrapedserver,
# extra=item.url, contentThumbnail = item.thumbnail,
# language=language))
# else:
# idioma = dict_idiomas[language]
# if idioma == filtro_idioma:
# lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
# url=scrapedurl, server=scrapedserver,
# extra=item.url, contentThumbnail = item.thumbnail,
# language=language))
# else:
# if language not in filtrados:
# filtrados.append(language)
# lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server.capitalize())
# if filtro_idioma != 3:
# if len(filtrados) > 0:
# title = "Mostrar también enlaces filtrados en %s" % ", ".join(filtrados)
# lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
# filtro=True))
# return lista_enlaces
#
#
# def play(item):
# logger.info()
# itemlist = []
# if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
# data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
# id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
# item.url = "http://docs.google.com/get_video_info?docid=" + id
# if item.server == "okru":
# item.url = "https://ok.ru/videoembed/" + id
# if item.server == "youtube":
# item.url = "https://www.youtube.com/embed/" + id
# elif "links" in item.url or "www.cinetux.me" in item.url:
# data = httptools.downloadpage(item.url).data
# scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
# if scrapedurl == "":
# scrapedurl = scrapertools.find_single_match(data, '(?i)frame.*?src="(http[^"]+)')
# if scrapedurl == "":
# scrapedurl = scrapertools.find_single_match(data, 'replace."([^"]+)"')
# elif "goo.gl" in scrapedurl:
# scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
# "location", "")
# item.url = scrapedurl
# item.server = ""
# itemlist.append(item.clone())
# itemlist = servertools.get_servers_itemlist(itemlist)
# for i in itemlist:
# i.thumbnail = i.contentThumbnail
# return itemlist

        if filtro_idioma == 3 or item.filtro:
            lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
                                            url=scrapedurl, server=scrapedserver,
                                            extra=item.url, contentThumbnail = item.thumbnail,
                                            language=language))
        else:
            idioma = dict_idiomas[language]
            if idioma == filtro_idioma:
                lista_enlaces.append(item.clone(title=title, action="play", text_color=color2,
                                                url=scrapedurl, server=scrapedserver,
                                                extra=item.url, contentThumbnail = item.thumbnail,
                                                language=language))


def get_source(url, referer=None):
    logger.info()
    if referer == None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer': referer}).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def findvideos(item):
    import urllib
    logger.info()

    itemlist = []

    data = get_source(item.url)

    patron = 'class="title">([^>]+)</span>.*?data-type="([^"]+)" data-post="(\d+)" data-nume="(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for language, tp, pt, nm in matches:

        post = {'action': 'doo_player_ajax', 'post': pt, 'nume': nm, 'type': tp}
        post = urllib.urlencode(post)
        new_data = httptools.downloadpage(CHANNEL_HOST+'wp-admin/admin-ajax.php', post=post, headers={'Referer': item.url}).data
        if not config.get_setting('unify'):
            if item.quality == '':
                quality = 'SD'
            else:
                if language not in filtrados:
                    filtrados.append(language)
    lista_enlaces = servertools.get_servers_itemlist(lista_enlaces, lambda i: i.title % i.server.capitalize())
    if filtro_idioma != 3:
        if len(filtrados) > 0:
            title = "Mostrar también enlaces filtrados en %s" % ", ".join(filtrados)
            lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
                                            filtro=True))
    return lista_enlaces
                quality = item.quality
            title = ' [%s][%s]' % (quality, IDIOMAS[language])
        else:
            title = ''
        url = scrapertools.find_single_match(new_data, "src='([^']+)'")
        itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=item.quality,
                             language=IDIOMAS[language], infoLabels=item.infoLabels))

    patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
    matches = re.compile(patron, re.DOTALL).findall(data)

def play(item):
    logger.info()
    itemlist = []
    if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
        data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
        id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
        item.url = "http://docs.google.com/get_video_info?docid=" + id
        if item.server == "okru":
            item.url = "https://ok.ru/videoembed/" + id
        if item.server == "youtube":
            item.url = "https://www.youtube.com/embed/" + id
    elif "links" in item.url or "www.cinetux.me" in item.url:
        data = httptools.downloadpage(item.url).data
        scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
        if scrapedurl == "":
            scrapedurl = scrapertools.find_single_match(data, '(?i)frame.*?src="(http[^"]+)')
        if scrapedurl == "":
            scrapedurl = scrapertools.find_single_match(data, 'replace."([^"]+)"')
        elif "goo.gl" in scrapedurl:
            scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
                "location", "")
        item.url = scrapedurl
        item.server = ""
    itemlist.append(item.clone())
    itemlist = servertools.get_servers_itemlist(itemlist)
    for i in itemlist:
        i.thumbnail = i.contentThumbnail
    return itemlist
    for hidden_url, quality, language in matches:

        if not config.get_setting('unify'):
            title = ' [%s][%s]' % (quality, IDIOMAS[language])
        else:
            title = ''
        new_data = get_source(hidden_url)
        url = scrapertools.find_single_match(new_data, '"url":"([^"]+)"')
        url = url.replace('\\/', '/')
        itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
                             language=IDIOMAS[language], infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    return itemlist
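Editor's note: the new findvideos() above follows the usual DooPlayer (WordPress theme) flow: each play tab carries data-type/data-post/data-nume attributes, and POSTing them to wp-admin/admin-ajax.php with action=doo_player_ajax returns the embed markup. A minimal sketch of that round trip, assuming Alfa's core.httptools; channel_host stands in for the channel's real CHANNEL_HOST constant:

# -*- coding: utf-8 -*-
import re
import urllib                  # Python 2, as in the channel code
from core import httptools     # Alfa core download helper

def dooplayer_embed_urls(channel_host, page_url, data_type, data_post, data_nume):
    # POST the three data-* attributes scraped from the play button.
    post = urllib.urlencode({'action': 'doo_player_ajax', 'type': data_type,
                             'post': data_post, 'nume': data_nume})
    data = httptools.downloadpage(channel_host + 'wp-admin/admin-ajax.php',
                                  post=post, headers={'Referer': page_url}).data
    # The reply wraps the player in an iframe; its src is the embed URL.
    return re.compile("src='([^']+)'").findall(data)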
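Editor's note: the play() code above also shows how the channel expands goo.gl short links without downloading the target page — a headers-only request with redirects disabled, reading the Location header. The same step, isolated (assumes Alfa's core.httptools; the URL check mirrors the channel's):

from core import httptools

def expand_short_url(url):
    # Follow one redirect hop manually: fetch only the headers and read Location.
    if "goo.gl" in url:
        response = httptools.downloadpage(url, follow_redirects=False, only_headers=True)
        url = response.headers.get("location", "")
    return url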
@@ -3,7 +3,7 @@
    "name": "Dilo",
    "active": true,
    "adult": false,
    "language": [],
    "language": ["cast", "lat"],
    "thumbnail": "https://s22.postimg.cc/u6efsniqp/dilo.png",
    "banner": "",
    "categories": [

@@ -1,79 +0,0 @@
{
    "id": "documaniatv",
    "name": "DocumaniaTV",
    "active": true,
    "adult": false,
    "language": ["cast"],
    "banner": "",
    "thumbnail": "https://www.documaniatv.com/uploads/xcustom-logo.png.pagespeed.ic.lxJKR_lQE9.webp",
    "version": 1,
    "categories": [
        "documentary",
        "vos",
        "direct",
        "torrent"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "CAST",
                "LAT",
                "VO",
                "VOS",
                "VOSE"
            ]
        },
        {
            "id": "timeout_downloadpage",
            "type": "list",
            "label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
            "default": 5,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "None",
                "1",
                "2",
                "3",
                "4",
                "5",
                "6",
                "7",
                "8",
                "9",
                "10"
            ]
        },
        {
            "id": "include_in_newest_documentales",
            "type": "bool",
            "label": "Incluir en Novedades - Documentales",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,792 +0,0 @@
# -*- coding: utf-8 -*-

import re
import sys
import urllib
import urlparse
import time

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay


#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo']


host = 'https://www.documaniatv.com/'
channel = "documaniatv"

categoria = channel.capitalize()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)


def mainlist(item):
    logger.info()
    itemlist = []

    thumb_docus = get_thumb("channels_documentary.png")
    thumb_series = get_thumb("channels_tvshow.png")
    thumb_buscar = get_thumb("search.png")
    thumb_separador = get_thumb("next.png")
    thumb_settings = get_thumb("setting_0.png")
    thumb_cartelera = get_thumb("now_playing.png")
    thumb_pelis_vos = get_thumb("channels_vos.png")
    thumb_popular = get_thumb("popular.png")
    thumb_generos = get_thumb("genres.png")

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=host + "newvideos.html", thumbnail=thumb_docus, extra="novedades"))
    itemlist.append(Item(channel=item.channel, title="Los Más Vistos", action="listado", url=host + "topvideos.html", thumbnail=thumb_popular, extra="populares"))
    itemlist.append(Item(channel=item.channel, title="Por Géneros", action="categorias", url=host + "categorias-y-canales.html", thumbnail=thumb_generos, extra="categorias"))
    itemlist.append(Item(channel=item.channel, title="Series", action="listado", url=host + "top-series-documentales.html", thumbnail=thumb_series, extra="series"))

    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "search.php?keywords=", thumbnail=thumb_buscar, extra="search"))

    itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))

    itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))

    autoplay.show_option(item.channel, itemlist)  #Activamos Autoplay

    return itemlist


def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return


def categorias(item):
    logger.info()

    itemlist = []

    data = ''
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:
        pass

    patron = '<a href="([^"]+)" title="([^"]+)">'
    #Verificamos si se ha cargado una página, y si además tiene la estructura correcta
    if not data or not scrapertools.find_single_match(data, patron):
        item = generictools.web_intervenida(item, data)  #Verificamos que no haya sido clausurada
        if item.intervencion:  #Sí ha sido clausurada judicialmente
            for clone_inter, autoridad in item.intervencion:
                thumb_intervenido = get_thumb(autoridad)
                itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
            return itemlist  #Salimos

        logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + data)
        if not data:  #Si no ha logrado encontrar nada, salimos
            itemlist.append(item.clone(action='', title=item.category + ': ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
            return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

    matches = re.compile(patron, re.DOTALL).findall(data)

    if not matches:
        logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
        return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

    #logger.debug(matches)

    for scrapedurl, scrapedtitle in matches:
        if 'series documentales' in scrapedtitle.lower():
            continue
        itemlist.append(item.clone(action="listado", title=scrapedtitle.capitalize().strip(), url=scrapedurl))

    return itemlist


def listado(item):
    logger.info()
    itemlist = []
    item.category = categoria

    #logger.debug(item)

    curr_page = 1           # Página inicial Web
    curr_page_foot = 1      # Página inicial Alfa
    last_page = 99999       # Última página inicial
    last_page_foot = 1      # Última página inicial
    cnt_tot = 40            # Poner el num. máximo de items por página
    cnt_title = 0           # Contador de líneas insertadas en Itemlist
    cnt_title_tot = 0       # Contador de líneas insertadas en Itemlist, total
    if item.curr_page:
        curr_page = int(item.curr_page)             # Si viene de una pasada anterior, lo usamos
        del item.curr_page                          # ... y lo borramos
    if item.curr_page_foot:
        curr_page_foot = int(item.curr_page_foot)   # Si viene de una pasada anterior, lo usamos
        del item.curr_page_foot                     # ... y lo borramos
    if item.last_page:
        last_page = int(item.last_page)             # Si viene de una pasada anterior, lo usamos
        del item.last_page                          # ... y lo borramos
    if item.last_page_foot:
        last_page_foot = int(item.last_page_foot)   # Si viene de una pasada anterior, lo usamos
        del item.last_page_foot                     # ... y lo borramos
    if item.cnt_tot:
        cnt_tot = int(item.cnt_tot)                 # Si viene de una pasada anterior, lo usamos
        del item.cnt_tot                            # ... y lo borramos
    if item.cnt_title_tot:
        cnt_title_tot = int(item.cnt_title_tot)     # Si viene de una pasada anterior, lo usamos
        del item.cnt_title_tot                      # ... y lo borramos

    inicio = time.time()        # Controlaremos que el proceso no exceda de un tiempo razonable
    fin = inicio + 10           # Después de este tiempo pintamos (segundos)
    timeout_search = timeout    # Timeout para descargas
    if item.extra == 'search':
        timeout_search = timeout * 2  # Timeout un poco más largo para las búsquedas
        if timeout_search < 5:
            timeout_search = 5        # Timeout un poco más largo para las búsquedas

    if not item.extra2:  # Si viene de Catálogo o de Alfabeto
        item.extra2 = ''

    next_page_url = item.url
    #Máximo num. de líneas permitidas por TMDB. Máx de 10 segundos por Itemlist para no degradar el rendimiento
    while cnt_title < cnt_tot and curr_page <= last_page and fin > time.time():

        # Descarga la página
        data = ''
        try:
            data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout_search).data)
            data = unicode(data, "utf-8", errors="replace").encode("utf-8")
        except:
            pass

        if not data:  #Si la web está caída salimos sin dar error
            logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
            break  #si no hay más datos, algo no funciona, pintamos lo que tenemos

        #Patrón para todo, menos para Series
        patron = '<span class="pm-label-duration">(.*?)<\/span>.*?<a href="([^"]+)" title="([^"]+)">.*?data-echo="([^"]+)"'

        #Si viene de Series, ponemos un patrón especializado
        if item.extra == 'series':
            patron = '(?:<span class="pm-label-duration">(.*?)<\/span>.*?)?<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"'

        matches = re.compile(patron, re.DOTALL).findall(data)
        if not matches and not 'Lo siento, tu búsqueda no arrojó ningún resultado, intenta con otras palabras.' in data:  #error
            item = generictools.web_intervenida(item, data)  #Verificamos que no haya sido clausurada
            if item.intervencion:  #Sí ha sido clausurada judicialmente
                item, itemlist = generictools.post_tmdb_episodios(item, itemlist)  #Llamamos al método para el pintado del error
                return itemlist  #Salimos

            logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
            break  #si no hay más datos, algo no funciona, pintamos lo que tenemos

        matches_len = len(matches)
        if matches_len > cnt_title_tot and cnt_title_tot > 0:
            matches = matches[cnt_title_tot:]

        #logger.debug("PATRON: " + patron)
        #logger.debug(matches)
        #logger.debug(data)

        #Buscamos la url de paginado y la última página
        data_page = scrapertools.find_single_match(data, '<ul class="pagination pagination-sm pagination-arrows">.*?<\/li><\/ul><\/div><\/div> <\/div>')
        if item.extra == 'series':
            patron = '>(\d+)?<\/a><\/li><li class=""><a href="([^"]+-(\d+).html)"><i class="fa fa-arrow-right"><\/i><\/a><\/li><\/ul><\/div><\/div>\s?<\/div>'
        elif item.extra == 'categorias':
            patron = '>(\d+)?<\/a><\/li><li class=""><a href="([^"]+-(\d+)-date.html)">»<\/a><\/li><\/ul><\/div><\/div>\s?<\/div>'
            if not scrapertools.find_single_match(data, patron):
                patron = '>(\d+)?<\/a><\/li><li class=""><a href="([^"]+page-(\d+)\/)">»<\/a><\/li><\/ul><\/div><\/div>\s?<\/div>'
        else:
            patron = '>(\d+)?<\/a><\/li><li class=""><a href="([^"]+&page=(\d+))">»<\/a><\/li><\/ul><\/div><\/div>\s?<\/div>'

        #Si la página de la web es superior a la página del canal, se ponen los limites
        if item.extra == 'novedades': cnt_tot = matches_len
        elif item.extra == 'populares': cnt_tot = 25
        elif item.extra == 'categorias': cnt_tot = matches_len
        elif item.extra == 'series': cnt_tot = 25
        elif item.extra == 'search': cnt_tot = matches_len
        else: cnt_tot = 40

        if last_page == 99999:  #Si es el valor inicial, buscamos
            #Se busca el píe de página
            try:
                last_page, next_page_url, next_page = scrapertools.find_single_match(data, patron)
                last_page = int(last_page)
                curr_page = int(next_page)-1
                next_page_url = urlparse.urljoin(host, next_page_url)
            except:  #Si no lo encuentra, lo ponemos a 1
                logger.error('ERROR 03: LISTADO: Al obtener la paginación: ' + patron)
                curr_page = 1
                last_page = 0
                if item.extra == 'series':
                    next_page_url = item.url
                else:
                    next_page_url = item.url + '?&page=1'

            #Calculamos el num de páginas totales si la página web es más grande que la del canal
            last_page_foot = last_page
            if matches_len > cnt_tot:
                if last_page == 0:
                    last_page = 1
                if last_page_foot == 0:
                    last_page_foot = 1
                if item.extra == 'series':
                    last_page_foot = last_page_foot * (100 / cnt_tot)
                else:
                    last_page_foot = last_page_foot * (matches_len / cnt_tot)

        #Calculamos la url de la siguiente página
        if last_page > 1 or last_page_foot > 1:
            curr_page_foot += 1  #Apunto ya a la página siguiente
            if item.extra == 'series':
                if cnt_title_tot + cnt_tot >= matches_len:
                    curr_page += 1  #Apunto ya a la página siguiente
                    cnt_title_tot = 0 - len(matches)
                    if len(matches) < cnt_tot:  #Si va a cargar otra página, no lo cuento
                        curr_page_foot -= 1  #Vuelvo a la página actual
                    next_page_url = re.sub(r'(?:-\d+)?.html', '-%s.html' % curr_page, next_page_url)
                    item.url = next_page_url
                else:
                    next_page_url = item.url
            elif item.extra == 'categorias':
                curr_page += 1  #Apunto ya a la página siguiente
                if scrapertools.find_single_match(next_page_url, '(?:-\d+)-date.html'):
                    next_page_url = re.sub(r'(?:-\d+)-date.html', '-%s-date.html' % curr_page, next_page_url)
                else:
                    next_page_url = re.sub(r'\/page-\d+', '/page-%s' % curr_page, next_page_url)
            elif item.extra == 'populares':
                next_page_url = item.url
            else:
                curr_page += 1  #Apunto ya a la página siguiente
                next_page_url = re.sub(r'page=\d+', 'page=%s' % curr_page, next_page_url)

        #logger.debug('curr_page: ' + str(curr_page) + ' / last_page: ' + str(last_page) + ' / url: ' + next_page_url + ' / cnt_title: ' + str(cnt_title) + ' / cnt_title_tot: ' + str(cnt_title_tot) + ' / cnt_tot: ' + str(cnt_tot) + ' / matches_len: ' + str(matches_len))

        #Empezamos el procesado de matches
        for scrapedduration, scrapedurl, scrapedtitle, scrapedthumb in matches:

            title = scrapedtitle
            title = title.replace("&aacute;", "a").replace("&eacute;", "e").replace("&iacute;", "i").replace("&oacute;", "o").replace("&uacute;", "u").replace("&uuml;", "u").replace("\xf1", "ñ").replace("&ntilde;", "ñ").replace("&atilde;", "a").replace("&etilde;", "e").replace("&itilde;", "i").replace("&otilde;", "o").replace("&utilde;", "u").replace("&ntilde;", "ñ").replace("&rsquo;", "'")

            item_local = item.clone()  #Creamos copia de Item para trabajar
            if item_local.tipo:  #... y limpiamos
                del item_local.tipo
            if item_local.totalItems:
                del item_local.totalItems
            if item_local.post_num:
                del item_local.post_num
            if item_local.intervencion:
                del item_local.intervencion
            if item_local.viewmode:
                del item_local.viewmode
            item_local.text_bold = True
            del item_local.text_bold
            item_local.text_color = True
            del item_local.text_color
            if item_local.url_plus:
                del item_local.url_plus

            title_subs = []  #creamos una lista para guardar info importante
            item_local.language = []  #iniciamos Lenguaje
            item_local.quality = ""  #iniciamos calidad
            item_local.url = scrapedurl  #guardamos la url
            item_local.thumbnail = scrapedthumb  #guardamos el thumb
            if channel not in item_local.thumbnail:  #si el thumb está encriptado, pasamos
                item_local.thumbnail = get_thumb("channels_tvshow.png")  #... y ponemos el de Series por defecto
            item_local.context = "['buscar_trailer']"

            item_local.contentType = "movie"  #por defecto, son películas
            item_local.action = "findvideos"

            #Analizamos los formatos de series
            if '/top-series' in scrapedurl or item_local.extra == 'series':
                item_local.contentType = "tvshow"
                item_local.action = "episodios"

            #Buscamos calidades adicionales
            if "3d" in title.lower() and not "3d" in item_local.quality.lower():
                if item_local.quality:
                    item_local.quality += " 3D"
                else:
                    item_local.quality = "3D"
                title = re.sub('3D', '', title, flags=re.IGNORECASE)
            title = title.replace('[]', '')
            if item_local.quality:
                item_local.quality += ' %s' % scrapertools.find_single_match(title, '\[(.*?)\]')
            else:
                item_local.quality = '%s' % scrapertools.find_single_match(title, '\[(.*?)\]')

            #Detectamos idiomas
            if ("latino" in scrapedurl.lower() or "latino" in title.lower()) and "LAT" not in item_local.language:
                item_local.language += ['LAT']
            elif ('subtitulado' in scrapedurl.lower() or 'subtitulado' in title.lower() or 'vose' in title.lower()) and "VOSE" not in item_local.language:
                item_local.language += ['VOSE']
            elif ('version-original' in scrapedurl.lower() or 'version original' in title.lower()) and "VO" not in item_local.language:
                item_local.language += ['VO']

            if item_local.language == []:
                item_local.language = ['CAST']

            #Detectamos info interesante a guardar para después de TMDB
            if scrapertools.find_single_match(title, '[mM].*?serie'):
                title = re.sub(r'[mM]iniserie', '', title)
                title_subs += ["Miniserie"]
            if scrapertools.find_single_match(title, '[sS]aga'):
                title = re.sub(r'[sS]aga', '', title)
                title_subs += ["Saga"]
            if scrapertools.find_single_match(title, '[cC]olecc'):
                title = re.sub(r'[cC]olecc...', '', title)
                title_subs += ["Colección"]

            if "duolog" in title.lower():
                title_subs += ["[Saga]"]
                title = title.replace(" Duologia", "").replace(" duologia", "").replace(" Duolog", "").replace(" duolog", "")
            if "trilog" in title.lower():
                title_subs += ["[Saga]"]
                title = title.replace(" Trilogia", "").replace(" trilogia", "").replace(" Trilog", "").replace(" trilog", "")
            if "extendida" in title.lower() or "v.e." in title.lower() or "v e " in title.lower():
                title_subs += ["[V. Extendida]"]
                title = title.replace("Version Extendida", "").replace("(Version Extendida)", "").replace("V. Extendida", "").replace("VExtendida", "").replace("V Extendida", "").replace("V.Extendida", "").replace("V Extendida", "").replace("V.E.", "").replace("V E ", "").replace("V:Extendida", "")

            #Ponemos el año a '-'
            item_local.infoLabels["year"] = '-'

            #Limpiamos el título de la basura innecesaria
            title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)

            title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "").replace("LATINO", "").replace("Spanish", "").replace("Trailer", "").replace("Audio", "")

            #Terminamos de limpiar el título
            title = re.sub(r'\??\s?\d*?\&.*', '', title)
            title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title)
            title = title.replace('()', '').replace('[]', '').strip().lower().title()

            item_local.from_title = title.strip().lower().title()  #Guardamos esta etiqueta para posible desambiguación de título

            #Salvamos el título según el tipo de contenido
            if item_local.contentType == "movie":
                item_local.contentTitle = title.strip().lower().title()
            else:
                item_local.contentSerieName = title.strip().lower().title()

            item_local.title = title.strip().lower().title()

            #Añadimos la duración a la Calidad
            if scrapedduration:
                if item_local.quality:
                    item_local.quality += ' [%s]' % scrapedduration
                else:
                    item_local.quality = '[%s]' % scrapedduration

            #Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
            item_local.title_subs = title_subs

            #Ahora se filtra por idioma, si procede, y se pinta lo que vale
            if config.get_setting('filter_languages', channel) > 0:  #Si hay idioma seleccionado, se filtra
                itemlist = filtertools.get_link(itemlist, item_local, list_language)
            else:
                itemlist.append(item_local.clone())  #Si no, pintar pantalla

            cnt_title = len(itemlist)  #Contador de líneas añadidas
            if cnt_title >= cnt_tot:  #Si hemos llegado al límite de la página, pintamos
                cnt_title_tot += cnt_title
                break

            #logger.debug(item_local)

    #Pasamos a TMDB la lista completa Itemlist
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    #Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
    item, itemlist = generictools.post_tmdb_listado(item, itemlist)

    # Si es necesario añadir paginacion
    if (curr_page <= last_page and item.extra not in ['populares']) or (cnt_title_tot < matches_len and 'populares' in item.extra):
        if last_page_foot > 1:
            title = '%s de %s' % (curr_page_foot-1, last_page_foot)
        else:
            title = '%s' % (curr_page_foot - 1)

        if item.extra not in ['populares', 'series']:
            cnt_title_tot = 0

        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, url=next_page_url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), last_page_foot=str(last_page_foot), curr_page=str(curr_page), curr_page_foot=str(curr_page_foot), cnt_tot=str(cnt_tot), cnt_title_tot=str(cnt_title_tot)))

    #logger.debug(str(cnt_tot) + ' / ' + str(cnt_title) + ' / ' + str(cnt_title_tot))

    return itemlist


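Editor's note: because listado() above re-enters itself one page at a time, all of its pagination state travels as string attributes on the ">> Página siguiente" Item and is read back and deleted on the next pass. A stripped-down sketch of that handshake, reusing the channel's own field names (it relies on Alfa's Item returning '' for attributes that were never set):

from core.item import Item

def read_counter(item, name, default):
    # Counters arrive as strings on the Item and are deleted once consumed.
    value = getattr(item, name)
    if value:
        delattr(item, name)
        return int(value)
    return default

def next_page_item(item, url, curr_page, last_page):
    return item.clone(action='listado', title='>> Página siguiente',
                      url=url, curr_page=str(curr_page), last_page=str(last_page))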
def findvideos(item):
    logger.info()
    itemlist = []
    itemlist_t = []  #Itemlist total de enlaces
    itemlist_f = []  #Itemlist de enlaces filtrados
    if not item.language:
        item.language = ['CAST']  #Castellano por defecto
    matches = []
    item.category = categoria

    item.extra2 = 'xyz'
    del item.extra2

    #logger.debug(item)

    #Bajamos los datos de la página
    data = ''
    patron = '<link itemprop="embedUrl"\s*href="([^"]+)"\s*\/>(?:<iframe src="([^"]*)")?'
    try:
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
        data = unicode(data, "utf-8", errors="replace").encode("utf-8")
    except:
        pass

    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:  #error
        logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
        return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)

    #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    #Si es un episodio suelto, se ofrece la posibilidad de ver la lista de Episodios completa
    if scrapertools.find_single_match(item.contentTitle, ':\s*(\d+)-\s*') and item.contentType != 'episode':
        serie = item.contentTitle
        url_serie = scrapertools.find_single_match(data, '<dt><span>Serie Documental<\/span><\/dt>\s*<dd><a href="([^"]+)"\s*>')
        serie = scrapertools.find_single_match(data, '<dt><span>Serie Documental<\/span><\/dt><dd><a href="[^"]+"\s*>(.*?)<')
        if url_serie:
            itemlist.append(item.clone(title="**-[COLOR yellow] Ver TODOS los episodios de la Serie [/COLOR]-**", action="episodios", contentType='tvshow', url=url_serie, extra="series", from_title=serie, wanted=serie, contentSerieName=serie, contentTitle=serie, quality="", language=[]))

    #Recorremos la lista de servidores Directos, excluyendo YouTube para trailers
    for scrapedurl, scrapedplayer in matches:
        #Generamos una copia de Item para trabajar sobre ella
        item_local = item.clone()

        #Buscamos la url del vídeo
        if 'cnubis.com' in scrapedplayer:
            videourl = conector_cnubis(scrapedurl, scrapedplayer)
        else:
            videourl = servertools.findvideos(scrapedurl)

        #Ya tenemos un enlace, lo pintamos
        if len(videourl) > 0:
            server = videourl[0][0]
            enlace = videourl[0][1]
            mostrar_server = True
            if config.get_setting("hidepremium"):  #Si no se aceptan servidores premium, se ignoran
                mostrar_server = servertools.is_server_enabled(server)

            #Se comprueba si el vídeo existe
            if mostrar_server:
                item_local.alive = "??"  #Se asume por defecto que el link es dudoso
                item_local.alive = servertools.check_video_link(enlace, server, timeout=timeout)
                if '?' in item_local.alive:
                    alive = '?'  #No se ha podido comprobar el vídeo
                elif 'no' in item_local.alive.lower():
                    continue  #El enlace es malo
                else:
                    alive = ''  #El enlace está verificado

                #Ahora pintamos el link del Servidor Directo
                item_local.url = enlace
                item_local.title = '[COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (alive, server.capitalize(), item_local.quality, str(item_local.language))

                #Preparamos título y calidad, quitamos etiquetas vacías
                item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
                item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
                item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
                item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
                item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
                item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()

                item_local.action = "play"  #Visualizar vídeo
                item_local.server = server.lower()  #Servidor Directo

                itemlist_t.append(item_local.clone())  #Pintar pantalla, si no se filtran idiomas

                # Requerido para FilterTools
                if config.get_setting('filter_languages', channel) > 0:  #Si hay idioma seleccionado, se filtra
                    itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language)  #Pintar pantalla, si no está vacío

                #logger.debug("DIRECTO: " + server + ' / ' + enlace + " / title: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)

        #logger.debug(item_local)

    if len(itemlist_f) > 0:  #Si hay entradas filtradas...
        itemlist.extend(itemlist_f)  #Pintamos pantalla filtrada
    else:
        if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0:  #Si no hay entradas filtradas ...
            thumb_separador = get_thumb("next.png")  #... pintamos todo con aviso
            itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
        itemlist.extend(itemlist_t)  #Pintar pantalla con todo si no hay filtrado

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)  #Lanzamos Autoplay

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []
    item.category = categoria

    #logger.debug(item)

    if item.from_title:
        item.title = item.from_title
    item.extra2 = 'xyz'
    del item.extra2
    next_page_url = item.url

    inicio = time.time()  # Controlaremos que el proceso no exceda de un tiempo razonable
    fin = inicio + 10     # Después de este tiempo pintamos (segundos)
    timeout_search = timeout  # Timeout para descargas

    item.quality = re.sub(r'\s?\[\d+:\d+\]', '', item.quality)  #quitamos la duración de la serie

    # Obtener la información actualizada de la Serie. TMDB es imprescindible para Videoteca
    if not item.infoLabels['tmdb_id']:
        tmdb.set_infoLabels(item, True)

    #Bucle para recorrer todas las páginas
    epis = 1
    while next_page_url and fin > time.time():

        # Descarga la página
        data = ''  #Inserto en num de página en la url
        try:
            data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout).data)
            data = unicode(data, "utf-8", errors="replace").encode("utf-8")
        except:  #Algún error de proceso, salimos
            pass

        if not data:
            logger.error("ERROR 01: EPISODIOS: La Web no responde o la URL es erronea" + item.url)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: EPISODIOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
            return itemlist

        #Buscamos los episodios
        patron = '<span class="pm-label-duration">(.*?)<\/span>.*?<a href="([^"]+)" title="([^"]+)">.*?data-echo="([^"]+)"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if not matches:  #error
            item = generictools.web_intervenida(item, data)  #Verificamos que no haya sido clausurada
            if item.intervencion:  #Sí ha sido clausurada judicialmente
                item, itemlist = generictools.post_tmdb_episodios(item, itemlist)  #Llamamos al método para el pintado del error
                return itemlist  #Salimos

            logger.error("ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web. Reportar el error con el log'))
            return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

        #logger.debug("PATRON: " + patron)
        #logger.debug(matches)
        #logger.debug(data)

        patron = '<li class=""><a href="([^"]+)">»<\/a><\/li><\/ul><\/div><\/div>\s*<\/div>'
        next_page_url = ''
        next_page_url = scrapertools.find_single_match(data, patron)
        if next_page_url:
            next_page_url = urlparse.urljoin(host, next_page_url)
        #logger.debug(next_page_url)

        # Recorremos todos los episodios generando un Item local por cada uno en Itemlist
        for scrapedduration, scrapedurl, scrapedtitle, scrapedthumbnail in matches:
            item_local = item.clone()
            item_local.action = "findvideos"
            item_local.contentType = "episode"
            item_local.extra = "episodios"
            if item_local.library_playcounts:
                del item_local.library_playcounts
            if item_local.library_urls:
                del item_local.library_urls
            if item_local.path:
                del item_local.path
            if item_local.update_last:
                del item_local.update_last
            if item_local.update_next:
                del item_local.update_next
            if item_local.channel_host:
                del item_local.channel_host
            if item_local.active:
                del item_local.active
            if item_local.contentTitle:
                del item_local.infoLabels['title']
            if item_local.season_colapse:
                del item_local.season_colapse
            if item_local.unify:
                del item_local.unify
            if item_local.tmdb_stat:
                del item_local.tmdb_stat
            item_local.wanted = 'xyz'
            del item_local.wanted

            item_local.title = ''
            item_local.context = "['buscar_trailer']"
            item_local.url = scrapedurl
            title = scrapedtitle
            item_local.language = []

            #Buscamos calidades del episodio
            if 'hdtv' in scrapedtitle.lower() or 'hdtv' in scrapedurl:
                item_local.quality = 'HDTV'
            elif 'hd7' in scrapedtitle.lower() or 'hd7' in scrapedurl:
                item_local.quality = 'HD720p'
            elif 'hd1' in scrapedtitle.lower() or 'hd1' in scrapedurl:
                item_local.quality = 'HD1080p'

            if not item_local.quality:
                item_local.quality = '[%s]' % scrapedduration

            #Buscamos idiomas del episodio
            lang = scrapedtitle.strip()
            if ('v.o' in lang.lower() or 'v.o' in scrapedurl.lower()) and not 'VO' in item_local.language:
                item_local.language += ['VO']
            elif ('vose' in lang.lower() or 'v.o.s.e' in lang.lower() or 'vose' in scrapedurl.lower() or 'v.o.s.e' in scrapedurl.lower()) and not 'VOSE' in item_local.language:
                item_local.language += ['VOSE']
            elif ('latino' in lang.lower() or 'latino' in scrapedurl.lower()) and not 'LAT' in item_local.language:
                item_local.language += ['LAT']

            if not item_local.language:
                item_local.language += ['CAST']

            #Buscamos la Temporada y el Episodio
            item_local.contentSeason = 0
            item_local.contentEpisodeNumber = 0
            try:
                #Extraemos los episodios
                patron = ':\s*(\d+)-\s*'
                if scrapertools.find_single_match(title, patron):
                    item_local.contentEpisodeNumber = int(scrapertools.find_single_match(title, patron))

                #Extraemos la temporada
                patron = '\s*\([tT](\d+)\):'
if scrapertools.find_single_match(title, patron):
|
||||
item_local.contentSeason = int(scrapertools.find_single_match(title, patron))
|
||||
except:
|
||||
logger.error('ERROR al extraer Temporada/Episodio: ' + title)
|
||||
|
||||
if item_local.contentSeason == 0:
|
||||
if 'ii:' in title.lower(): item_local.contentSeason = 2
|
||||
elif 'iii:' in title.lower(): item_local.contentSeason = 3
|
||||
elif 'iv:' in title.lower(): item_local.contentSeason = 4
|
||||
else: item_local.contentSeason = 1
|
||||
|
||||
if item_local.contentEpisodeNumber == 0:
|
||||
item_local.contentEpisodeNumber = epis
|
||||
|
||||
#Formateamos el título compatible con la Videoteca
|
||||
item_local.title = '%sx%s -' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
|
||||
patron = ':(?:\s*\d+-)?\s*(.*?)$'
|
||||
item_local.infoLabels['episodio_titulo'] = scrapertools.find_single_match(title, patron)
|
||||
|
||||
itemlist.append(item_local.clone())
|
||||
epis += 1
|
||||
|
||||
#logger.debug(item_local)
|
||||
|
||||
if len(itemlist) > 1:
|
||||
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #clasificamos
|
||||
|
||||
# Pasada por TMDB y clasificación de lista por temporada y episodio
|
||||
tmdb.set_infoLabels(itemlist, True)
|
||||
|
||||
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
|
||||
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
|
||||
|
||||
#logger.debug(item)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
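# A quick sanity check of the three title patterns used above, on a made-up
# title of the expected shape (the sample string is hypothetical, not scraped):
#
#   >>> import re
#   >>> t = 'Serie (T2): 5- Nombre del episodio'
#   >>> re.search(r':\s*(\d+)-\s*', t).group(1)            # episode number
#   '5'
#   >>> re.search(r'\s*\([tT](\d+)\):', t).group(1)        # season number
#   '2'
#   >>> re.search(r':(?:\s*\d+-)?\s*(.*?)$', t).group(1)   # episode title
#   'Nombre del episodio'
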
def actualizar_titulos(item):
    logger.info()

    item = generictools.update_title(item)  # Call the method that updates the title via tmdb.find_and_set_infoLabels

    # Go back to the next action in the channel
    return item


def conector_cnubis(scrapedurl, scrapedplayer):
    logger.info("url=%s, player=https:%s" % (scrapedurl, scrapedplayer))
    videourl = []

    headers = {'Referer': scrapedurl}   # Referer set to the initial url
    data = httptools.downloadpage('https:' + scrapedplayer, headers=headers).data   # fetch the video from the player + initial url

    #url_file, url_type = scrapertools.find_single_match(data, 'file\s*:\s*"([^"]*)"\s*,\s*type\s*:\s*"([^"]*)')
    url_file = scrapertools.find_single_match(data, '<meta itemprop="contentURL" content="([^"]+)" />')   # extract the video url
    url_type = 'directo'

    #videourl.append([url_type, 'https:' + url_file])
    videourl.append([url_type, url_file])   # answer as if it came back from servertools.findvideos()

    logger.info(videourl)
    return videourl

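# Expected shape of the value returned by conector_cnubis(), shown for a
# hypothetical player page (both urls are invented, for illustration only):
#
#   conector_cnubis('https://example.com/pelicula', '//example.com/player?id=1')
#   -> [['directo', 'https://cdn.example.com/video.mp4']]
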
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")

    try:
        item.url = item.url + texto

        if texto != '':
            return listado(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()

    try:
        if categoria == 'documentales':
            item.url = host + "newvideos.html"
            item.extra = "novedades"
            item.channel = channel
            item.category_new = 'newest'

            itemlist = listado(item)
            if ">> Página siguiente" in itemlist[-1].title:
                itemlist.pop()

    # Catch the exception so one failing channel does not interrupt the news channel
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
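
# How the "Novedades" aggregator is expected to drive this entry point: it
# imports the channel module and calls newest(categoria), collecting [] on
# failure (a minimal sketch under that assumption; the module name is illustrative):
#
#   channel_module = __import__('channels.ejemplo', fromlist=['ejemplo'])
#   items = channel_module.newest('documentales')   # -> list of Item objects, or []
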
@@ -71,7 +71,7 @@ def menu_movies(item):
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


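# Effect of the cleanup pattern above on a made-up fragment (single spaces
# survive; newlines, tabs, &nbsp;, <br> and runs of 2+ spaces are dropped):
#
#   >>> re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", '<div>\n\t<b>hi</b>&nbsp;<br> x</div>')
#   '<div><b>hi</b> x</div>'
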
@@ -92,11 +92,9 @@ def section(item):
    duplicados = []
    data = get_source(host + '/' + item.type)
    if 'Genero' in item.title:
        patron = '<li class=cat-item cat-item-\d+><a href=(.*?) >(.*?)/i>'
        patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >(.*?)/i>'
    elif 'Año' in item.title:
        patron = '<li><a href=(.*?release.*?)>(.*?)</a>'
    elif 'Calidad' in item.title:
        patron = 'menu-item-object-dtquality menu-item-\d+><a href=(.*?)>(.*?)</a>'
        patron = '<li><a href="(.*?release.*?)">([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

@@ -123,10 +121,12 @@ def list_all(item):
    itemlist = []

    data = get_source(item.url)

    if item.type == 'movies':
        patron = '<article id=post-\d+ class=item movies><div class=poster><img src=(.*?) alt=(.*?)>.*?quality>(.*?)'
        patron += '</span><\/div><a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>.*?flags(.*?)metadata'

        patron = '<article id="post-\d+" class="item movies"><div class="poster">.?<img src="([^"]+)" alt="([^"]+)">.*?'
        patron += '"quality">([^<]+)</span><\/div>.?<a href="([^"]+)">.*?'
        patron += '<\/h3>.?<span>([^"]+)<\/span><\/div>.*?"flags"(.*?)metadata'

        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches:
@@ -148,8 +148,8 @@ def list_all(item):
                                       infoLabels={'year': year}))

    elif item.type == 'tvshows':
        patron = '<article id=post-\d+ class=item tvshows><div class=poster><img src=(.*?) alt=(.*?)>.*?'
        patron += '<a href=(.*?)>.*?<\/h3><span>(.*?)<\/span><\/div>'
        patron = '<article id="post-\d+" class="item tvshows">.?<div class="poster">.?<img src="([^"]+)"'
        patron += ' alt="([^"]+)">.*?<a href="([^"]+)">.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
@@ -168,8 +168,7 @@ def list_all(item):
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination

    #url_next_page = scrapertools.find_single_match(data, "<a class='arrow_pag' href=([^>]+)><i id='nextpagination'")
    url_next_page = scrapertools.find_single_match(data, "<link rel=next href=([^ ]+) />")
    url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))

@@ -181,7 +180,7 @@ def seasons(item):
    itemlist = []

    data = get_source(item.url)
    patron = 'Temporada \d+'
    patron = 'Temporada.?\d+'
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels
@@ -215,7 +214,7 @@ def episodesxseasons(item):
    itemlist = []

    data = get_source(item.url)
    patron = 'class=numerando>%s - (\d+)</div><div class=episodiotitle><a href=(.*?)>(.*?)<' % item.infoLabels['season']
    patron = 'class="numerando">%s - (\d+)</div>.?<div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels
@@ -237,7 +236,7 @@ def findvideos(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'id=option-(\d+).*?rptss src=(.*?) frameborder'
    patron = 'id="option-(\d+)".*?rptss" src="([^"]+)" frameborder'
    matches = re.compile(patron, re.DOTALL).findall(data)
    lang = ''
    for option, scrapedurl in matches:
@@ -292,7 +291,8 @@ def search_results(item):
    itemlist = []

    data = get_source(item.url)
    patron = '<article>.*?<a href=(.*?)><img src=(.*?) alt=(.*?) />.*?meta.*?year>(.*?)<(.*?)<p>(.*?)</p>'
    patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" \/>.*?meta.*?'
    patron += '"year">([^<]+)<(.*?)<p>([^<]+)<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

46  plugin.video.alfa/channels/dramasjc.json  Normal file
@@ -0,0 +1,46 @@
{
    "id": "dramasjc",
    "name": "DramasJC",
    "active": true,
    "adult": false,
    "language": [],
    "thumbnail": "https://www.dramasjc.com/wp-content/uploads/2018/03/logo.png",
    "banner": "",
    "version": 1,
    "categories": [
        "tvshow",
        "movie",
        "vos"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "VOSE",
                "VO"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}

281  plugin.video.alfa/channels/dramasjc.py  Normal file
@@ -0,0 +1,281 @@
# -*- coding: utf-8 -*-
# -*- Channel DramasJC -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools


host = 'https://www.dramasjc.com/'

IDIOMAS = {'VOSE': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['okru', 'mailru', 'openload']

def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = list()
    itemlist.append(Item(channel=item.channel, title="Doramas", action="menu_doramas",
                         thumbnail=get_thumb('doramas', auto=True)))

    itemlist.append(Item(channel=item.channel, title="Películas", action="list_all", url=host + 'peliculas/',
                         type='movie', thumbnail=get_thumb('movies', auto=True)))

    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
                         thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def menu_doramas(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Todos", action="list_all", url=host + 'series',
                         thumbnail=get_thumb('all', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Generos", action="section",
                         thumbnail=get_thumb('genres', auto=True)))

    return itemlist

def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    full_data = data
    data = scrapertools.find_single_match(data, '<ul class="MovieList NoLmtxt.*?>(.*?)</ul>')

    patron = '<article id="post-.*?<a href="([^"]+)">.*?(?:<img |-)src="([^"]+)".*?alt=".*?'
    patron += '<h3 class="Title">([^<]+)<\/h3>.?(?:</a>|<span class="Year">(\d{4})<\/span>).*?'
    patron += '(movie|TV)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:

        url = scrapedurl
        if year == '':
            year = '-'
        if "|" in scrapedtitle:
            scrapedtitle = scrapedtitle.split("|")
            contentname = scrapedtitle[0].strip()
        else:
            contentname = scrapedtitle

        contentname = re.sub('\(.*?\)', '', contentname)

        title = '%s [%s]' % (contentname, year)
        thumbnail = 'http:' + scrapedthumbnail
        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        infoLabels={'year': year}
                        )

        if type == 'movie':
            new_item.contentTitle = contentname
            new_item.action = 'findvideos'
        else:
            new_item.contentSerieName = contentname
            new_item.action = 'seasons'
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, True)

    # Pagination

    url_next_page = scrapertools.find_single_match(full_data, '<a class="next.*?href="([^"]+)">')
    if url_next_page:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
    return itemlist

def section(item):
    logger.info()
    itemlist = []

    full_data = get_source(host)
    data = scrapertools.find_single_match(full_data, '<a href="#">Dramas por Genero</a>(.*?)</ul>')
    patron = '<a href="([^ ]+)">([^<]+)<'
    action = 'list_all'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for data_one, data_two in matches:

        url = data_one
        title = data_two
        new_item = Item(channel=item.channel, title=title, url=url, action=action)
        itemlist.append(new_item)

    return itemlist


def seasons(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'class="Title AA-Season On" data-tab="1">Temporada <span>([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for temporada in matches:
        title = 'Temporada %s' % temporada
        contentSeasonNumber = temporada
        item.infoLabels['season'] = contentSeasonNumber
        itemlist.append(item.clone(action='episodesxseason',
                                   title=title,
                                   contentSeasonNumber=contentSeasonNumber
                                   ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             contentSerieName=item.contentSerieName,
                             contentSeasonNumber=contentSeasonNumber
                             ))

    return itemlist

def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseason(tempitem)

    return itemlist


def episodesxseason(item):
    logger.info()

    itemlist = []
    season = item.contentSeasonNumber
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '>Temporada <span>%s</span>(.*?)</ul>' % season)
    patron = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    ep = 1
    for scrapedurl, scrapedtitle in matches:
        epi = str(ep)
        title = season + 'x%s - Episodio %s' % (epi, epi)
        url = scrapedurl
        contentEpisodeNumber = epi
        item.infoLabels['episode'] = contentEpisodeNumber
        if 'próximamente' not in scrapedtitle.lower():
            itemlist.append(item.clone(action='findvideos',
                                       title=title,
                                       url=url,
                                       contentEpisodeNumber=contentEpisodeNumber,
                                       ))
        ep += 1
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist

def findvideos(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    data = scrapertools.unescape(data)
    data = scrapertools.decodeHtmlentities(data)

    patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, scrapedurl in matches:
        scrapedurl = scrapedurl.replace('"', '').replace('&amp;', '&')
        data_video = get_source(scrapedurl)
        url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)"')
        opt_data = scrapertools.find_single_match(data, '"%s"><span>.*?</span>.*?<span>([^<]+)</span>' % option).split('-')
        language = opt_data[0].strip()
        quality = opt_data[1].strip()
        if 'sub' in language.lower():
            language = 'VOSE'
        else:
            language = 'VO'
        if url != '' and 'youtube' not in url:
            itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
                                 action='play'))
        elif 'youtube' in url:
            trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]' % (i.server.capitalize(),
                                                                                                i.language, i.quality))
    try:
        itemlist.append(trailer)
    except:
        pass

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))


    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto

    if texto != '':
        return list_all(item)
    else:
        return []

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'peliculas/'

            itemlist = list_all(item)
            if itemlist[-1].title == 'Siguiente >>':
                itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
@@ -194,6 +194,8 @@ def peliculas(item):
def findvideos(item):
    logger.info()
    itemlist = []

    tmdb.set_infoLabels(item, True)  # refresh infoLabels to pick up extra data on this "second pass"

    data = httptools.downloadpage(item.url).data

@@ -251,8 +253,6 @@ def findvideos(item):
                                 language=idioma, quality=calidad))
            break

    tmdb.set_infoLabels(itemlist)

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    if __comprueba_enlaces__:

88  plugin.video.alfa/channels/peliculashd.json  Normal file
@@ -0,0 +1,88 @@
{
    "id": "peliculashd",
    "name": "PeliculasHD",
    "active": true,
    "adult": false,
    "language": ["lat", "cast"],
    "thumbnail": "https://i.postimg.cc/05HTS7wC/peliculashd.png",
    "banner": "",
    "categories": [
        "movie",
        "tvshow",
        "vos",
        "direct"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino",
                "Castellano",
                "VOSE"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_anime",
            "type": "bool",
            "label": "Incluir en Novedades - Episodios de anime",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces",
            "type": "bool",
            "label": "Verificar si los enlaces existen",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces_num",
            "type": "list",
            "label": "Número de enlaces a verificar",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": [ "5", "10", "15", "20" ]
        }
    ]
}

361  plugin.video.alfa/channels/peliculashd.py  Normal file
@@ -0,0 +1,361 @@
# -*- coding: utf-8 -*-
# -*- Channel PeliculasHD -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib
import base64

from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger


IDIOMAS = {'Latino': 'Latino', 'Español': 'Castellano', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()

list_quality = []

list_servers = [
    'directo',
    'openload',
    'rapidvideo'
]

__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculashd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculashd')

host = 'https://peliculashd.site/'

def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
                         thumbnail=get_thumb('movies', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Series', url=host + '/genero/serie', action='list_all', type='tvshows',
                         thumbnail=get_thumb('tvshows', auto=True)))

    itemlist.append(Item(channel=item.channel, title='Anime', url=host + '/genero/anime', action='list_all', type='tvshows',
                         thumbnail=get_thumb('anime', auto=True)))

    itemlist.append(
        Item(channel=item.channel, title='Telenovelas', url=host + '/genero/telenovelas-teleseries', action='list_all', type='tvshows',
             thumbnail=get_thumb('telenovelas', auto=True)))

    itemlist.append(
        item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
                   extra='movie'))

    autoplay.show_option(item.channel, itemlist)

    return itemlist

def menu_movies(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
                         thumbnail=get_thumb('all', auto=True), type='movies'))
    itemlist.append(Item(channel=item.channel, title='Genero', action='section',
                         thumbnail=get_thumb('genres', auto=True), type='movies'))
    itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
                         thumbnail=get_thumb('year', auto=True), type='movies'))

    return itemlist

def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def get_language(lang_data):
    logger.info()
    language = []
    lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
    for lang in lang_list:
        if lang == 'en':
            lang = 'vose'
        if lang not in language:
            language.append(lang)
    return language

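# Example of what get_language() extracts from the flag-icon markup
# (the input fragment below is invented, shaped like the site's CSS):
#
#   >>> get_language('url(/flags/mx.png) url(/flags/en.png)')
#   ['mx', 'vose']
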
def section(item):
    logger.info()
    itemlist = []
    duplicados = []
    full_data = get_source(host + '/' + item.type)
    if 'Genero' in item.title:
        data = scrapertools.find_single_match(full_data, '<a>Generos</a>(.*?)</ul>')
    elif 'Año' in item.title:
        data = scrapertools.find_single_match(full_data, '<h2>Busqueda por Año</h2>(.*?)</ul>')
    patron = '<a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        plot = ''
        url = scrapedurl
        if title not in duplicados and title.lower() != 'proximamente':
            itemlist.append(Item(channel=item.channel, url=url, title=title, plot=plot, action='list_all',
                                 type=item.type))
            duplicados.append(title)

    return itemlist


def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    if item.type == 'movies':
        patron = '<article id="post-\d+" class="item movies"><div class="poster">\s?<img src="([^"]+)" alt="([^"]+)">.*?'
        patron += '"quality">([^<]+)</span><\/div>\s?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches:

            title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
            contentTitle = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl

            itemlist.append(item.clone(action='findvideos',
                                       title=title,
                                       url=url,
                                       thumbnail=thumbnail,
                                       contentTitle=contentTitle,
                                       quality=quality,
                                       infoLabels={'year': year}))

    elif item.type == 'tvshows':
        patron = '<article id="post-\d+" class="item tvshows"><div class="poster">\s?<img src="([^"]+)" alt="([^"]+)">.*?'
        patron += '<\/div>\s?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
            title = scrapedtitle
            contentSerieName = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl

            itemlist.append(item.clone(action='seasons',
                                       title=title,
                                       url=url,
                                       thumbnail=thumbnail,
                                       contentSerieName=contentSerieName,
                                       infoLabels={'year': year}))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination

    #url_next_page = scrapertools.find_single_match(data, "<a class='arrow_pag' href=([^>]+)><i id='nextpagination'")
    url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^ ]+)" />')
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))

    return itemlist

def seasons(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'Temporada \d+'
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels
    for season in matches:
        season = season.lower().replace('temporada', '')
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))

    return itemlist

def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseasons(tempitem)

    return itemlist

def episodesxseasons(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'class="numerando">%s - (\d+)</div><div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels

    for scrapedepisode, scrapedurl, scrapedtitle in matches:

        infoLabels['episode'] = scrapedepisode
        url = scrapedurl
        title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)

        itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos', type='tv',
                             infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


def findvideos(item):
    logger.info()
    from lib import generictools
    import urllib
    itemlist = []
    data = get_source(item.url)
    patron = 'data-post="(\d+)" data-nume="(\d+).*?class="title">([^>]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for pt, nm, language in matches:

        if 'sub' in language.lower() or language not in IDIOMAS:
            language = 'VOSE'
        post = {'action': 'doo_player_ajax', 'post': pt, 'nume': nm}
        post = urllib.urlencode(post)
        new_data = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=post,
                                          headers={'Referer': item.url}).data
        hidden_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
        new_data = get_source(hidden_url)
        matches = scrapertools.find_multiple_matches(new_data, '\["\d+","([^"]+)",\d+\]')
        for url in matches:
            if not config.get_setting('unify'):
                title = ' [%s]' % IDIOMAS[language]
            else:
                title = ''
            url = url.replace('\\/', '/')
            if 'playdrive' in url:
                new_data = get_source(url)
                url = scrapertools.find_single_match(new_data, 'file:"([^"]+)"')
            itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play',
                                 language=IDIOMAS[language], infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    # Required to filter links
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist

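# The loop above drives the stock Dooplay theme endpoint. Reduced to its
# essentials, the exchange looks like this (a sketch, not Alfa code; the
# post/nume values and the Referer url are hypothetical):
#
#   import urllib, urllib2
#   post = urllib.urlencode({'action': 'doo_player_ajax', 'post': '1234', 'nume': '1'})
#   req = urllib2.Request('https://peliculashd.site/wp-admin/admin-ajax.php', post,
#                         {'Referer': 'https://peliculashd.site/pelicula/ejemplo/'})
#   html = urllib2.urlopen(req).read()   # body carries an <iframe src='...'> with the hidden player
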
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto

    if texto != '':
        return search_results(item)
    else:
        return []

def search_results(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<article>.*?<a href="([^"]+)">.?<img src="([^"]+)" alt="([^"]+)" />.?<span class="(tvshows|movies)".*?'
    patron += '"meta".*?"year">([^<]+)<(.*?)<p>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumb, scrapedtitle, type, year, lang_data, scrapedplot in matches:

        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumb
        plot = scrapedplot
        language = get_language(lang_data)
        if language:
            action = 'findvideos'
        else:
            action = 'seasons'

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot,
                        action=action, type=type, language=language, infoLabels={'year': year})
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'movies/'
        elif categoria == 'infantiles':
            item.url = host + 'genero/animacion/'
        elif categoria == 'terror':
            item.url = host + 'genero/terror/'
        elif categoria == 'anime':
            item.url = host + 'genero/anime/'
        item.type = 'movies'
        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
88  plugin.video.alfa/channels/peliculonhd.json  Normal file
@@ -0,0 +1,88 @@
{
    "id": "peliculonhd",
    "name": "PeliculonHD",
    "active": true,
    "adult": false,
    "language": ["lat", "cast"],
    "thumbnail": "https://peliculonhd.com/wp-content/uploads/2018/09/peliculonnewlogo3-.png",
    "banner": "",
    "categories": [
        "movie",
        "tvshow",
        "vos",
        "direct"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino",
                "Castellano",
                "VOSE"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - Terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_documentales",
            "type": "bool",
            "label": "Incluir en Novedades - Documentales",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces",
            "type": "bool",
            "label": "Verificar si los enlaces existen",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces_num",
            "type": "list",
            "label": "Número de enlaces a verificar",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": [ "5", "10", "15", "20" ]
        }
    ]
}

405  plugin.video.alfa/channels/peliculonhd.py  Normal file
@@ -0,0 +1,405 @@
# -*- coding: utf-8 -*-
# -*- Channel PeliculonHD -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib
import base64

from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger


IDIOMAS = {'mx': 'Latino', 'dk': 'Latino', 'es': 'Castellano', 'en': 'VOSE', 'gb': 'VOSE'}
list_language = IDIOMAS.values()

list_quality = []

list_servers = [
    'directo',
    'openload',
    'rapidvideo',
    'jawcloud',
    'cloudvideo',
    'upvid',
    'vevio',
    'gamovideo'
]

__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculonhd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculonhd')

host = 'https://peliculonhd.com/'

def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
                         thumbnail=get_thumb('movies', auto=True)))
    itemlist.append(Item(channel=item.channel, title='Series', url=host + 'serie', action='list_all', type='tv',
                         thumbnail=get_thumb('tvshows', auto=True)))
    itemlist.append(
        item.clone(title="Buscar", action="search", url=host + '?s=', thumbnail=get_thumb("search", auto=True),
                   extra='movie'))

    autoplay.show_option(item.channel, itemlist)

    return itemlist

def menu_movies(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'ver', action='list_all',
                         thumbnail=get_thumb('all', auto=True), type='movie'))
    itemlist.append(Item(channel=item.channel, title='Genero', action='section',
                         thumbnail=get_thumb('genres', auto=True), type='movie'))
    itemlist.append(Item(channel=item.channel, title='Por Año', action='section',
                         thumbnail=get_thumb('year', auto=True), type='movie'))

    return itemlist

def get_source(url, referer=None):
    logger.info()
    # Send the Referer header only when one was actually given
    if referer is None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer': referer}).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


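# Usage of the helper above after the referer fix (urls are hypothetical;
# findvideos() relies on the second form to send a Referer header):
#
#   data = get_source('https://peliculonhd.com/ver/')
#   data = get_source('https://peliculonhd.net/edge-data/', referer='https://peliculonhd.com/')
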
def get_language(lang_data):
    logger.info()
    language = []
    lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
    for lang in lang_list:
        if lang == 'en':
            lang = 'vose'
        if lang not in language:
            language.append(lang)
    return language

def section(item):
    logger.info()
    itemlist = []
    duplicados = []
    full_data = get_source(host + '/' + item.type)
    if 'Genero' in item.title:
        data = scrapertools.find_single_match(full_data, '<a href="#">Genero</a>(.*?)</ul>')
    elif 'Año' in item.title:
        data = scrapertools.find_single_match(full_data, '<a href="#">Año</a>(.*?)</ul>')
    patron = '<a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        plot = ''
        url = host + scrapedurl
        if title not in duplicados and title.lower() != 'proximamente':
            itemlist.append(Item(channel=item.channel, url=url, title=title, plot=plot, action='list_all',
                                 type=item.type))
            duplicados.append(title)

    return itemlist


def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)

    if item.type == 'movie':
        patron = '<article id="post-\d+" class="item movies"><div class="poster">\s?<img src="([^"]+)" alt="([^"]+)">.*?'
        patron += '"quality">([^<]+)</span><\/div>\s?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches:

            title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
            contentTitle = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl
            #language = get_language(lang_data)

            if 'proximamente' not in quality.lower():
                itemlist.append(item.clone(action='findvideos',
                                           title=title,
                                           url=url,
                                           thumbnail=thumbnail,
                                           contentTitle=contentTitle,
                                           quality=quality,
                                           type=item.type,
                                           infoLabels={'year': year}))

    elif item.type == 'tv':
        patron = '<article id="post-\d+" class="item tvshows"><div class="poster">\s?<img src="([^"]+)" '
        patron += 'alt="([^"]+)">.*?<a href="([^"]+)">.*?</h3>.*?<span>([^<]+)</'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
            title = scrapedtitle
            contentSerieName = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl

            itemlist.append(item.clone(action='seasons',
                                       title=title,
                                       url=url,
                                       thumbnail=thumbnail,
                                       contentSerieName=contentSerieName,
                                       type=item.type,
                                       infoLabels={'year': year}))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination

    if item.type != 'movie':
        item.type = 'tv'
    url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^ ]+)" />')
    if url_next_page:
        url_next_page = 'https:' + url_next_page
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type, action='list_all'))

    return itemlist

def seasons(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'Temporada \d+'
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels
    for season in matches:
        season = season.lower().replace('temporada', '')
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))

    return itemlist

def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseasons(tempitem)

    return itemlist

def episodesxseasons(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = 'class="numerando">%s - (\d+)</div><div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
    matches = re.compile(patron, re.DOTALL).findall(data)

    infoLabels = item.infoLabels

    for scrapedepisode, scrapedurl, scrapedtitle in matches:

        infoLabels['episode'] = scrapedepisode
        url = scrapedurl
        title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)

        itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos', type='tv',
                             infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


def findvideos(item):
    logger.info()
    from lib import generictools
    import urllib
    itemlist = []
    data = get_source(item.url)
    patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
    matches = re.compile(patron, re.DOTALL).findall(data)
    for id, option, lang in matches:
        lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
        quality = ''
        if lang not in IDIOMAS:
            lang = 'en'
        if not config.get_setting('unify'):
            title = ' [%s]' % IDIOMAS[lang]
        else:
            title = ''

        post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type': item.type}
        post = urllib.urlencode(post)

        test_url = '%swp-admin/admin-ajax.php' % 'https://peliculonhd.com/'
        new_data = httptools.downloadpage(test_url, post=post).data
        test_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
        if 'xyz' in test_url:
            new_data = get_source(test_url, item.url)
            patron = "addiframe\('([^']+)'"
            matches = scrapertools.find_multiple_matches(new_data, patron)

            for test_url in matches:
                if 'play.php' in test_url:
                    new_data = get_source(test_url)
                    enc_data = scrapertools.find_single_match(new_data, '(eval.*?)</script')

                    dec_data = jsunpack.unpack(enc_data)
                    url = scrapertools.find_single_match(dec_data, 'src="([^"]+)"')
                elif 'embedvip' in test_url:
                    from lib import generictools
                    new_data = get_source(test_url)
                    dejuiced = generictools.dejuice(new_data)
                    url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
                if url != '':
                    itemlist.append(
                        Item(channel=item.channel, url=url, title='%s' + title, action='play', quality=quality,
                             language=IDIOMAS[lang], infoLabels=item.infoLabels))
        else:
            new_data = get_source(test_url, item.url)

            patron = 'data-embed="([^"]+)" data-issuer="([^"]+)" data-signature="([^"]+)"'
            matches = scrapertools.find_multiple_matches(new_data, patron)

            for st, vt, tk in matches:
                post = {'streaming': st, 'validtime': vt, 'token': tk}
                post = urllib.urlencode(post)
                new_url = '%sedge-data/' % 'https://peliculonhd.net/'
                new_data = httptools.downloadpage(new_url, post, headers={'Referer': test_url}).data
                json_data = jsontools.load(new_data)
                if 'peliculonhd' not in json_data['url']:
                    url = json_data['url']
                else:
                    new_data = get_source(json_data['url'], test_url)
                    url = scrapertools.find_single_match(new_data, 'src: "([^"]+)"')
                    url = url.replace('download', 'preview')
                if url != '':
                    itemlist.append(Item(channel=item.channel, url=url, title='%s' + title, action='play', quality=quality,
                                         language=IDIOMAS[lang], infoLabels=item.infoLabels))


    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

    # Required to filter links
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    itemlist = sorted(itemlist, key=lambda it: it.language)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist

def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto

    if texto != '':
        return search_results(item)
    else:
        return []

def search_results(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '<article>.*?<a href="([^"]+)">.?<img src="([^"]+)" alt="([^"]+)" />.?<span class="(tvshows|movies)".*?'
    patron += '"meta".*?"year">([^<]+)<(.*?)<p>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumb, scrapedtitle, type, year, lang_data, scrapedplot in matches:

        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumb
        plot = scrapedplot
        language = get_language(lang_data)
        type = re.sub('shows|s', '', type)
        if language:
            action = 'findvideos'
        else:
            action = 'seasons'

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, plot=plot,
                        action=action, type=type, language=language, infoLabels={'year': year})
        if new_item.action == 'findvideos':
            new_item.contentTitle = new_item.title
        else:
            new_item.contentSerieName = new_item.title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host + 'ver/'
        elif categoria == 'infantiles':
            item.url = host + 'genero/animacion/'
        elif categoria == 'terror':
            item.url = host + 'genero/terror/'
        elif categoria == 'documentales':
            item.url = host + 'genero/terror/'
        item.type = 'movie'
        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
19  plugin.video.alfa/channels/pelisfox.json  Executable file → Normal file
@@ -1,14 +1,14 @@
{
    "id": "pelisfox",
    "name": "pelisfox",
    "name": "Pelisfox",
    "active": true,
    "adult": false,
    "language": ["lat"],
    "thumbnail": "https://s14.postimg.cc/c43etc1lt/pelisfox.png",
    "banner": "https://s30.postimg.cc/p6twg905d/pelisfox-banner.png",
    "categories": [
        "direct",
        "movie"
        "movie",
        "vos"
    ],
    "settings": [
        {
@@ -50,6 +50,19 @@
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "LAT",
                "VOSE"
            ]
        }
    ]
}

@@ -10,15 +10,17 @@ from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channels import filtertools
from channels import autoplay
from channelselector import get_thumb

tgenero = {"Drama": "https://s16.postimg.cc/94sia332d/drama.png",
           u"Accción": "https://s3.postimg.cc/y6o9puflv/accion.png",
           u"Animación": "https://s13.postimg.cc/5on877l87/animacion.png",
           u"Ciencia Ficción": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
           "Terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
           }

audio = {'LAT': '[COLOR limegreen]LATINO[/COLOR]', 'SUB': '[COLOR red]Subtitulado[/COLOR]'}

IDIOMAS = {'latino': 'LAT', 'subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['CAM', '360p', '480p', '720p', '1080p']
list_servers = ['vidlox', 'fembed', 'vidcolud', 'streamango', 'openload']


host = 'http://pelisfox.tv'

@@ -26,46 +28,44 @@ host = 'http://pelisfox.tv'
def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []

    itemlist.append(item.clone(title="Ultimas",
                               action="lista",
                               thumbnail='https://s22.postimg.cc/cb7nmhwv5/ultimas.png',
                               fanart='https://s22.postimg.cc/cb7nmhwv5/ultimas.png',
                               thumbnail=get_thumb('last', auto=True),
                               url=host + '/estrenos/'
                               ))

    itemlist.append(item.clone(title="Generos",
                               action="seccion",
                               url=host,
                               thumbnail='https://s3.postimg.cc/5s9jg2wtf/generos.png',
                               fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
                               thumbnail=get_thumb('genres', auto=True),
                               seccion='generos'
                               ))

    itemlist.append(item.clone(title="Por Año",
                               action="seccion",
                               url=host + '/peliculas/2017/',
                               thumbnail='https://s8.postimg.cc/7eoedwfg5/pora_o.png',
                               fanart='https://s8.postimg.cc/7eoedwfg5/pora_o.png',
                               thumbnail=get_thumb('year', auto=True),
                               seccion='anios'
                               ))

    itemlist.append(item.clone(title="Por Actor",
                               action="seccion",
                               url=host + '/actores/',
                               thumbnail='https://s17.postimg.cc/w25je5zun/poractor.png',
                               fanart='https://s17.postimg.cc/w25je5zun/poractor.png',
                               thumbnail=get_thumb('actors', auto=True),
                               seccion='actor'
                               ))

    itemlist.append(item.clone(title="Buscar",
                               action="search",
                               url=host + '/api/elastic/suggest?query=',
                               thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
                               fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
                               thumbnail=get_thumb('search', auto=True)
                               ))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


@@ -140,8 +140,6 @@ def seccion(item):
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.decode('utf-8')
        thumbnail = ''
        if item.seccion == 'generos':
            thumbnail = tgenero[title]
        fanart = ''
        url = host + scrapedurl

@@ -222,63 +220,37 @@ def search(item, texto):
def findvideos(item):
    logger.info()
    itemlist = []
    templist = []
    video_list = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    links = scrapertools.find_single_match(data, '<script>var.*?_SOURCE.?=.?(.*?);')
    links = links.replace('false', '"false"').replace('true', '"true"')
    links = eval(links)
    for link in links:
        language = link['lang']
        quality = link['quality']
        url = link['source'].replace('\\/', '/')
        sub = link['srt']

    patron = '<li data-quality=(.*?) data-lang=(.*?)><a href=(.*?) title=.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for quality, lang, scrapedurl in matches:
        url = host + scrapedurl
        title = item.title + ' (' + lang + ') (' + quality + ')'
        templist.append(item.clone(title=title,
                                   language=lang,
                                   url=url
                                   ))
    for videoitem in templist:
        data = httptools.downloadpage(videoitem.url).data
        urls_list = scrapertools.find_single_match(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
        urls_list = urls_list.split("},")
        for element in urls_list:
            if not element.endswith('}'):
                element = element + '}'
            json_data = jsontools.load(element)
            if 'id' in json_data:
                id = json_data['id']
            sub = ''
            if 'srt' in json_data:
                sub = json_data['srt']
            if config.get_setting('unify'):
                title = ''
            else:
                title = ' [%s] [%s]' % (quality, language)

            url = json_data['source'].replace('\\', '')
            server = json_data['server']
            quality = json_data['quality']
            if 'http' not in url:
                itemlist.append(Item(channel=item.channel, action='play', title='%s' + title, url=url, quality=quality,
                                     language=IDIOMAS[language], subtitle=sub, infoLabels=item.infoLabels))

                new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
                          '=%s&srt=%s' % (url, sub)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

                data = httptools.downloadpage(new_url).data
                data = re.sub(r'\\', "", data)
                video_list.extend(servertools.find_video_items(data=data))
                for urls in video_list:
                    if urls.language == '':
                        urls.language = videoitem.language
                    urls.title = item.title + urls.language + '(%s)'
    # Required by FilterTools

                for video_url in video_list:
                    video_url.channel = item.channel
                    video_url.action = 'play'
                    video_url.quality = quality
                    video_url.server = ""
                    video_url.infoLabels = item.infoLabels
            else:
                title = '%s [%s]' % (server, quality)
                video_list.append(item.clone(title=title, url=url, action='play', quality=quality,
                                             server=server, subtitle=sub))
    tmdb.set_infoLabels(video_list)
    if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
        video_list.append(
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
@@ -286,7 +258,7 @@ def findvideos(item):
                 extra="findvideos",
                 contentTitle=item.contentTitle
                 ))
    return video_list
    return itemlist

def newest(categoria):
|
||||
|
||||
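The rewritten findvideos above reconstructs JSON objects after splitting the page's _SOURCE array on "},". A self-contained sketch of that parsing strategy, with invented sample data (the channel feeds the real pieces through jsontools.load):

import json

# The split on "}," strips the closing brace from every piece except the last,
# so each piece is re-completed before being parsed. Sample data is invented.
raw = '{"id": 1, "server": "vidcloud"},{"id": 2, "server": "tiwikiwi"}'
for element in raw.split('},'):
    if not element.endswith('}'):
        element += '}'
    print json.loads(element)['server']
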
@@ -14,6 +14,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from lib import generictools

IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
@@ -157,7 +158,7 @@ def seasons(item):
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

if config.get_videolibrary_support() and len(itemlist) > 0:
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
@@ -214,19 +215,32 @@ def section(item):
itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))
return itemlist


def findvideos(item):
logger.info()

itemlist = []

data = get_source(item.url)

servers_page = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = get_source(servers_page, referer=item.url)
data = get_source(servers_page)
patron = '<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for enc_url in matches:
url_data = get_source(enc_url, referer=item.url)
url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
hidden_url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
if 'server' in hidden_url:
hidden_data = get_source(hidden_url)
url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')

else:
url = hidden_url
if 'pelishd.tv' in url:
vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False).data
dejuiced = generictools.dejuice(vip_data)
url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')

language = 'latino'
if not config.get_setting('unify'):
title = ' [%s]' % language.capitalize()

@@ -301,7 +301,7 @@ def seasons(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
@@ -322,10 +322,13 @@ def season_episodes(item):
logger.info()
itemlist = []

data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
full_data = httptools.downloadpage(item.url).data
full_data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", full_data)
season = str(item.infoLabels['season'])
patron = '<a href=(.*?temporada-%s\/.*?) title=.*?i-play><\/i> (.*?)<\/a>'%season
if int(season) <= 9:
season = '0'+season
data = scrapertools.find_single_match(full_data, '</i>Temporada %s</div>(.*?)(?:down arrow|cuadre_comments)' % season)
patron = '<a href="([^"]+)" title=".*?i-play"><\/i> (.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for url, episode in matches:
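The rewritten season_episodes first zero-pads single-digit season numbers (the site labels them "Temporada 01", "Temporada 02", ...) and narrows the page to that season's block before matching episodes. A self-contained sketch of the same two-step extraction, against invented markup:

import re

# Invented sample markup, shaped like the block the regexes above expect.
full_data = '</i>Temporada 03</div><a href="/serie/temporada-3/cap-1" title="x"><i class="i-play"></i> Capitulo 1</a>down arrow'
season = str(3)
if int(season) <= 9:
    season = '0' + season  # match the site's zero-padded season headers
block = re.search(r'</i>Temporada %s</div>(.*?)(?:down arrow|cuadre_comments)' % season, full_data).group(1)
print re.findall(r'<a href="([^"]+)" title=".*?i-play"><\/i> (.*?)<\/a>', block)
# -> [('/serie/temporada-3/cap-1', 'Capitulo 1')]
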
@@ -390,6 +393,7 @@ def findvideos(item):
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)

patron_language = '(<ul id=level\d_.*?\s*class=.*?ul>)'
matches = re.compile(patron_language, re.DOTALL).findall(data)

@@ -331,6 +331,8 @@ def seasons_episodes(item):
def findvideos(item):
logger.info()
itemlist=[]

tmdb.set_infoLabels(item, True)  # para refrescar infolabels y obtener más datos en "segunda pasada"

if item.extra != "links_encoded":
data = httptools.downloadpage(item.url).data
@@ -366,7 +368,6 @@ def findvideos(item):
for videoitem in itemlist:
videoitem.title = '%s [%s] [%s]' % (videoitem.server.capitalize(), videoitem.language, videoitem.quality)

tmdb.set_infoLabels(itemlist)
if itemlist and not item.show:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",

@@ -291,7 +291,7 @@ def findvideos(item):
url = data_url.headers['location']
except:
pass

url = url.replace(" ", "%20")
itemlist.append(item.clone(title = '[%s] [%s]', url=url, action='play', subtitle=subs,
language=language, quality=quality, infoLabels=item.infoLabels))

@@ -1496,7 +1496,7 @@ def detalles_fa(item):

if item.contentType == "tvshow" and ob_tmdb.result:
itemlist.append(item.clone(action="info_seasons", text_color=color4,
title=config.get_localized_string(7007) % item.infoLabels["number_of_seasons"]))
title=config.get_localized_string(70067) % item.infoLabels["number_of_seasons"]))
if ob_tmdb.result:
itemlist.append(item.clone(action="reparto", title=config.get_localized_string(70071), text_color=color4,
infoLabels={'tmdb_id': item.infoLabels['tmdb_id'],
@@ -2457,7 +2457,7 @@ def detalles_mal(item):

# Opción para ver la info de personajes y dobladores/equipo de rodaje
if not "No characters or voice actors" in data and not "No staff for this anime" in data:
itemlist.append(item.clone(action="staff_mal", title=onfig.get_localized_string(70354), text_color=color2,
itemlist.append(item.clone(action="staff_mal", title=config.get_localized_string(70354), text_color=color2,
url=item.url + "/characters"))
if config.is_xbmc():
item.contextual = True
@@ -2514,7 +2514,7 @@ def detalles_mal(item):
for url, title in matches:
new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan,
thumbnail="")
new_item.title = onfig.get_localized_string(70355) % title
new_item.title = config.get_localized_string(70355) % title
new_item.contentTitle = title
new_item.url = "https://myanimelist.net%s" % url
itemlist.append(new_item)
@@ -2525,7 +2525,7 @@ def detalles_mal(item):
for url, title in matches:
new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan,
thumbnail="")
new_item.title = onfig.get_localized_string(70356) % title
new_item.title = config.get_localized_string(70356) % title
new_item.contentTitle = title
new_item.url = "https://myanimelist.net%s" % url
itemlist.append(new_item)
@@ -2536,7 +2536,7 @@ def detalles_mal(item):
for url, title in matches:
new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan,
thumbnail="")
new_item.title = onfig.get_localized_string(70357) % title
new_item.title = config.get_localized_string(70357) % title
new_item.contentTitle = title
new_item.url = "https://myanimelist.net%s" % url
itemlist.append(new_item)
@@ -2556,12 +2556,12 @@ def detalles_mal(item):
itemlist.append(new_item)

itemlist.append(
item.clone(title=onfig.get_localized_string(70358), action="listado_tmdb", infoLabels={'mediatype': item.contentType},
item.clone(title=config.get_localized_string(70358), action="listado_tmdb", infoLabels={'mediatype': item.contentType},
search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']),
'language': langt, 'page': 1}, text_color=color2))

# Recomendaciones myanimelist y búsqueda de info en anidb (fansubs en español)
itemlist.append(item.clone(title=onfig.get_localized_string(70359), action="reco_mal"))
itemlist.append(item.clone(title=config.get_localized_string(70359), action="reco_mal"))
anidb_link = scrapertools.find_single_match(data,
'<a href="(http://anidb.info/perl-bin/animedb.pl\?show=anime&aid=\d+)')
if anidb_link:
@@ -2597,7 +2597,7 @@ def videos_mal(item):

next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="link-blue-box">More')
if next_page:
itemlist.append(item.clone(title=onfig.get_localized_string(70361), url=next_page, text_color=""))
itemlist.append(item.clone(title=config.get_localized_string(70361), url=next_page, text_color=""))
if itemlist:
itemlist.insert(0, item.clone(title=config.get_localized_string(70362), action="", text_color=color3))

@@ -3142,7 +3142,7 @@ def login_mal(from_list=False):

if not re.search(r'(?i)' + user, response.data):
logger.error("Error en el login")
return False, onfig.get_localized_string(70330), user
return False, config.get_localized_string(70330), user
else:
if generic:
return False, config.get_localized_string(70381), user

@@ -1,12 +1,12 @@
{
"id": "xtheatre",
"name": "xTheatre",
"id": "xms",
"name": "XMS",
"active": true,
"adult": true,
"language": ["*"],
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg",
"thumbnail": "https://xtheatre.net/wp-content/uploads/xtlogo.jpg",
"banner": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebm.png",
"thumbnail": "https://i.postimg.cc/wB0NsMTX/xms.png",
"banner": "https://i.postimg.cc/c6yh5C3K/xmsbn.png",
"categories": [
"adult"
],
@@ -10,9 +10,9 @@ from core import servertools
from core.item import Item
from platformcode import config, logger

__channel__ = "xtheatre"
__channel__ = "xms"

host = 'https://xtheatre.net/'
host = 'https://xxxmoviestream.com/'
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
@@ -43,7 +43,6 @@ def mainlist(item):
logger.info()

itemlist = []
# thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/%s.png'

itemlist.append(Item(channel=__channel__, title="Últimas", url=host + '?filtre=date&cat=0',
action="peliculas", viewmode="movie_with_plot", viewcontent='movies',
@@ -75,37 +74,26 @@ def peliculas(item):
# logger.info(data)
patron_todos = '<div id="content">(.*?)<div id="footer"'
data = scrapertools.find_single_match(data, patron_todos)
# logger.info(data)

patron = 'data-lazy-src="([^"]+)".*?'  # img
patron += 'title="([^"]+)"/>.*?'  # title
patron += '</noscript><a href="([^"]+)"'  # url
patron = 'src="([^"]+)" class="attachment-thumb_site.*?'  # img
patron += '<a href="([^"]+)" title="([^"]+)".*?'  # url, title
patron += '<div class="right"><p>([^<]+)</p>'  # plot
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
title = "%s" % (scrapedtitle)
for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches:

itemlist.append(item.clone(channel=item.channel, action="findvideos", title=title,
url=scrapedurl, thumbnail=scrapedthumbnail, plot='',
viewmode="movie_with_plot", folder=True))
itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(),
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, fanart=scrapedthumbnail,
viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
# Extrae el paginador
paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)">Next ›</a></li><li>')
# paginacion = paginacion.replace('#038;', '')
paginacion = urlparse.urljoin(item.url, paginacion)

if paginacion:
itemlist.append(Item(channel=item.channel, action="peliculas",
itemlist.append(Item(channel=__channel__, action="peliculas",
thumbnail=thumbnail % 'rarrow',
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))

for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
patron = '<div id="video-synopsys" itemprop="description">(.*?)<div id="video-bottom">'
data = scrapertools.find_single_match(data, patron)
item.infoLabels['plot'] = scrapertools.find_single_match(data, '<p>(.*?)</p></div>')
item.infoLabels['plot'] = scrapertools.htmlclean(item.plot)

return itemlist

@@ -115,7 +103,7 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
logger.info(data)
# logger.info(data)
patron = 'data-lazy-src="([^"]+)".*?'  # img
patron += '</noscript><a href="([^"]+)".*?'  # url
patron += '<span>([^<]+)</span></a>.*?'  # title
@@ -124,11 +112,9 @@ def categorias(item):

for scrapedthumbnail, scrapedurl, scrapedtitle, vids in matches:
title = "%s (%s)" % (scrapedtitle, vids.title())
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(Item(channel=item.channel, action="peliculas", fanart=scrapedthumbnail,
title=title, url=url, thumbnail=thumbnail, plot='',
viewmode="movie_with_plot", folder=True))
itemlist.append(item.clone(channel=__channel__, action="peliculas", fanart=scrapedthumbnail,
title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
viewmode="movie_with_plot", folder=True))

return itemlist

@@ -157,27 +143,22 @@ def sub_search(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron_todos = '<div id="content">(.*?)</li></ul></div></div>'
data = scrapertools.find_single_match(data, patron_todos)

patron = 'data-lazy-src="([^"]+)".*?'  # img
patron += 'title="([^"]+)"/>.*?'  # title
patron += '</noscript><a href="([^"]+)"'  # url
patron += 'title="([^"]+)" />.*?'  # title
patron += '</noscript><a href="([^"]+)".*?'  # url
patron += '<div class="right"><p>([^<]+)</p>'  # plot
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
title = "%s" % (scrapedtitle)
itemlist.append(item.clone(title=title, url=scrapedurl,
action="findvideos", thumbnail=scrapedthumbnail))
for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=plot, fanart=scrapedthumbnail,
action="findvideos", thumbnail=scrapedthumbnail))

paginacion = scrapertools.find_single_match(
data, "<a href='([^']+)' class=\"inactive\">\d+</a>")

if paginacion:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
itemlist.append(item.clone(channel=__channel__, action="sub_search",
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))

return itemlist

@@ -186,21 +167,15 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
logger.info(data)
patron_todos = '<div class="video-embed">(.*?)</div>'
data = scrapertools.find_single_match(data, patron_todos)
patron = '<iframe src="[^"]+" data-lazy-src="([^"]+)".*?</iframe>'

patron = '<iframe src="([^"]+)".*?webkitallowfullscreen="true" mozallowfullscreen="true"></iframe>'
matches = scrapertools.find_multiple_matches(data, patron)

for url in matches:
title = item.title
server = servertools.get_server_from_url(url)
title = "Ver en: [COLOR yellow](%s)[/COLOR]" % server

itemlist.append(item.clone(action='play', title=title, server=server, mediatype='movie', url=url))
itemlist.append(item.clone(action='play', title=title, server=server, url=url))

for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
videoitem.channel = item.channel
videoitem.title = "%s [COLOR yellow](%s)[/COLOR]" % (item.title, videoitem.server)

return itemlist
@@ -8,52 +8,91 @@ from core.item import Item
from lib import generictools
from platformcode import logger

URL_BROWSE = "https://yts.am/browse-movies"
URL = "https://yts.am"

def mainlist(item):
logger.info()
itemlist = []

itemlist.append(Item(channel = item.channel,
title = "Browse",
title = "Explorar por generos",
action = "categories",
opt = 'genre',
url = URL_BROWSE
))

itemlist.append(Item(channel = item.channel,
title = "Explorar por calidad",
action = "categories",
opt = 'quality',
url = URL_BROWSE
))

itemlist.append(Item(channel = item.channel,
title = "Explorar películas",
action = "movies",
opt = 0,
url = "https://yts.am/browse-movies"
url = URL_BROWSE
))

itemlist.append(Item(channel = item.channel,
title = "Popular",
title = "Más populares",
action = "movies",
opt = 1,
url = "https://yts.am" ))
url = URL ))

itemlist.append(Item(channel = item.channel,
title = "Search",
title = "Buscar",
action = "search",
opt = 0,
url = "https://yts.am/browse-movies"
url = URL_BROWSE
))

return itemlist


def categories(item):
logger.info()
itemList = []
data = httptools.downloadpage(item.url).data

block = scrapertools.find_single_match( data, '(?s)<.*?="' + item.opt + '">(.*?)</select>')
pattern = '<option value=".*?">(?!All)(.*?)</option>'
categories = scrapertools.find_multiple_matches( block, pattern )

for category in categories:
url = URL_BROWSE + '/0/all/' + category + '/0/latest' if item.opt == "genre" else URL_BROWSE + '/0/' + category + '/all/0/latest'

itemList.append( Item( action = "movies",
channel = item.channel,
title = category,
url = url ))

return itemList

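From the URLs built in mainlist() and categories() above, the yts browse path appears to be positional: /{query}/{quality}/{genre}/{rating}/{order}. That reading is an inference from this hunk, not something the site documents. A small sketch of a helper built on it:

URL_BROWSE = "https://yts.am/browse-movies"

def browse_url(query='0', quality='all', genre='all', rating='0', order='latest'):
    # Positional segments inferred from the hunk above (an assumption).
    return '/'.join([URL_BROWSE, query, quality, genre, rating, order])

print browse_url(genre='Action')   # https://yts.am/browse-movies/0/all/Action/0/latest
print browse_url(quality='1080p')  # https://yts.am/browse-movies/0/1080p/all/0/latest
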
def movies(item):
logger.info()
itemlist = []
infoLabels = {}
data = httptools.downloadpage(item.url).data

patron = '(?s)class="browse-movie-wrap.*?a href="([^"]+).*?'  #Movie link
patron += 'img class.*?src="([^"]+).*?'  #Image
patron += 'movie-title">.*?([^<]+)'  #Movie title
patron += '.*?year">(.*?)<'  #Year
pattern = '(?s)class="browse-movie-wrap.*?a href="([^"]+).*?'  #Movie link
pattern += 'img class.*?src="([^"]+).*?'  #Image
pattern += 'movie-title">.*?([^<]+)'  #Movie title
pattern += '.*?year">(.*?)<'  #Year

matches = scrapertools.find_multiple_matches(data, patron)
matches = scrapertools.find_multiple_matches(data, pattern)
idx = 0

for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
if item.opt == 1:
scrapedthumbnail = 'https://yts.am' + scrapedthumbnail
infoLabels['plot'] = findplot(scrapedurl)
scrapedthumbnail = URL + scrapedthumbnail
infoLabels['year'] = year

itemlist.append(Item(action = "findvideo",
channel = item.channel,
contentTitle = scrapedtitle,
infoLabels = infoLabels,
title = scrapedtitle + ' (' + year + ')',
thumbnail = scrapedthumbnail,
@@ -64,35 +103,28 @@ def movies(item):
break
if itemlist != []:
actual_page = item.url
pattern = '(?s)href="([^"]+)">Next.*?'
next_page = scrapertools.find_single_match(data, pattern)
nextPattern = '(?s)href="([^"]+)">Next.*?'
next_page = scrapertools.find_single_match(data, nextPattern)

if next_page != '':
itemlist.append(Item(channel=item.channel,
action="movies",
title='Next >>>',
url='https://yts.am' + next_page))
url=URL + next_page))

tmdb.set_infoLabels_itemlist( itemlist, seekTmdb=True)

return itemlist

def findplot(url):
data = httptools.downloadpage(url).data

pattern = '(?s)<p class="hidden-xs">(.*?)</p>'  #Synopsis

plot = scrapertools.find_single_match(data, pattern)

return plot

def findvideo(item):
itemlist = []
data = httptools.downloadpage(item.url).data

patron = '(?s)modal-quality.*?<span>(.*?)</span>'  #Quality
patron += '.*?size">(.*?)</p>'  #Type
patron += '.*?href="([^"]+)" rel'  #Torrent link
pattern = '(?s)modal-quality.*?<span>(.*?)</span>'  #Quality
pattern += '.*?size">(.*?)</p>'  #Type
pattern += '.*?href="([^"]+)" rel'  #Torrent link

matches = scrapertools.find_multiple_matches(data, patron)
matches = scrapertools.find_multiple_matches(data, pattern)

for quality, videoType, link in matches:

@@ -111,7 +143,7 @@ def search(item, text):
logger.info('search: ' + text)

try:
item.url = 'https://yts.am/browse-movies/' + text + '/all/all/0/latest'
item.url = URL_BROWSE + '/' + text + '/all/all/0/latest'
itemlist = movies(item)

return itemlist

@@ -455,7 +455,6 @@ def listado(item):
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs

logger.debug(item.extra2)
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0 and item.extra2 not in ['CAST', 'LAT', 'VO', 'VOS', 'VOSE']:  #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)

@@ -55,6 +55,9 @@ default_headers["Accept-Encoding"] = "gzip"
# Tiempo máximo de espera para downloadpage, si no se especifica nada
HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = config.get_setting('httptools_timeout', default=15)
if HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT == 0: HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT = None

# Uso aleatorio de User-Agents, si no se especifica nada
HTTPTOOLS_DEFAULT_RANDOM_HEADERS = False

def get_user_agent():
# Devuelve el user agent global para ser utilizado cuando es necesario para la url.
@@ -96,7 +99,7 @@ load_cookies()


def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=True, cookies=True, replace_headers=False,
add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0):
add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0, random_headers=False):
"""
Abre una url y retorna los datos obtenidos

@@ -119,6 +122,8 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
@type add_referer: bool
@param only_headers: Si True, solo se descargarán los headers, omitiendo el contenido de la url.
@type only_headers: bool
@param random_headers: Si True, utiliza el método de seleccionar headers aleatorios.
@type random_headers: bool
@return: Resultado de la petición
@rtype: HTTPResponse

@@ -147,6 +152,9 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr

if add_referer:
request_headers["Referer"] = "/".join(url.split("/")[:3])

if random_headers or HTTPTOOLS_DEFAULT_RANDOM_HEADERS:
request_headers['User-Agent'] = random_useragent()

url = urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]")

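The new random_headers flag wires random_useragent() into the request headers. A hedged usage sketch based only on the signature shown in this hunk; the target URL is a placeholder:

from core import httptools

# Per request: randomize the User-Agent for this call only.
response = httptools.downloadpage("https://example.com", random_headers=True)

# Globally: flipping the module default makes every call randomize it.
httptools.HTTPTOOLS_DEFAULT_RANDOM_HEADERS = True
data = httptools.downloadpage("https://example.com").data
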
@@ -289,6 +297,26 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
logger.info("No se ha podido autorizar")

return type('HTTPResponse', (), response)


def random_useragent():
"""
Based on code from https://github.com/theriley106/RandomHeaders

Python Method that generates fake user agents with a locally saved DB (.csv file).

This is useful for webscraping, and testing programs that identify devices based on the user agent.
"""

import random

UserAgentPath = os.path.join(config.get_runtime_path(), 'tools', 'UserAgent.csv')
if os.path.exists(UserAgentPath):
UserAgentIem = random.choice(list(open(UserAgentPath))).strip()
if UserAgentIem:
return UserAgentIem

return default_headers["User-Agent"]


class NoRedirectHandler(urllib2.HTTPRedirectHandler):

125  plugin.video.alfa/platformcode/custom_code.py  Normal file
@@ -0,0 +1,125 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Updater (kodi)
# --------------------------------------------------------------------------------

import os
import json

from platformcode import config, logger

from core import jsontools
from core import filetools

json_data_file_name = 'custom_code.json'


def init():
logger.info()

"""
Todo el código añadido al add-on se borra con cada actualización. Esta función permite restaurarlo automáticamente con cada actualización.
Esto permite al usuario tener su propio código, bajo su responsabilidad, y restaurarlo al add-on cada vez que se actualiza.

El mecanismo funciona copiando el contenido de la carpeta-arbol ".\userdata\addon_data\plugin.video.alfa\custom_code\..." sobre
las carpetas de código del add-on. No verifica el contenido, solo vuelca(reemplaza) el contenido de "custom_code".

El usuario almacenará en las subcarpetas de "custom_code" su código actualizado y listo para ser copiado en cualquier momento.
Si no se desea que copie algo, simplemente se borra de "custom_code" y ya no se copiará en la próxima actualización.

Los pasos que sigue esta función, son los siguientes:

1.- La función se llama desde videolibrary_service.py, desde la función inicial:
# Copia Custom code a las carpetas de Alfa desde la zona de Userdata
from platformcode import custom_code
custom_code.init()

2.- En el inicio de Kodi, comprueba si existe la carpeta "custom_code" en ".\userdata\addon_data\plugin.video.alfa\".
Si no existe, la crea y sale sin más, dando al usuario la posibilidad de copiar sobre esa estructura su código,
y que la función la vuelque sobre el add-on en el próximo inicio de Kodi.

3.- En el siguiente inicio de Kodi, comprueba si existe el custom_code.json en la carpeta root del add-on.
Si no existe, lo crea con el número de versión del add-on vacío, para permitir que se copien los archivos en esta pasada.

4.- Verifica que el número de versión del add-on es diferente de el de custom_code.json. Si es la misma versión,
se sale porque ya se realizó la copia anteriormente.
Si la versión es distinta, se realiza el volcado de todos los archivos de la carpeta-árbol "custom_code" sobre el add-on.
Si la carpeta de destino no existe, dará un error y se cancelará la copia. Se considera que no tienen sentido nuevas carpetas.

5.- Si la copia ha terminado con éxito, se actualiza el custom_code.json con el número de versión del add-on,
para que en inicios sucesivos de Kodi no se realicen las copias, hasta que el add-on cambie de versión.
En el número de versión del add-on no se considera el número de fix.

Tiempos: Copiando 7 archivos de prueba, el proceso ha tardado una décima de segundo.
"""

try:
#Existe carpeta "custom_code" ? Si no existe se crea y se sale
custom_code_dir = os.path.join(config.get_data_path(), 'custom_code')
if os.path.exists(custom_code_dir) == False:
create_folder_structure(custom_code_dir)
return

else:
#Existe "custom_code.json" ? Si no existe se crea
custom_code_json_path = config.get_runtime_path()
custom_code_json = os.path.join(custom_code_json_path, 'custom_code.json')
if os.path.exists(custom_code_json) == False:
create_json(custom_code_json_path)

#Se verifica si la versión del .json y del add-on son iguales. Si es así se sale. Si no se copia "custom_code" al add-on
verify_copy_folders(custom_code_dir, custom_code_json_path)
except:
pass


def create_folder_structure(custom_code_dir):
logger.info()

#Creamos todas las carpetas. La importante es "custom_code". Las otras sirven meramente de guía para evitar errores de nombres...
os.mkdir(custom_code_dir)
os.mkdir(filetools.join(custom_code_dir, 'channels'))
os.mkdir(filetools.join(custom_code_dir, 'core'))
os.mkdir(filetools.join(custom_code_dir, 'lib'))
os.mkdir(filetools.join(custom_code_dir, 'platformcode'))
os.mkdir(filetools.join(custom_code_dir, 'resources'))
os.mkdir(filetools.join(custom_code_dir, 'servers'))

return


def create_json(custom_code_json_path):
logger.info()

#Guardamos el json con la versión de Alfa vacía, para permitir hacer la primera copia
json_data_file = filetools.join(custom_code_json_path, json_data_file_name)
json_file = open(json_data_file, "a+")
json_file.write(json.dumps({"addon_version": ""}))
json_file.close()

return


def verify_copy_folders(custom_code_dir, custom_code_json_path):
logger.info()

#verificamos si es una nueva versión de Alfa instalada o era la existente. Si es la existente, nos vamos sin hacer nada
json_data_file = filetools.join(custom_code_json_path, json_data_file_name)
json_data = jsontools.load(filetools.read(json_data_file))
current_version = config.get_addon_version(with_fix=False)
if current_version == json_data['addon_version']:
return

#Ahora copiamos los archivos desde el área de Userdata, Custom_code, sobre las carpetas del add-on
for root, folders, files in os.walk(custom_code_dir):
for file in files:
input_file = filetools.join(root, file)
output_file = input_file.replace(custom_code_dir, custom_code_json_path)
if filetools.copy(input_file, output_file, silent=True) == False:
return

#Guardamos el json con la versión actual de Alfa, para no volver a hacer la copia hasta la nueva versión
json_data['addon_version'] = current_version
filetools.write(json_data_file, jsontools.dump(json_data))

return
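To make the mirroring rule above concrete, here is a hedged sketch of how one user file would travel on the next Kodi start after a version change. The override file name is hypothetical and the real paths depend on the install:

# Illustrative only -- the override file and both roots are hypothetical.
custom_code_dir = '<userdata>/addon_data/plugin.video.alfa/custom_code'
addon_dir = '<kodi>/addons/plugin.video.alfa'

input_file = custom_code_dir + '/channels/mychannel.py'
output_file = input_file.replace(custom_code_dir, addon_dir)
# -> '<kodi>/addons/plugin.video.alfa/channels/mychannel.py'
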
@@ -110,6 +110,8 @@ def render_items(itemlist, parent_item):
@type parent_item: item
@param parent_item: elemento padre
"""
logger.info('INICIO render_items')

# Si el itemlist no es un list salimos
if not type(itemlist) == list:
return
@@ -285,6 +287,8 @@ def render_items(itemlist, parent_item):
if parent_item.mode in ['silent', 'get_cached', 'set_cache', 'finish']:
xbmc.executebuiltin("Container.SetViewMode(500)")

logger.info('FINAL render_items')


def get_viewmode_id(parent_item):
# viewmode_json habria q guardarlo en un archivo y crear un metodo para q el user fije sus preferencias en:

@@ -43,6 +43,7 @@ thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png",
"adults": "https://s10.postimg.cc/s8raxc51l/adultos.png",
"recents": "https://s10.postimg.cc/649u24kp5/recents.png",
"updated" : "https://s10.postimg.cc/46m3h6h9l/updated.png",
"actors": "https://i.postimg.cc/tC2HMhVV/actors.png",
"accion": "https://s14.postimg.cc/sqy3q2aht/action.png",
"adolescente" : "https://s10.postimg.cc/inq7u4p61/teens.png",
"adultos": "https://s10.postimg.cc/s8raxc51l/adultos.png",
@@ -87,6 +88,7 @@ thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png",
"romance" : "https://s10.postimg.cc/yn8vdll6x/romance.png",
"romantica": "https://s14.postimg.cc/8xlzx7cht/romantic.png",
"suspenso": "https://s10.postimg.cc/7peybxdfd/suspense.png",
"telenovelas": "https://i.postimg.cc/QCXZkyDM/telenovelas.png",
"terror": "https://s14.postimg.cc/thqtvl52p/horror.png",
"thriller": "https://s14.postimg.cc/uwsekl8td/thriller.png",
"western": "https://s10.postimg.cc/5wc1nokjt/western.png"
@@ -3251,7 +3251,7 @@ msgstr "Serie"

msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
msgstr "Peliculas"

msgctxt "#70138"
msgid "Low Rating"

@@ -3251,7 +3251,7 @@ msgstr "Serie"

msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
msgstr "Peliculas"

msgctxt "#70138"
msgid "Low Rating"

@@ -3251,7 +3251,7 @@ msgstr "Serie"

msgctxt "#70137"
msgid "Movies"
msgstr "Películas"
msgstr "Peliculas"

msgctxt "#70138"
msgid "Low Rating"
@@ -3358,8 +3358,16 @@ msgid "Press to 'Clear cache' saved"
msgstr "Pulse para 'Borrar caché' guardada"

msgctxt "#70164"
msgid "Free First|Premium First|Debriders First"
msgstr "Free primero|Premium primero|Debriders primero"
msgid "Free First"
msgstr "Free primero"

msgctxt "#70165"
msgid "Premium First"
msgstr "Premium primero"

msgctxt "#70166"
msgid "Debriders First"
msgstr "Debriders primero"

msgctxt "#70167"
msgid "Titles Options"

@@ -66,7 +66,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = httptools.downloadpage("http://hqq.watch/player/get_md5.php?" + params, headers=head).data
media_urls = []
url_data = jsontools.load(data)
media_url = "https:" + tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
media_url = tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
if not media_url.startswith("http"):
media_url = "https:" + media_url
video_urls = []
media = media_url + "|User-Agent=Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X)"
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [netu.tv]", media, 0, subtitle])

4  plugin.video.alfa/servers/streamango.json  Executable file → Normal file
@@ -6,6 +6,10 @@
{
"pattern": "streamango.com/(?:embed|f)/([A-z0-9]+)",
"url": "http://streamango.com/embed/\\1"
},
{
"pattern": "https://fruitadblock.net/embed/([A-z0-9]+)",
"url": "http://streamango.com/embed/\\1"
}
]
},

42  plugin.video.alfa/servers/tiwikiwi.json  Normal file
@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://tiwi.kiwi/embed-([a-z0-9]+).html",
"url": "https://tiwi.kiwi/embed-\\1.html"
}
]
},
"free": true,
"id": "tiwikiwi",
"name": "tiwikiwi",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/CxdyWRcN/tiwikiwi.png"
}
30  plugin.video.alfa/servers/tiwikiwi.py  Normal file
@@ -0,0 +1,30 @@
# Conector tiwikiwi By Alfa development Group
# --------------------------------------------------------

import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Cloud] El archivo no existe o ha sido borrado"
return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(eval.*?)?\s+</script>")
dec_data = jsunpack.unpack(enc_data)
sources = 'file:"([^"]+)",label:"([^"]+)"'
matches = re.compile(sources, re.DOTALL).findall(dec_data)
for url, quality in matches:
video_url = url
video_urls.append(['tiwi.kiwi [%s]' % quality, video_url])
return video_urls
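A hedged usage sketch for the connector above; the embed id is invented. get_video_url() unpacks the page's packed JavaScript with jsunpack and returns [label, url] pairs:

from servers import tiwikiwi

page_url = 'https://tiwi.kiwi/embed-abc123.html'  # hypothetical id
exists, message = tiwikiwi.test_video_exists(page_url)
if exists:
    for label, stream_url in tiwikiwi.get_video_url(page_url):
        print label, stream_url  # e.g. 'tiwi.kiwi [720p]' plus the direct URL
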
42  plugin.video.alfa/servers/vidcloud.json  Normal file
@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://vidcloud.co/embed/([a-z0-9]+)",
"url": "https://vidcloud.co/player?fid=\\1&page=embed"
}
]
},
"free": true,
"id": "vidcloud",
"name": "vidcloud",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/xjpwG0rK/0a-RVDzlb-400x400.jpg"
}
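The find_videos block above turns embed links into player API calls by substituting the captured id into the url template. Roughly how that table is applied (the real dispatch lives in servertools; the embed URL here is invented):

import re

pattern = r'https://vidcloud.co/embed/([a-z0-9]+)'
template = r'https://vidcloud.co/player?fid=\1&page=embed'

print re.sub(pattern, template, 'https://vidcloud.co/embed/abc123')
# -> https://vidcloud.co/player?fid=abc123&page=embed
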
29  plugin.video.alfa/servers/vidcloud.py  Normal file
@@ -0,0 +1,29 @@
# Conector vidcloud By Alfa development Group
# --------------------------------------------------------

import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger


def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[Cloud] El archivo no existe o ha sido borrado"
return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
data = data.replace('\\\\', '\\').replace('\\','')
patron = '"file":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)

for url in matches:
video_urls.append(['vidcloud', url])
return video_urls
1822  plugin.video.alfa/tools/UserAgent.csv  Normal file
File diff suppressed because it is too large
@@ -320,6 +320,10 @@ if __name__ == "__main__":
from platformcode import updater
updater.check_addon_init()

# Copia Custom code a las carpetas de Alfa desde la zona de Userdata
from platformcode import custom_code
custom_code.init()

if not config.get_setting("update", "videolibrary") == 2:
check_for_update(overwrite=False)