Merge remote-tracking branch 'alfa-addon/master'
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.13" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.17" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
@@ -11,7 +11,7 @@
    <summary lang="es">Navega con Kodi por páginas web.</summary>
    <assets>
        <icon>logo-cumple.png</icon>
        <fanart>fanart.jpg</fanart>
        <fanart>fanart1.jpg</fanart>
        <screenshot>resources/media/themes/ss/1.jpg</screenshot>
        <screenshot>resources/media/themes/ss/2.jpg</screenshot>
        <screenshot>resources/media/themes/ss/3.jpg</screenshot>
@@ -19,15 +19,15 @@
    </assets>
    <news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ cinetux ¤ porntrex ¤ repelis
¤ fembed ¤ uptobox ¤ vivo
¤ seriesmetro ¤ DivxTotal ¤ EliteTorrent
¤ EstrenosGo ¤ GranTorrent

¤ Todopeliculas ¤ Maxipelis24 ¤ allcalidad
¤ descargacineclasico ¤ porntrex ¤ seriesmetro
¤ pedropolis ¤ thumzilla ¤ xms

[COLOR green][B]Novedades[/B][/COLOR]
¤ Pack canales +18
¤ cine24h ¤ hdfilmologia ¤ pelis24
¤ pelishd24 ¤ pelisplay

Agradecimientos a @paeznet por colaborar en esta versión
¤ Agradecimientos a @chivmalev por colaborar con esta versión

</news>
    <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

@@ -42,7 +42,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="item_p">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedthumbnail = "https:" + scrapedthumbnail
@@ -78,7 +77,6 @@ def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle

16
plugin.video.alfa/channels/TXXX.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "TXXX",
    "name": "TXXX",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.txxx.com/images/desktop-logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [

    ]
}

135
plugin.video.alfa/channels/TXXX.py
Normal file
@@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://www.txxx.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Ultimas", action="peliculas", url=host + "/latest-updates/"))
    itemlist.append( Item(channel=item.channel, title="Mejor valoradas", action="peliculas", url=host + "/top-rated/"))
    itemlist.append( Item(channel=item.channel, title="Mas popular", action="peliculas", url=host + "/most-popular/"))
    itemlist.append( Item(channel=item.channel, title="Canal", action="catalogo", url=host + "/channels-list/"))
    itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
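Each Item queues up the next call through its `action` field: Alfa's launcher imports the channel module and invokes the function of that name, passing the item back in. A minimal sketch of that dispatch, assuming nothing beyond what the imports above show (the real launcher lives in the platform code, so this helper is illustrative only):

# Hypothetical dispatcher: resolve item.action against the channel module.
def run_action(item):
    module = __import__('channels.' + item.channel, fromlist=[str(item.channel)])
    return getattr(module, item.action)(item)  # e.g. peliculas(item)
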
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="channel-thumb">.*?<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?<span>(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail,num in matches:
        scrapedplot = ""
        scrapedurl = host + scrapedurl
        title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
        itemlist.append( Item(channel=item.channel, action="peliculas", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True) )
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="c-thumb">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<div class="c-thumb--overlay c-thumb--overlay-title">([^"]+)</div>.*?<span>(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,num in matches:
        scrapedplot = ""
        title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
        itemlist.append( Item(channel=item.channel, action="peliculas", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)".*?<span class="thumb__duration">(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
        contentTitle = scrapedtitle
        title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
    partes = video_url.split('||')
    video_url = decode_url(partes[0])
    video_url = re.sub('/get_file/\d+/[0-9a-z]{32}/', partes[1], video_url)
    video_url += '&' if '?' in video_url else '?'
    video_url += 'lip=' + partes[2] + '&lt=' + partes[3]
    itemlist.append(item.clone(action="play", title=item.title, url=video_url))
    return itemlist


def decode_url(txt):
    _0x52f6x15 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~'
    reto = ''; n = 0
    # In the next two lines, АВСЕМ are Cyrillic homoglyphs that take 2 bytes each in UTF-8; the replace() maps them to their 1-byte ASCII look-alikes ABCEM.
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')

    while n < len(txt):
        a = _0x52f6x15.index(txt[n])
        n += 1
        b = _0x52f6x15.index(txt[n])
        n += 1
        c = _0x52f6x15.index(txt[n])
        n += 1
        d = _0x52f6x15.index(txt[n])
        n += 1

        a = a << 2 | b >> 4
        b = (b & 15) << 4 | c >> 2
        e = (c & 3) << 6 | d
        reto += chr(a)
        if c != 64: reto += chr(b)
        if d != 64: reto += chr(e)

    return urllib.unquote(reto)

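decode_url() above is plain base64 with a remapped alphabet: '+', '/' and '=' become '.', ',' and '~' (index 64, the padding slot), on top of the Cyrillic homoglyph trick. A minimal sketch of the same decode via the stdlib, assuming the input really is this remapped base64 (helper name hypothetical):

# -*- coding: utf-8 -*-
# Hypothetical stdlib equivalent of decode_url().
import base64, re, urllib

def decode_url_stdlib(txt):
    txt = re.sub('[^АВСЕМA-Za-z0-9\.\,\~]', '', txt)
    txt = txt.replace('А', 'A').replace('В', 'B').replace('С', 'C').replace('Е', 'E').replace('М', 'M')
    txt = txt.replace('.', '+').replace(',', '/').replace('~', '=')  # restore the standard alphabet
    return urllib.unquote(base64.b64decode(txt))
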
@@ -47,7 +47,6 @@ def categorias(item):
    data = httptools.downloadpage(item.url).data
    patron = ' <a href="([^"]+)" class="link1">([^"]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""

@@ -124,12 +124,12 @@ def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '(?s)short_overlay.*?<a href="([^"]+)'
    patron += '.*?img.*?src="([^"]+)'
    patron += '.*?title="([^"]+).*?'
    patron += 'data-postid="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, thumbnail, titulo, datapostid in matches:
    matches = scrapertools.find_multiple_matches(data, '(?s)shortstory cf(.*?)rate_post')
    for datos in matches:
        url = scrapertools.find_single_match(datos, 'href="([^"]+)')
        titulo = scrapertools.find_single_match(datos, 'short_header">([^<]+)').strip()
        datapostid = scrapertools.find_single_match(datos, 'data-postid="([^"]+)')
        thumbnail = scrapertools.find_single_match(datos, 'img w.*?src="([^"]+)')
        post = 'action=get_movie_details&postID=%s' % datapostid
        data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", post=post).data
        idioma = "Latino"

@@ -43,7 +43,6 @@ def catalogo(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li><a href="([^"]+)" title="">.*?<span class="videos-count">([^"]+)</span><span class="title">([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,cantidad,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
@@ -59,7 +58,6 @@ def categorias(item):
    patron = '<a href="([^"]+)">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)" />'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        scrapedurl = scrapedurl.replace("top", "new")

@@ -45,7 +45,6 @@ def catalogo(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li><a class="item" href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
@@ -66,7 +65,6 @@ def categorias(item):
    patron += 'src="([^"]+)".*?'
    patron += '<div class="videos">([^"]+)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
@@ -105,7 +103,6 @@ def play(item):
    data = httptools.downloadpage(item.url).data
    patron = 'video_url: \'([^\']+)\''
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl in matches:
        url = scrapedurl
        itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))

@@ -47,11 +47,11 @@ def list_all(item):

    itemlist = []
    data = get_source(item.url)
    patron = '<divclass="post-thumbnail">.?<.*?href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?'
    patron = '<article id="post-\d+".*?data-background="([^"]+)".*?href="([^"]+)".*?<h3.*?internal">([^<]+)'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        title = scrapertools.find_single_match(scrapedtitle, '(.*?)(?:|\(|\| )\d{4}').strip()
        year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
@@ -68,7 +68,7 @@ def list_all(item):

    if itemlist != []:

        next_page = scrapertools.find_single_match(data, '<a class="next" href="([^"]+)"')
        next_page = scrapertools.find_single_match(data, 'page-numbers current.*?<a class="page-numbers" href="([^"]+)"')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all", title='Siguiente >>>', url=next_page))
    else:

@@ -44,7 +44,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" class="th">.*?<img src="([^"]+)".*?<span>([^"]+)</span>\s*(\d+) movies.*?</strong>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"

63
plugin.video.alfa/channels/cine24h.json
Normal file
@@ -0,0 +1,63 @@
{
    "id": "cine24h",
    "name": "Cine24H",
    "active": true,
    "adult": false,
    "language": ["lat", "cast", "eng"],
    "fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
    "thumbnail": "https://cine24h.net/wp-content/uploads/2018/06/cine24hv2.png",
    "banner": "",
    "categories": [
        "movie",
        "tvshow",
        "vose"
    ],
    "settings": [
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino",
                "Castellano",
                "English"
            ]
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 5",
                "Perfil 4",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        },
        {
            "id": "orden_episodios",
            "type": "bool",
            "label": "Mostrar los episodios de las series en orden descendente",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
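The settings declared above surface in Kodi's channel configuration dialog; channel code reads them back with config.get_setting, exactly as cine24h.py does below for 'modo_grafico' and 'perfil'. A short sketch (values shown are the defaults from this JSON):

from platformcode import config

filtro = config.get_setting('filter_languages', 'cine24h')  # index into lvalues, 0 = "No filtrar"
orden = config.get_setting('orden_episodios', 'cine24h')    # bool, default False
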
382
plugin.video.alfa/channels/cine24h.py
Normal file
@@ -0,0 +1,382 @@
# -*- coding: utf-8 -*-
# -*- Channel CanalPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import sys
import urlparse

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb

__channel__ = "cine24h"

host = "https://cine24h.net/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']


def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
                           viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)),

                item.clone(title="Series", action="series", extra='serie', url=host + 'series/',
                           viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
                           thumbnail=get_thumb('tvshows', auto=True), page=0),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0)]

    autoplay.show_option(item.channel, itemlist)
    return itemlist


def menumovies(item):
    logger.info()
    itemlist = [item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'peliculas/', viewmode="movie_with_plot"),

                item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + '?s=trfilter&trfilter=1&years%5B%5D=2018', viewmode="movie_with_plot"),

                item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'peliculas-mas-vistas/', viewmode="movie_with_plot"),

                item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host, viewmode="movie_with_plot"),

                item.clone(title="Estrenos por Año", action="genresYears", thumbnail=get_thumb('year', auto=True),
                           text_blod=True, page=0, viewcontent='movies', url=host,
                           viewmode="movie_with_plot"),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0, extra='buscarP')]

    return itemlist


def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return peliculas(item)

    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '</figure>(.*?)'  # type
    patron += '<h3 class="Title">([^<]+)</h3>.*?'  # title
    patron += '<span class="Year">([^<]+)</span>.*?'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]:
        if item.title == 'Buscar' and 'serie' in scrapedurl:
            action = 'temporadas'
            contentType = 'tvshow'
            title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
        else:
            action = 'findvideos'
            contentType = 'movie'
            title = scrapedtitle

        itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
                                   url=scrapedurl, infoLabels={'year': year}, contentType=contentType,
                                   contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
                                   title=title, context="buscar_trailer"))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    return itemlist
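peliculas() pages in two tiers: it first slices the already-scraped match list in steps of 30, re-invoking itself with a higher `page` offset, and only follows the site's real "next page" link once the cached list is exhausted. The same pattern reduced to its skeleton (helper names hypothetical):

PAGE_SIZE = 30

def paginate(item, matches, build_item, next_page_url):
    itemlist = [build_item(m) for m in matches[item.page:item.page + PAGE_SIZE]]
    if item.page + PAGE_SIZE < len(matches):
        # more cached matches: same URL, just advance the offset
        itemlist.append(item.clone(page=item.page + PAGE_SIZE, title="» Siguiente »"))
    elif next_page_url:
        # cache exhausted: follow the site's pagination and reset the offset
        itemlist.append(item.clone(url=next_page_url, page=0, title="» Siguiente »"))
    return itemlist
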
def genresYears(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)

    if item.title == "Estrenos por Año":
        patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
    else:
        patron_todas = 'Géneros</a>(.*?)</li></ul></li>'

    data = scrapertools.find_single_match(data, patron_todas)
    patron = '<a href="([^"]+)">([^<]+)</a>'  # url, title
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="peliculas"))

    return itemlist


def year_release(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'  # url, title
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
                                   url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

    return itemlist


def series(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
    patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '<h3 class="Title">([^<]+)</h3>'  # title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
                                   contentSerieName=scrapedtitle, show=scrapedtitle,
                                   thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')

        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    return itemlist


def temporadas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i'  # season numbers

    matches = scrapertools.find_multiple_matches(data, patron)
    if len(matches) > 1:
        for scrapedseason in matches:
            new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
            new_item.infoLabels['season'] = scrapedseason
            new_item.extra = ""
            itemlist.append(new_item)

        tmdb.set_infoLabels(itemlist, __modo_grafico__)

        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has a name of its own, append it to the item title
                i.title += " - %s" % (i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the season has its own poster, use it instead of the show's
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: int(it.infoLabels['season']))

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

        return itemlist
    else:
        return episodios(item)


def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
    patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>'  # episode titles

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        scrapedtitle = scrapedtitle.replace('--', '0')
        patron = '(\d+)x(\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode")
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: skip this when adding to the video library
    if not item.extra:
        # Fetch the data for every episode of the season using multiple threads
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has a name of its own, append it to the item title
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the episode has its own image, use it as the thumbnail
                i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this series to the video library" option
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist
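episodios() derives season and episode numbers from titles shaped like '1x07' (after mapping the site's '--' placeholder to '0'); a quick check of that parsing:

>>> import re
>>> re.compile('(\d+)x(\d+)').findall('Serie 1x7')
[('1', '7')]
>>> '%sx%s' % ('1', '7'.zfill(2))
'1x07'
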
def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span>(.*?)</li>'  # option, server, lang - quality
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, servername, quote in matches:
        patron = '<span>(.*?) -([^<]+)</span'
        match = re.compile(patron, re.DOTALL).findall(quote)
        lang, quality = match[0]
        quality = quality.strip()
        headers = {'Referer': item.url}
        url_1 = scrapertools.find_single_match(data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
        new_data = httptools.downloadpage(url_1, headers=headers).data
        new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
        new_data = scrapertools.decodeHtmlentities(new_data)
        url2 = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"')
        url = url2 + '|%s' % url_1
        if 'rapidvideo' in url2:
            url = url2

        lang = lang.lower().strip()
        languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
                     'español': '[COLOR green](CAST)[/COLOR]',
                     'subespañol': '[COLOR red](VOS)[/COLOR]',
                     'sub': '[COLOR red](VOS)[/COLOR]'}
        if lang in languages:
            lang = languages[lang]

        servername = servertools.get_server_from_url(url)

        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
            servername.title(), quality, lang)

        itemlist.append(item.clone(action='play', url=url, title=title, language=lang, quality=quality,
                                   text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)

    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist
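findvideos() resolves each "Opt" tab in two hops: the option id selects an outer iframe on the page, and that iframe's document holds the final player iframe; the first URL is kept after a '|' so the resolver can still use it as referer. Condensed, with markup as in the patterns above (helper name hypothetical):

def resolve_option(data, option, page_url):
    # outer iframe for this option tab
    url_1 = scrapertools.find_single_match(
        data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
    new_data = httptools.downloadpage(url_1, headers={'Referer': page_url}).data
    # inner iframe with the actual embed
    url2 = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"')
    return url2 + '|%s' % url_1  # trailing |referer, except for hosts like rapidvideo
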
@@ -433,7 +433,14 @@ def newest(categoria):

def search(item, texto):
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "-")
    item.url = item.host + '?s=' + texto
    if texto != '':
        return peliculas(item)
    if item.host != '':
        host_list = [item.host]
    else:
        host_list = ['http://www.cinecalidad.to', 'http://cinecalidad.to/espana/']
    for host_name in host_list:
        item.url = host_name + '?s=' + texto
        if texto != '':
            itemlist.extend(peliculas(item))
    return itemlist

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-

import re
from channels import autoplay
from channels import filtertools
from core import httptools
@@ -176,11 +175,11 @@ def destacadas(item):
    item.text_color = color2
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, 'peliculas_destacadas.*?class="letter_home"')
    patron = '(?s)title="([^"]+)".*?'
    patron += 'href="([^"]+)".*?'
    patron = '(?s)href="([^"]+)".*?'
    patron += 'alt="([^"]+)".*?'
    patron += 'src="([^"]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = CHANNEL_HOST + scrapedurl
        itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
                                   url=scrapedurl, thumbnail=scrapedthumbnail,
@@ -224,11 +223,12 @@ def findvideos(item):
    logger.info()
    itemlist=[]
    data = httptools.downloadpage(item.url).data
    patron = 'class="title">.*?src.*?/>([^>]+)</span>.*?data-type="([^"]+).*?data-post="(\d+)".*?data-nume="(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    #logger.info("Intel66")
    #scrapertools.printMatches(matches)
    for language, tp, pt, nm in matches:
    patron = 'tooltipctx.*?data-type="([^"]+).*?'
    patron += 'data-post="(\d+)".*?'
    patron += 'data-nume="(\d+).*?'
    patron += 'class="title">.*?src.*?/>([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for tp, pt, nm, language in matches:
        language = language.strip()
        post = {'action':'doo_player_ajax', 'post':pt, 'nume':nm, 'type':tp}
        post = urllib.urlencode(post)
@@ -242,17 +242,12 @@ def findvideos(item):
        else:
            title = ''
        url = scrapertools.find_single_match(new_data, "src='([^']+)'")
        #logger.info("Intel33 %s" %url)
        url = get_url(url)
        if "mega" not in url and "mediafire" not in url:
            url = get_url(url.replace('\\/', '/'))
        if url:
            itemlist.append(Item(channel=item.channel, title ='%s'+title, url=url, action='play', quality=item.quality,
                                 language=IDIOMAS[language], infoLabels=item.infoLabels))
    #logger.info("Intel44")
    #scrapertools.printMatches(itemlist)
    patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
    matches = re.compile(patron, re.DOTALL).findall(data)
    #logger.info("Intel66a")
    #scrapertools.printMatches(matches)
    matches = scrapertools.find_multiple_matches(data, patron)
    for hidden_url, quality, language in matches:
        if not config.get_setting('unify'):
            title = ' [%s][%s]' % (quality, IDIOMAS[language])
@@ -260,27 +255,32 @@ def findvideos(item):
            title = ''
        new_data = httptools.downloadpage(hidden_url).data
        url = scrapertools.find_single_match(new_data, 'id="link" href="([^"]+)"')
        url = url.replace('\\/', '/')
        url = get_url(url)
        if "mega" not in url and "mediafire" not in url:
            url = get_url(url.replace('\\/', '/'))
        if url:
            itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
                                 language=IDIOMAS[language], infoLabels=item.infoLabels))
    #logger.info("Intel55")
    #scrapertools.printMatches(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    itemlist.sort(key=lambda it: (it.language, it.server, it.quality))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    return itemlist


def get_url(url):
    logger.info()
    if "cinetux.me" in url:
        d1 = httptools.downloadpage(url).data
        if "mail" in url:
            id = scrapertools.find_single_match(d1, '<img src="[^#]+#(\w+)')
            #logger.info("Intel77b %s" %id)
            url = "https://my.mail.ru/video/embed/" + id
        if "mail" in url or "drive" in url or "ok.cinetux" in url or "mp4/" in url:
            id = scrapertools.find_single_match(d1, '<img src="[^#]+#([^"]+)"')
            d1 = d1.replace("'",'"')
            url = scrapertools.find_single_match(d1, '<iframe src="([^"]+)') + id
            if "drive" in url:
                url += "/preview"
        else:
            url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
        #logger.info("Intel22a %s" %d1)
        #logger.info("Intel77a %s" %url)
    url = url.replace("povwideo","powvideo")
    return url
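get_url() unwraps cinetux.me redirect pages: embed wrappers (mail, drive, ok, mp4) hide the target in an iframe plus an id fragment, everything else in a document.location.replace call. A minimal illustration of the common branch, with a made-up wrapper page:

# Hypothetical wrapper page body; get_url() should return the target URL.
d1 = 'document.location.replace("https://powvideo.net/abc123")'
url = scrapertools.find_single_match(d1, 'document.location.replace\("([^"]+)')
# -> 'https://powvideo.net/abc123'; the final replace() also repairs "povwideo" typos
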
def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]

@@ -44,7 +44,6 @@ def catalogo(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)">\s*<img src=\'([^\']+)\'/>.*?<span>([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
@@ -63,7 +62,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)"/>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle

@@ -36,11 +36,8 @@ def search(item, texto):
def categorias(item):
    itemlist = []
    data = scrapertools.cache_page(item.url)
    #data = scrapertools.get_match(data,'<div class="sidetitle">Categorías</div>(.*?)</ul>')
    #<li class="cat-item cat-item-203077"><a href="http://www.coomelonitas.com/Categoria/asiaticas">ASIÁTICAS</a>
    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""

@@ -160,26 +160,21 @@ def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if "onclick=\"changeLink('" in data:
        patron = "onclick=.changeLink\('([^']+)'"
        matches = scrapertools.find_multiple_matches(data, patron)
        for id in matches:
            url = devuelve_enlace(base64.b64decode(id))
            itemlist.append(item.clone(title="Ver en %s",url=url, action="play"))
    else:
        patron = 'data-type="([^"]+).*?'
        patron += 'data-post="([^"]+).*?'
        patron += 'data-nume="([^"]+).*?'
        patron += 'server">([^<]+).*?'
        matches = scrapertools.find_multiple_matches(data, patron)
        headers = {"X-Requested-With":"XMLHttpRequest"}
        for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
            post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
            data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
            url1 = scrapertools.find_single_match(data1, "src='([^']+)")
            url1 = devuelve_enlace(url1)
            if url1:
                itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
    patron = 'data-type="(tv).*?'
    patron += 'data-post="([^"]+).*?'
    patron += 'data-nume="([^"]+).*?'
    patron += 'server">([^<]+).*?'
    matches = scrapertools.find_multiple_matches(data, patron)
    headers = {"X-Requested-With":"XMLHttpRequest"}
    for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
        post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
        data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
        url1 = scrapertools.find_single_match(data1, "src='([^']+)")
        url1 = devuelve_enlace(url1)
        if "drive.google" in url1:
            url1 = url1.replace("view","preview")
        if url1:
            itemlist.append(item.clone(title="Ver en %s",url=url1, action="play"))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
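Both branches of findvideos() above go through the stock WordPress AJAX endpoint used by Doo-based themes: a form-encoded POST names the player, and the response fragment's src= carries the real embed. Condensed, with hypothetical values:

headers = {"X-Requested-With": "XMLHttpRequest"}
post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" % ("movie", "1234", "1")  # values made up
data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
url1 = scrapertools.find_single_match(data1, "src='([^']+)")
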
@@ -11,27 +11,28 @@ from lib import unshortenit

host = "http://www.descargacineclasico.net"


def agrupa_datos(data):
    # Collapse the page into a single normalized line
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|<!--.*?-->', '', data)
    data = re.sub(r'\s+', ' ', data)
    data = re.sub(r'>\s<', '><', data)
    return data
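agrupa_datos() flattens the page to one normalized line so the channel's patterns don't have to deal with line breaks or comment noise; for example:

>>> agrupa_datos('<div>\n\t <span>x</span> <!-- ad --></div>')
'<div><span>x</span></div>'
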
def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, title="Últimas agregadas", action="agregadas",
                         url=host, viewmode="movie_with_plot",
                         thumbnail=get_thumb('last', auto=True)))
                         url=host, viewmode="movie_with_plot", thumbnail=get_thumb('last', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Listado por género", action="porGenero",
                         url=host,
                         thumbnail=get_thumb('genres', auto=True)))
    itemlist.append(
        Item(channel=item.channel, title="Buscar", action="search", url=host,
             thumbnail=get_thumb('search', auto=True)))
                         url=host, thumbnail=get_thumb('genres', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Listado alfabetico", action="porLetra",
                         url=host + "/cine-online/", thumbnail=get_thumb('alphabet', auto=True)))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host,
                         thumbnail=get_thumb('search', auto=True)))
    return itemlist


def porLetra(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = 'noindex,nofollow" href="([^"]+)">(\w+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url, titulo in matches:
        itemlist.append( Item(channel=item.channel, action="agregadas", title=titulo, url=url, viewmode="movie_with_plot"))
    return itemlist


@@ -43,7 +44,9 @@ def porGenero(item):
    data = re.compile(patron,re.DOTALL).findall(data)
    patron = '<li.*?>.*?href="([^"]+).*?>([^<]+)'
    matches = re.compile(patron,re.DOTALL).findall(data[0])
    for url,genero in matches:
    for url, genero in matches:
        if genero == "Erótico" and config.get_setting("adult_mode") == 0:
            continue
        itemlist.append( Item(channel=item.channel, action="agregadas", title=genero, url=url, viewmode="movie_with_plot"))
    return itemlist

@@ -129,7 +132,6 @@ def findvideos(item):
                             contentTitle = item.contentTitle
                             ))
        return itemlist
    return itemlist


def play(item):

@@ -577,7 +577,8 @@ def findvideos(item):
        return item  # ... and we're done

    # Build the video's overall title from all the information obtained from TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
    if not item.videolibray_emergency_urls:
        item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    # Now handle the .torrent links
    for scrapedurl in matches:  # read the torrents with the different qualities

@@ -43,7 +43,6 @@ def catalogo(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class=""\s+title="([^"]+)"\s+href="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedtitle,scrapedurl in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
@@ -59,7 +58,6 @@ def categorias(item):
    data = scrapertools.get_match(data,'<h2>TAGS</h2>(.*?)<div class="sideitem"')
    patron = '<a href="(.*?)".*?>(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
@@ -92,9 +90,8 @@ def findvideos(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data,'<div id="wrapper" class="ortala">(.*?)<div class="butonlar">')
    patron = '<iframe.*?src="([^"]+)"'
    patron = '<iframe\s+src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl in matches:
        itemlist.append( Item(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
    return itemlist
@@ -104,7 +101,7 @@ def play(item):
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videochannel=item.channel

File diff suppressed because one or more lines are too long
@@ -44,7 +44,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" title="([^"]+) porn tube" class="thumb">.*?<img src="([^"]+)".*?<span class="total">([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"

@@ -27,10 +27,23 @@ def mainlist(item):
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
        itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel, action="mainlist", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True) )
    return itemlist


def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist

@@ -42,7 +42,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li class="thumb thumb-category">.*?<a href="([^"]+)">.*?<img class="lazy" data-original="([^"]+)">.*?<div class="name">([^"]+)</div>.*?<div class="count">(\d+)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"

@@ -46,7 +46,6 @@ def catalogo(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li id="menu-item-\d+".*?u=([^"]+)">(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
@@ -59,7 +58,6 @@ def categorias(item):
    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^"]+)" rel="nofollow">(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
@@ -75,25 +73,26 @@ def peliculas(item):
    patron = '<article id="post-\d+".*?<a href="([^"]+)" rel="bookmark">(.*?)</a>.*?<img src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        contentTitle = scrapedtitle
        title = scrapedtitle
        thumbnail = scrapedthumbnail.replace("jpg#", "jpg")
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))

        itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail, plot=plot, fulltitle=title, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url, folder=True) )


    # else:
    #     patron = '<div class="nav-previous"><a href="(.*?)"'
    #     next_page = re.compile(patron,re.DOTALL).findall(data)
    #next_page = scrapertools.find_single_match(data,'class="last" title=.*?<a href="([^"]+)">')
    #     next_page = next_page[0]
    #next_page = host + next_page
    #     itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page, text_color="blue", url=next_page ) )
    return itemlist


def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.fulltitle
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videochannel=item.channel
    return itemlist

@@ -43,7 +43,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)" class="thumb">.*?src="([^"]+)".*?<strong class="title">([^"]+)</strong>.*?<b>(.*?)</b>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches:
        scrapedplot = ""
        title = scrapedtitle + " \(" + vidnum + "\)"

48
plugin.video.alfa/channels/hdfilmologia.json
Normal file
@@ -0,0 +1,48 @@
{
    "id": "hdfilmologia",
    "name": "HDFilmologia",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "fanart": "https://i.postimg.cc/qvFCZNKT/Alpha-652355392-large.jpg",
    "thumbnail": "https://hdfilmologia.com/templates/gorstyle/images/logo.png",
    "banner": "",
    "categories": [
        "movie",
        "vos"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 5",
                "Perfil 4",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        },
        {
            "id": "orden_episodios",
            "type": "bool",
            "label": "Mostrar los episodios de las series en orden descendente",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
262
plugin.video.alfa/channels/hdfilmologia.py
Normal file
262
plugin.video.alfa/channels/hdfilmologia.py
Normal file
@@ -0,0 +1,262 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# -*- Channel HDFilmologia -*-
|
||||
# -*- Created for Alfa-addon -*-
|
||||
# -*- By the Alfa Develop Group -*-
|
||||
|
||||
import re
|
||||
import sys
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core import channeltools
|
||||
from core import tmdb
|
||||
from platformcode import config, logger
|
||||
from channelselector import get_thumb
|
||||
|
||||
__channel__ = "hdfilmologia"
|
||||
|
||||
host = "https://hdfilmologia.com/"
|
||||
|
||||
try:
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
|
||||
__perfil__ = int(config.get_setting('perfil', __channel__))
|
||||
except:
|
||||
__modo_grafico__ = True
|
||||
__perfil__ = 0
|
||||
|
||||
# Fijar perfil de color
|
||||
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
|
||||
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
|
||||
if __perfil__ < 3:
|
||||
color1, color2, color3, color4, color5 = perfil[__perfil__]
|
||||
else:
|
||||
color1 = color2 = color3 = color4 = color5 = ""
|
||||
|
||||
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
|
||||
['Referer', host]]
|
||||
|
||||
|
||||
parameters = channeltools.get_channel_parameters(__channel__)
|
||||
fanart_host = parameters['fanart']
|
||||
thumbnail_host = parameters['thumbnail']
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
itemlist.append(item.clone(title="Últimas Agregadas", action="movies",thumbnail=get_thumb('last', auto=True),
|
||||
text_blod=True, page=0, viewcontent='movies',
|
||||
url=host + 'index.php?do=lastnews', viewmode="movie_with_plot"))
|
||||
|
||||
itemlist.append(item.clone(title="Estrenos", action="movies", thumbnail=get_thumb('premieres', auto=True),
|
||||
text_blod=True, page=0, viewcontent='movies', url=host + 'estrenos',
|
||||
viewmode="movie_with_plot"))
|
||||
|
||||
itemlist.append(item.clone(title="Más Vistas", action="movies",thumbnail=get_thumb('more watched', auto=True),
|
||||
text_blod=True, page=0, viewcontent='movies',
|
||||
url=host + 'mas-vistas/', viewmode="movie_with_plot"))
|
||||
|
||||
itemlist.append(item.clone(title="Películas Por País", action="countriesYears",thumbnail=get_thumb('country', auto=True),
|
||||
text_blod=True, page=0, viewcontent='movies',
|
||||
url=host, viewmode="movie_with_plot"))
|
||||
|
||||
itemlist.append(item.clone(title="Películas Por Año", action="countriesYears",thumbnail=get_thumb('year', auto=True),
|
||||
text_blod=True, page=0, viewcontent='movies',
|
||||
url=host, viewmode="movie_with_plot"))
|
||||
|
||||
itemlist.append(item.clone(title="Géneros", action="genres",thumbnail=get_thumb('genres', auto=True),
|
||||
text_blod=True, page=0, viewcontent='movies',
|
||||
url=host, viewmode="movie_with_plot"))
|
||||
|
||||
|
||||
|
||||
itemlist.append(item.clone(title="Buscar", action="search",thumbnail=get_thumb('search', auto=True),
|
||||
text_blod=True, url=host, page=0))
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info()
|
||||
|
||||
texto = texto.replace(" ", "+")
|
||||
item.url = urlparse.urljoin(item.url, "?do=search&mode=advanced&subaction=search&story={0}".format(texto))
|
||||
# 'https://hdfilmologia.com/?do=search&mode=advanced&subaction=search&story=la+sombra'
|
||||
|
||||
try:
|
||||
return sub_search(item)
|
||||
|
||||
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []


def sub_search(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="sres-wrap clearfix" href="([^"]+)">'  # url
    patron += '<div class="sres-img"><img src="/([^"]+)" alt="([^"]+)" />.*?'  # img, title
    patron += '<div class="sres-desc">(.*?)</div>'  # plot

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, plot in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
                                   action="findvideos", text_color=color3, page=0, plot=plot,
                                   thumbnail=host + scrapedthumbnail))

    pagination = scrapertools.find_single_match(data, 'class="pnext"><a href="([^"]+)">')

    if pagination:
        itemlist.append(Item(channel=__channel__, action="sub_search",
                             title="» Siguiente »", url=pagination))

    tmdb.set_infoLabels(itemlist)

    return itemlist


def movies(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    patron = '<div class="kino-item ignore-select">.*?<a href="([^"]+)" class="kino-h"><h2>([^<]+)</h2>.*?'  # url, title
    patron += '<img src="([^"]+)".*?'  # img
    patron += '<div class="k-meta qual-mark">([^<]+)</div>.*?'  # quality
    patron += '<strong>Año:</strong></div>([^<]+)</li>'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedthumbnail, quality, year in matches[item.page:item.page + 25]:
        scrapedthumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle, quality)

        itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
                                   url=scrapedurl, infoLabels={'year': year.strip()},
                                   contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
                                   title=title, context="buscar_trailer"))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if item.page + 25 < len(matches):
        itemlist.append(item.clone(page=item.page + 25,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(
            data, 'class="pnext"><a href="([^"]+)">')

        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    return itemlist
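
movies() paginates on the client: the full match list is sliced 25 entries at a time through item.page, and only once the slice runs off the end does it follow the site's own «pnext» link. The windowing in isolation, over fake data and the same 25-item window:

    def paginate(matches, page, size=25):
        """One window of results plus the next offset, mirroring the
        matches[item.page:item.page + 25] logic used above."""
        window = matches[page:page + size]
        next_page = page + size if page + size < len(matches) else None
        return window, next_page

    matches = ['title%03d' % i for i in range(60)]   # pretend scrape results
    page = 0
    while page is not None:
        window, page = paginate(matches, page)
        print('%d items; next offset: %s' % (len(window), page))
    # -> 25/25/10 items, then next offset None (hand over to the site's link)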


def genres(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(channel=__channel__, action="movies", title=scrapedtitle,
                                   url=host + scrapedurl, text_color=color3, viewmode="movie_with_plot"))

    return itemlist


def countriesYears(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)

    if item.title == "Películas Por País":
        patron_todas = 'Por País</option>(.*?)</option></select>'
    else:
        patron_todas = 'Por Año</option>(.*?)<option value="/">Peliculas'

    data = scrapertools.find_single_match(data, patron_todas)
    patron = '<option value="/([^"]+)">([^<]+)</option>'  # url, title
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=host + scrapedurl, action="movies"))

    return itemlist
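
countriesYears() scrapes in two steps: find_single_match() first narrows the page to the relevant <select> block, then one generic <option> pattern serves both the country and the year menu. The same two-step idea over a tiny inline sample (example.com is a placeholder host):

    import re

    html = ('<select><option value="/">Por Pais</option>'
            '<option value="/pais/espana/">Espana</option>'
            '<option value="/pais/mexico/">Mexico</option></select>')

    # Step 1: isolate the region between the header option and the closing tag
    region = re.search('Por Pais</option>(.*?)</select>', html, re.DOTALL).group(1)

    # Step 2: run the generic option pattern only inside that region
    for url, title in re.findall('<option value="/([^"]+)">([^<]+)</option>', region):
        print(title + ' -> https://example.com/' + url)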


def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)

    patron = '(\w+)src\d+="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for lang, url in matches:
        server = servertools.get_server_from_url(url)
        if 'dropbox' in url:
            server = 'dropbox'
        if '/drive/' in url:
            data = httptools.downloadpage(url).data
            url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
            server = 'gdrive'

        if 'ultrapeliculashd' in url:
            data = httptools.downloadpage(url).data
            # logger.info(data)
            patron = "\|s\|(\w+)\|"
            matches = re.compile(patron, re.DOTALL).findall(data)
            for key in matches:
                url = 'https://www.dropbox.com/s/%s?dl=1' % (key)
                server = 'dropbox'
        languages = {'l': '[COLOR cornflowerblue](LAT)[/COLOR]',
                     'e': '[COLOR green](CAST)[/COLOR]',
                     's': '[COLOR red](VOS)[/COLOR]'}
        if lang in languages:
            lang = languages[lang]

        title = "Ver en: [COLOR yellow](%s)[/COLOR] [COLOR yellowgreen]%s[/COLOR]" % (server.title(), lang)
        if 'youtube' not in server:
            itemlist.append(item.clone(action='play', url=url, title=title, language=lang,
                                       text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist.sort(key=lambda it: it.language, reverse=False)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist
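
In findvideos() above, the first regex group is a one-letter language marker that a small dict maps to a coloured tag, and the final sort then groups links by language. The mapping alone, with the Kodi colour codes stripped and hypothetical sample data:

    languages = {'l': '(LAT)', 'e': '(CAST)', 's': '(VOS)'}

    links = [('l', 'https://example.com/a'), ('s', 'https://example.com/b'),
             ('e', 'https://example.com/c'), ('x', 'https://example.com/d')]

    tagged = [(languages.get(lang, lang), url) for lang, url in links]  # unknown markers pass through
    tagged.sort(key=lambda pair: pair[0])   # same idea as itemlist.sort(key=lambda it: it.language)
    for lang, url in tagged:
        print('%s %s' % (lang, url))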


@@ -43,7 +43,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li>.*?<a href="([^"]+)".*?<img class="thumb" src="([^"]+)" alt="([^"]+)".*?<span class="videos-count">(\d+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches:
        scrapedplot = ""
        title = scrapedtitle + " \(" + vidnum + "\)"

@@ -41,7 +41,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+) - Porn videos">.*?<span>(\d+) videos</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"

@@ -45,7 +45,6 @@ def catalogo(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="thumbnail" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="thumbnail__info__right">\s+([^"]+)\s+</span>.*?<h5>([^"]+)</h5>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
        scrapedplot = ""
        cantidad = cantidad.replace(" ", "")

@@ -66,7 +65,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="thumbnail" href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)".*?<i class="mdi mdi-video"></i>([^"]+)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
        scrapedplot = ""
        cantidad = cantidad.replace(" ", "")

@@ -100,6 +98,7 @@ def peliculas(item):
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []

@@ -42,7 +42,6 @@ def categorias(item):
    data = httptools.downloadpage(item.url).data
    patron = '<option class="level-0" value="([^"]+)">([^"]+) \((.*?)\)<'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,number in matches:
        scrapedplot = ""
        scrapedthumbnail = ""

@@ -59,11 +58,10 @@ def peliculas(item):
    data = httptools.downloadpage(item.url).data
    patron = '<div class="featured-wrap clearfix">.*?<a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<li><a rel=\'nofollow\' href=\'([^\']+)\' class=\'inactive\'>Next')
    next_page_url = scrapertools.find_single_match(data,'<span class=\'currenttext\'>.*?href=\'([^\']+)\' class=\'inactive\'>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )

@@ -75,7 +73,6 @@ def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle

@@ -44,7 +44,6 @@ def categorias(item):
    data = httptools.downloadpage(item.url).data
    patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<div class="videos">(\d+) video.*?</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,numero in matches:
        scrapedplot = ""
        scrapedthumbnail = ""

@@ -61,7 +60,6 @@ def peliculas(item):
    data = httptools.downloadpage(item.url).data
    patron = '<a href="http://xxx.justporno.tv/videos/(\d+)/.*?" title="([^"]+)" >.*?data-original="([^"]+)".*?<div class="duration">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
        scrapedplot = ""
        scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle

@@ -81,7 +79,6 @@ def play(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|<br/>", "", data)
    patron = 'video_url: \'([^\']+)\''
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl in matches:
        scrapedplot = ""
        itemlist.append(item.clone(channel=item.channel, action="play", title=scrapedurl , url=scrapedurl , plot="" , folder=True) )
@@ -12,7 +12,7 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = "http://maxipelis24.com"
host = "https://maxipelis24.tv"


def mainlist(item):

@@ -42,8 +42,8 @@ def category(item):
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)

    if item.cat == 'genre':
        data = scrapertools.find_single_match(data, '<h3>Géneros.*?</div>')
        patron = '<a href="([^"]+)">([^<]+)<'
        data = scrapertools.find_single_match(data, '<h3>Géneros <span class="icon-sort">.*?</ul>')
        patron = '<li class="cat-item cat-item.*?<a href="([^"]+)" >([^<]+)<'
    elif item.cat == 'year':
        data = scrapertools.find_single_match(data, '<h3>Año de estreno.*?</div>')
        patron = 'li><a href="([^"]+)">([^<]+).*?<'

@@ -65,15 +65,14 @@ def movies(item):
    patron = '<div id="mt.+?href="([^"]+)".+?'
    patron += '<img src="([^"]+)" alt="([^"]+)".+?'
    patron += '<span class="imdb">.*?>([^<]+)<.*?'
    patron += '<span class="ttx">([^<]+).*?'
    patron += 'class="year">([^<]+).+?class="calidad2">([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, img, scrapedtitle, ranking, resto, year, quality in matches:
    for scrapedurl, img, scrapedtitle, resto, year, quality in matches:
        scrapedtitle = re.sub(r'\d{4}|[()]','', scrapedtitle)
        plot = scrapertools.htmlclean(resto).strip()
        title = ' %s [COLOR yellow](%s)[/COLOR] [COLOR red][%s][/COLOR]' % (scrapedtitle, ranking, quality)
        title = ' %s [COLOR red][%s][/COLOR]' % (scrapedtitle, quality)
        itemlist.append(Item(channel = item.channel,
                             title = title,
                             url = scrapedurl,

@@ -87,7 +86,7 @@ def movies(item):

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
    #Paginacion
    matches = re.compile('<div class="pag_.*?href="([^"]+)">Siguiente<', re.DOTALL).findall(data)
    matches = re.compile('class="respo_pag"><div class="pag.*?<a href="([^"]+)" >Siguiente</a><', re.DOTALL).findall(data)
    if matches:
        url = urlparse.urljoin(item.url, matches[0])
        itemlist.append(Item(channel = item.channel, action = "movies", title = "Página siguiente >>", url = url))

@@ -99,32 +98,53 @@ def findvideos(item):
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<div id="contenedor">(.*?)</div></div></div>')
    # Busca los enlaces a los videos
    listavideos = servertools.findvideos(data)
    for video in listavideos:
        videotitle = scrapertools.unescape(video[0])
        url = video[1]
        server = video[2]
        itemlist.append(Item(channel = item.channel,
                             action = "play",
                             server = server,
                             title = videotitle,
                             url = url,
                             thumbnail = item.thumbnail,
                             plot = item.plot,
                             contentTitle = item.contentTitle,
                             infoLabels = item.infoLabels,
                             folder = False))
    # Opción "Añadir esta película a la biblioteca de KODI"
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(Item(channel = item.channel,
                             title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             url = item.url,
                             action = "add_pelicula_to_library",
                             extra = "findvideos",
                             contentTitle = item.contentTitle,
                             thumbnail = item.thumbnail
                             ))
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)

    patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for link in matches:
        if 'maxipelis24.tv/hideload/?' in link:
            if 'id=' in link:
                id_type = 'id'
                ir_type = 'ir'
            elif 'ud=' in link:
                id_type = 'ud'
                ir_type = 'ur'
            elif 'od=' in link:
                id_type = 'od'
                ir_type = 'or'
            elif 'ad=' in link:
                id_type = 'ad'
                ir_type = 'ar'
            elif 'ed=' in link:
                id_type = 'ed'
                ir_type = 'er'
            else:
                continue

            id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
            base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)

            ir = id[::-1]
            referer = base_link + '%s=%s&/' % (id_type, ir)
            video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir), headers={'Referer': referer},
                                                follow_redirects=False)
            url = video_data.headers['location']
            title = '%s'

            itemlist.append(Item(channel=item.channel, title=title, url=url, action='play',
                                 language='', infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    if itemlist:
        if config.get_videolibrary_support():
            itemlist.append(Item(channel = item.channel, action = ""))
            itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                 action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
                                 contentTitle = item.contentTitle
                                 ))

    return itemlist
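
The reworked findvideos() defeats the «hideload» gateway with plain string arithmetic: the value of the id-style parameter is reversed (id[::-1]), re-sent under the paired redirect key (id→ir, ud→ur, od→or, ad→ar, ed→er) together with a Referer built the same way, and the real stream URL is read from the 302 Location header instead of the body (hence follow_redirects=False). The URL arithmetic in isolation, with a made-up id and no network:

    link = 'https://maxipelis24.tv/hideload/?id=abc123'

    id_type, ir_type = 'id', 'ir'                 # the pair present in this link
    value = link.split(id_type + '=', 1)[1]       # 'abc123'
    base = link.split(id_type + '=', 1)[0]        # '.../hideload/?'

    ir = value[::-1]                              # the gateway expects the id reversed
    referer = base + '%s=%s&/' % (id_type, ir)
    redirect_url = '%s%s=%s' % (base, ir_type, ir)

    print(redirect_url)   # request this with follow_redirects=False ...
    print(referer)        # ... sending this Referer, then read headers['location']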

@@ -265,8 +265,8 @@ def listado(item):
        del item.next_page

    #logger.debug(data)
    logger.debug("PATRON1: " + patron + " / ")
    logger.debug(matches)
    #logger.debug("PATRON1: " + patron + " / ")
    #logger.debug(matches)

    # Primera pasada
    # En la primera pasada se obtiene una información básica del título a partir de la url

@@ -360,8 +360,8 @@ def listado(item):
    cnt_pag += cnt_tot
    cnt_pag_num += 1

    logger.debug("PATRON2: " + patron_title)
    logger.debug(matches)
    #logger.debug("PATRON2: " + patron_title)
    #logger.debug(matches)
    cnt = 0
    for scrapedtitle, notused, scrapedinfo in matches:
        item_local = itemlist[cnt]  #Vinculamos item_local con la entrada de la lista itemlist (más fácil de leer)

@@ -763,6 +763,12 @@ def findvideos(item):
    itemlist_f = []  #Itemlist de enlaces filtrados
    if not item.language:
        item.language = ['CAST']  #Castellano por defecto
    matches = []

    #Si es un lookup para cargar las urls de emergencia en la Videoteca...
    if item.videolibray_emergency_urls:
        item.emergency_urls = []
        item.emergency_urls.append([])  #Reservamos el espacio para los .torrents locales

    #Bajamos los datos de la página
    data = ''

@@ -775,24 +781,47 @@ def findvideos(item):
        data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
        patron = "<a href='(secciones.php\?sec\=descargas&ap=contar&tabla=[^']+)'"
    except:
        pass

    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos
        if item.emergency_urls and not item.videolibray_emergency_urls:  #Hay urls de emergencia?
            matches = item.emergency_urls[1]  #Restauramos matches
            item.armagedon = True  #Marcamos la situación como catastrófica
        else:
            if item.videolibray_emergency_urls:  #Si es llamado desde creación de Videoteca...
                return item  #Devolvemos el Item de la llamada
            else:
                return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

    matches = re.compile(patron, re.DOTALL).findall(data)
    if not item.armagedon:  #Si es un proceso normal, seguimos
        matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:
        item = generictools.web_intervenida(item, data)  #Verificamos que no haya sido clausurada
        if item.intervencion:  #Sí ha sido clausurada judicialmente
            item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)  #Llamamos al método para el pintado del error
            return itemlist  #Salimos

        logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
        return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos
        elif not item.armagedon:
            logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))

        if item.emergency_urls and not item.videolibray_emergency_urls:  #Hay urls de emergencia?
            matches = item.emergency_urls[1]  #Restauramos matches
            item.armagedon = True  #Marcamos la situación como catastrófica
        else:
            if item.videolibray_emergency_urls:  #Si es llamado desde creación de Videoteca...
                return item  #Devolvemos el Item de la llamada
            else:
                return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

    #logger.debug(data)
    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)

    #Si es un lookup para cargar las urls de emergencia en la Videoteca...
    if item.videolibray_emergency_urls:
        item.emergency_urls.append(matches)  #Salvamos matches...

    #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
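
This generictools-style findvideos() runs the same scrape in two modes. Called from the videolibrary with videolibray_emergency_urls set, it stashes the raw matches into item.emergency_urls and returns the Item; called normally while the site is down, it restores those saved matches and sets item.armagedon to flag the degraded path. A condensed sketch of that state machine (SimpleItem is a hypothetical stand-in for core.item.Item):

    class SimpleItem(object):                     # hypothetical stand-in for core.item.Item
        def __init__(self):
            self.videolibray_emergency_urls = False
            self.emergency_urls = []
            self.armagedon = False

    def find_matches(item, data):
        matches = ['match1', 'match2'] if data else []
        if not matches and item.emergency_urls:   # site down: restore the saved scrape
            item.armagedon = True
            return item.emergency_urls[1]
        if item.videolibray_emergency_urls:       # library lookup: save for a rainy day
            item.emergency_urls = [[], matches]   # slot 0 reserved for .torrent urls
        return matches

    item = SimpleItem()
    item.videolibray_emergency_urls = True
    find_matches(item, 'page html')               # saves ['match1', 'match2']
    item.videolibray_emergency_urls = False
    print(find_matches(item, ''))                 # restores them; item.armagedon is now True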

@@ -803,41 +832,69 @@ def findvideos(item):
        url = urlparse.urljoin(item.url, scrapedurl)

        # Localiza el .torrent en el siguiente link
        if not item.post:  # Si no es llamada con Post, hay que bajar un nivel más
        if not item.post and not item.armagedon:  # Si no es llamada con Post, hay que bajar un nivel más
            try:
                torrent_data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url).data)
            except:  #error
                pass

            if not torrent_data:
                logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / URL: " + url + " / DATA: " + data)
                itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
                return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos
                if item.emergency_urls and not item.videolibray_emergency_urls:  #Hay urls de emergencia?
                    if len(item.emergency_urls[0]):
                        item_local.url = item.emergency_urls[0][0]  #Restauramos la primera url
                    item.armagedon = True  #Marcamos la situación como catastrófica
                else:
                    if item.videolibray_emergency_urls:  #Si es llamado desde creación de Videoteca...
                        return item  #Devolvemos el Item de la llamada
                    else:
                        return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

            #logger.debug(torrent_data)
            item_local.url = scrapertools.get_match(torrent_data, ">Pincha.*?<a href='(.*?\/uploads\/torrents\/\w+\/.*?\.torrent)'")
            item_local.url = urlparse.urljoin(url, item_local.url)
        else:
            if not item.armagedon:
                item_local.url = scrapertools.get_match(torrent_data, ">Pincha.*?<a href='(.*?\/uploads\/torrents\/\w+\/.*?\.torrent)'")
                item_local.url = urlparse.urljoin(url, item_local.url)

        elif not item.armagedon:
            item_local.url = url  # Ya teníamos el link desde el primer nivel (documentales)
        item_local.url = item_local.url.replace(" ", "%20")

        if item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls:
            if len(item.emergency_urls[0]):
                item_local.url = item.emergency_urls[0][0]  #Guardamos la primera url del .Torrent
                if len(item.emergency_urls[0]) > 1:
                    del item.emergency_urls[0][0]
        if not item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls:
            if len(item.emergency_urls[0]):
                item_local.torrent_alt = item.emergency_urls[0][0]  #Guardamos la primera url del .Torrent ALTERNATIVA

        if item.videolibray_emergency_urls:
            item.emergency_urls[0].append(item_local.url)  #Salvamos la url...

        # Poner la calidad, si es necesario
        if not item_local.quality:
            if "hdtv" in item_local.url.lower() or "720p" in item_local.url.lower() or "1080p" in item_local.url.lower() or "4k" in item_local.url.lower():
                item_local.quality = scrapertools.find_single_match(item_local.url, '.*?_([H|7|1|4].*?)\.torrent')
                item_local.quality = item_local.quality.replace("_", " ")
        if item.armagedon:  #Si es catastrófico, lo marcamos
            item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality

        # Extrae la dimensión del vídeo
        size = scrapertools.find_single_match(item_local.url, '(\d{1,3},\d{1,2}?\w+)\.torrent')
        size = size.upper().replace(".", ",").replace("G", " G ").replace("M", " M ")  #sustituimos . por , porque Unify lo borra
        if not size:
        size = size.upper().replace(".", ",").replace("G", " G ").replace("M", " M ")  #sustituimos . por , porque Unify lo borra
        if not size and not item.armagedon:
            size = generictools.get_torrent_size(item_local.url)  #Buscamos el tamaño en el .torrent
        if size:
            item_local.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title)  #Quitamos size de título, si lo traía
            item_local.title = '%s [%s]' % (item_local.title, size)  #Agregamos size al final del título
            item_local.quality = re.sub('\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.quality)  #Quitamos size de calidad, si lo traía
            item_local.quality = '%s [%s]' % (item.quality, size)  #Agregamos size al final de calidad
            item_local.quality = '%s [%s]' % (item.quality, size)  #Agregamos size al final de calidad

        #Ahora pintamos el link del Torrent, si lo hay
        if item_local.url:  # Hay Torrent ?
            item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))  #Preparamos título de Torrent

        if item_local.url:  # Hay Torrent ?
            item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))  #Preparamos título de Torrent

        #Preparamos título y calidad, quitamos etiquetas vacías
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
        item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)

@@ -858,6 +915,9 @@ def findvideos(item):

    #logger.debug("title=[" + item.title + "], torrent=[ " + item_local.url + " ], url=[ " + url + " ], post=[" + item.post + "], thumbnail=[ " + item.thumbnail + " ]" + " size: " + size)

    if item.videolibray_emergency_urls:
        return item

    if len(itemlist_f) > 0:  #Si hay entradas filtradas...
        itemlist.extend(itemlist_f)  #Pintamos pantalla filtrada
    else:
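
The size tag appended to each torrent title is scraped from the filename first: the sites name files «…_1,4GB.torrent», so the pattern (\d{1,3},\d{1,2}?\w+)\.torrent recovers the size with no extra request, and only when that fails does generictools.get_torrent_size() download the .torrent itself. The filename route in isolation, including the comma/space mangling the inline comment explains (Unify strips dots but keeps commas):

    import re

    def size_from_name(url):
        """Mirror of the filename shortcut above; '' when the name carries no size."""
        m = re.search(r'(\d{1,3},\d{1,2}?\w+)\.torrent', url)
        if not m:
            return ''
        # dot -> comma so Unify keeps it; spacing around G/M as in the channel code
        return m.group(1).upper().replace(".", ",").replace("G", " G ").replace("M", " M ")

    print(size_from_name('https://example.com/Movie_HDTV_720p_1,4GB.torrent'))  # -> '1,4 G B'
    print(size_from_name('https://example.com/Movie_HDTV_720p.torrent'))        # -> ''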

@@ -54,6 +54,28 @@
                "VOSE"
            ]
        },
        {
            "id": "emergency_urls",
            "type": "list",
            "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
            "default": 1,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No",
                "Guardar",
                "Borrar",
                "Actualizar"
            ]
        },
        {
            "id": "emergency_urls_torrents",
            "type": "bool",
            "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
            "default": true,
            "enabled": true,
            "visible": "!eq(-1,'No')"
        },
        {
            "id": "seleccionar_ult_temporadda_activa",
            "type": "bool",

@@ -165,8 +165,8 @@ def listado(item):
        item.contentType = "movie"
        pag = False  #No hay paginación
    elif (item.extra == "peliculas" or item.extra == "varios") and not item.tipo:  #Desde Menú principal
        patron = '<a href="([^"]+)">?<img src="([^"]+)"[^<]+<\/a>'
        patron_enlace = '\/\/.*?\/(.*?)\/$'
        patron = '<a href="([^"]+)"[^>]+>?<img src="([^"]+)"[^<]+<\/a>'
        patron_enlace = '\/\/.*?\/(8.*?)\/$'
        patron_title = '<a href="[^"]+">([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?'
        item.action = "findvideos"
        item.contentType = "movie"

@@ -184,7 +184,7 @@ def listado(item):
        pag = False
        cnt_tot = 10  # Se reduce el numero de items por página porque es un proceso pesado
    elif item.extra == "series" and not item.tipo:
        patron = '<a href="([^"]+)">?<img src="([^"]+)"[^<]+<\/a>'
        patron = '<a href="([^"]+)"[^>]+>?<img src="([^"]+)"[^<]+<\/a>'
        patron_enlace = '\/\/.*?\/(.*?)-[temporada]?\d+[-|x]'
        patron_title = '<a href="[^"]+">([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?'
        patron_title_ep = '\/\/.*?\/(.*?)-(\d{1,2})x(\d{1,2})(?:-al-\d{1,2}x\d{1,2})?-?(\d+p)?\/$'

@@ -203,7 +203,7 @@ def listado(item):
        item.contentType = "tvshow"
        pag = False
    else:
        patron = '<a href="([^"]+)">?<img src="([^"]+)"[^<]+<\/a>'
        patron = '<a href="([^"]+)"[^>]+>?<img src="([^"]+)"[^<]+<\/a>'
        patron_enlace = '\/\/.*?\/(.*?)-[temporada]?\d+[-|x]'
        patron_title = '<a href="[^"]+">([^<]+)<\/a>(\s*<b>([^>]+)<\/b>)?'
        patron_title_ep = '\/\/.*?\/(.*?)-(\d{1,2})x(\d{1,2})(?:-al-\d{1,2}x\d{1,2})?-?(\d+p)?\/$'

@@ -813,6 +813,7 @@ def findvideos(item):
    itemlist_f = []  #Itemlist de enlaces filtrados
    if not item.language:
        item.language = ['CAST']  #Castellano por defecto
    matches = []

    #logger.debug(item)

@@ -823,6 +824,11 @@ def findvideos(item):
    #Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    #Si es un lookup para cargar las urls de emergencia en la Videoteca...
    if item.videolibray_emergency_urls:
        item.emergency_urls = []
        item.emergency_urls.append([])  #Reservamos el espacio para los .torrents locales

    #Bajamos los datos de la página de todo menos de Documentales y Varios
    if not item.post:
        try:

@@ -836,34 +842,54 @@ def findvideos(item):
    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos
        if item.emergency_urls and not item.videolibray_emergency_urls:  #Hay urls de emergencia?
            matches = item.emergency_urls[1]  #Restauramos matches
            item.armagedon = True  #Marcamos la situación como catastrófica
        else:
            if item.videolibray_emergency_urls:  #Si es llamado desde creación de Videoteca...
                return item  #Devolvemos el Item de la llamada
            else:
                return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

    matches = re.compile(patron, re.DOTALL).findall(data)
    if not item.armagedon:  #Si es un proceso normal, seguimos
        matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:
        item = generictools.web_intervenida(item, data)  #Verificamos que no haya sido clausurada
        if item.intervencion:  #Sí ha sido clausurada judicialmente
            item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)  #Llamamos al método para el pintado del error
            return itemlist  #Salimos
        else:
            logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))

        logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
        itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
        return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos
        if item.emergency_urls and not item.videolibray_emergency_urls:  #Hay urls de emergencia?
            matches = item.emergency_urls[1]  #Restauramos matches
            item.armagedon = True  #Marcamos la situación como catastrófica
        else:
            if item.videolibray_emergency_urls:  #Si es llamado desde creación de Videoteca...
                return item  #Devolvemos el Item de la llamada
            else:
                return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)

    for scrapedurl, name1, value1, value2, name2 in matches:  #Hacemos el FOR aunque solo habrá un item
    #Si es un lookup para cargar las urls de emergencia en la Videoteca...
    if item.videolibray_emergency_urls:
        item.emergency_urls.append(matches)  #Salvamos matches...

    for scrapedurl, name1, value1, value2, name2 in matches:  #Hacemos el FOR aunque solo habrá un item
        #Generamos una copia de Item para trabajar sobre ella
        item_local = item.clone()
        url = scrapedurl

        # Localiza el .torrent en el siguiente link con Post
        post = '%s=%s&%s=%s' % (name1, value1, name2, value2)
        try:
            torrent_data = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=False)
        except:  #error
            pass
        if not item.armagedon:
            try:
                torrent_data = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=False)
            except:  #error
                pass

    else:
        #Viene de SERIES y DOCUMENTALES. Generamos una copia de Item para trabajar sobre ella

@@ -874,19 +900,34 @@ def findvideos(item):
        except:
            pass

        if not torrent_data:
        if not torrent_data or not 'location' in torrent_data.headers or not torrent_data.headers['location']:
            item = generictools.web_intervenida(item, data)  #Verificamos que no haya sido clausurada
            if item.intervencion:  #Sí ha sido clausurada judicialmente
                item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)  #Llamamos al método para el pintado del error
                return itemlist  #Salimos
            elif not item.armagedon:
                logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / URL: " + url + " / DATA: " + data)
                itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))

            logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / URL: " + url + " / DATA: " + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
            return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos
            if item.emergency_urls and not item.videolibray_emergency_urls:  #Hay urls de emergencia?
                item_local.url = item.emergency_urls[0][0]  #Restauramos la url del .torrent
                item.armagedon = True  #Marcamos la situación como catastrófica
            else:
                if item.videolibray_emergency_urls:  #Si es llamado desde creación de Videoteca...
                    return item  #Devolvemos el Item de la llamada
                else:
                    return itemlist  #si no hay más datos, algo no funciona, pintamos lo que tenemos

        #Capturamos la url del .torrent desde el Header
        item_local.url = torrent_data.headers['location'] if 'location' in torrent_data.headers else item.url_post
        item_local.url = item_local.url.replace(" ", "%20")  #Quitamos espacios
        if not item.armagedon:
            item_local.url = torrent_data.headers['location'] if 'location' in torrent_data.headers else item.url_post
            item_local.url = item_local.url.replace(" ", "%20")  #Quitamos espacios
        if item.emergency_urls:
            item_local.torrent_alt = item.emergency_urls[0][0]  #Guardamos la url del .Torrent ALTERNATIVA

        #Si es un lookup para cargar las urls de emergencia en la Videoteca...
        if item.videolibray_emergency_urls:
            item.emergency_urls[0].append(item_local.url)  #Salvamos la url...
            return item  #... y nos vamos

        # Poner la calidad, si es necesario
        if not item_local.quality:
@@ -896,6 +937,8 @@ def findvideos(item):
            elif "hdtv" in item_local.url.lower() or "720p" in item_local.url.lower() or "1080p" in item_local.url.lower() or "4k" in item_local.url.lower():
                item_local.quality = scrapertools.find_single_match(item_local.url, '.*?_([H|7|1|4].*?)\.torrent')
                item_local.quality = item_local.quality.replace("_", " ")
        if item.armagedon:  #Si es catastrófico, lo marcamos
            item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality

        # Extrae el tamaño del vídeo
        if scrapertools.find_single_match(data, '<b>Tama.*?:<\/b>&\w+;\s?([^<]+B)<?'):

@@ -903,7 +946,7 @@ def findvideos(item):
        else:
            size = scrapertools.find_single_match(item_local.url, '(\d{1,3},\d{1,2}?\w+)\.torrent')
            size = size.upper().replace(".", ",").replace("G", " G ").replace("M", " M ")  #sustituimos . por , porque Unify lo borra
        if not size:
        if not size and not item.armagedon:
            size = generictools.get_torrent_size(item_local.url)  #Buscamos el tamaño en el .torrent
        if size:
            item_local.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title)  #Quitamos size de título, si lo traía
@@ -44,7 +44,6 @@ def categorias(item):
    data = httptools.downloadpage(item.url).data
    patron = '<h3><a href="([^"]+)">(.*?)</a> <small>(.*?)</small></h3>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,cantidad in matches:
        scrapedplot = scrapedurl.replace("http://mporno.unblckd.org/", "").replace("page1.html", "")
        scrapedthumbnail = ""

@@ -58,9 +57,6 @@ def peliculas(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)

    patron = '<img class="content_image" src="([^"]+).mp4/.*?" alt="([^"]+)".*?this.src="(.*?)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:

@@ -76,14 +72,5 @@ def peliculas(item):
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )

    # else:
    #     patron = '<a href=\'([^\']+)\' class="next">Next >></a>'
    #     next_page = re.compile(patron,re.DOTALL).findall(data)
    #     next_page = scrapertools.find_single_match(data,'class="last" title=.*?<a href="([^"]+)">')
    #     plot = item.plot
    #     next_page = next_page[0]
    #     next_page = host + plot + next_page
    #     itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page , text_color="blue", url=next_page, plot=plot ) )
    return itemlist

@@ -41,7 +41,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<a class="muestra-escena muestra-categoria" href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle

@@ -100,7 +100,7 @@
    "id": "intervenidos_channels_list",
    "type": "text",
    "label": "Lista de canales y clones de NewPct1 intervenidos y orden de sustitución de URLs",
    "default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force'), ('0', 'mejortorrent', 'mejortorrent', 'http://www.mejortorrent.com/', 'http://www.mejortorrent.org/', '', '', '', '', '', '*', '', 'force'), ('0', 'plusdede', 'megadede', 'https://www.plusdede.com', 'https://www.megadede.com', '', '', '', '', '', '*', '', 'auto')",
    "default": "('0', 'canal_org', 'canal_des', 'url_org', 'url_des', 'patron1', 'patron2', 'patron3', 'patron4', 'patron5', 'content_inc', 'content_exc', 'ow_force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)([^0-9]+-)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-\\d+-(Temporada-).html', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-(?:[^-]+-)[^0-9]+-(\\d+)-', '', 'tvshow, season', '', 'force'), ('0', 'mejortorrent', 'mejortorrent1', 'http://www.mejortorrent.com/', 'https://mejortorrent1.com/', '(http.?:\/\/.*?\/)', 'http.?:\/\/.*?\/.*?-torrent.?-[^-]+-([^.]+).html', '', '', '', 'movie', '', 'force'), ('0', 'mejortorrent', 'mejortorrent', 'http://www.mejortorrent.com/', 'http://www.mejortorrent.org/', '', '', '', '', '', '*', '', 'force'), ('1', 'plusdede', 'megadede', 'https://www.plusdede.com', 'https://www.megadede.com', '', '', '', '', '', '*', '', 'auto'), ('1', 'newpct1', 'descargas2020', 'http://www.newpct1.com', 'http://descargas2020.com', '', '', '', '', '', '*', '', 'force')",
    "enabled": true,
    "visible": false
},
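
The «intervenidos_channels_list» default above packs a whole substitution table into one string: comma-separated 13-field tuples (activation flag, source channel, replacement channel, old URL, new URL, five URL patterns, content filters, overwrite mode). The update flips the plusdede→megadede flag to '1' and appends a newpct1→descargas2020 rule. One hedged way to turn such a string back into tuples, shown over a shortened 5-field sample; Alfa's generictools does its own parsing, this is only an illustration:

    import ast

    raw = ("('0', 'canal_org', 'canal_des', 'url_org', 'url_des'), "
           "('1', 'newpct1', 'descargas2020', 'http://www.newpct1.com', 'http://descargas2020.com')")
    # shortened to 5 fields here; the real setting carries 13 per tuple

    rules = ast.literal_eval('[%s]' % raw)    # literal_eval avoids executing untrusted code
    for rule in rules:
        if rule[0] == '1':                    # only active substitution rules
            print('%s -> %s' % (rule[1], rule[2]))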

@@ -133,8 +133,11 @@ def mainlist(item):
                             thumbnail=thumb_docus, category=item.category, channel_host=item.channel_host))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", url=item.channel_host + "buscar", thumbnail=thumb_buscar, category=item.category, channel_host=item.channel_host))

    itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador, category=item.category, channel_host=item.channel_host))

    clone_act = 'Clone: '
    if config.get_setting('clonenewpct1_channel_default', channel_py) == 0:
        clone_act = 'Aleatorio: '
    itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR] (" + clone_act + item.category + ")", folder=False, thumbnail=thumb_separador, category=item.category, channel_host=item.channel_host))

    itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configurar canal", thumbnail=thumb_settings, category=item.category, channel_host=item.channel_host))

@@ -243,9 +246,8 @@ def submenu_novedades(item):
    item.extra2 = ''

    #Renombramos el canal al nombre de clone inicial desde la URL
    host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)')
    item.channel_host = host
    item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
    item.category = channel_clone_name.capitalize()

    data = ''
    timeout_search = timeout * 2  #Más tiempo para Novedades, que es una búsqueda

@@ -2051,7 +2053,15 @@ def episodios(item):
            if match['episode'] is None: match['episode'] = "0"
            try:
                match['season'] = int(match['season'])
                season_alt = match['season']
                match['episode'] = int(match['episode'])
                if match['season'] > max_temp:
                    logger.error("ERROR 07: EPISODIOS: Error en número de Temporada o Episodio: " + " / TEMPORADA/EPISODIO: " + str(match['season']) + " / " + str(match['episode']) + " / NUM_TEMPORADA: " + str(max_temp) + " / " + str(season) + " / MATCHES: " + str(matches))
                    match['season'] = scrapertools.find_single_match(item_local.url, '\/[t|T]emp\w+-*(\d+)\/')
                    if not match['season']:
                        match['season'] = season_alt
                    else:
                        match['season'] = int(match['season'])
            except:
                logger.error("ERROR 07: EPISODIOS: Error en número de Temporada o Episodio: " + " / TEMPORADA/EPISODIO: " + str(match['season']) + " / " + str(match['episode']) + " / NUM_TEMPORADA: " + str(max_temp) + " / " + str(season) + " / MATCHES: " + str(matches))

@@ -19,7 +19,6 @@ def mainlist(item):
    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/list-movies"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-movies"))
    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/list-movies"))

    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist

@@ -47,7 +46,6 @@ def categorias(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li><a title=".*?" href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""

@@ -136,7 +136,7 @@ def peliculas(item):

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    pagination = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")
    pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')

    if pagination:
        itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",

@@ -239,7 +239,7 @@ def series(item):
                               action="temporadas", contentType='tvshow'))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    pagination = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)' />")
    pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')

    if pagination:
        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,

@@ -46,6 +46,6 @@ def mainlist(item):
    next_page_url = "http://www.peliculaseroticas.net/cine-erotico/" + str(next_page) + ".html"

    itemlist.append(
        Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page_url, folder=True))
        Item(channel=item.channel, action="mainlist", title=">> Página siguiente", url=next_page_url, folder=True))

    return itemlist
49
plugin.video.alfa/channels/pelis24.json
Normal file
@@ -0,0 +1,49 @@
{
    "id": "pelis24",
    "name": "Pelis24",
    "active": true,
    "adult": false,
    "language": ["lat"],
    "fanart": "https://i.postimg.cc/WpqD2n77/cine24bg.jpg",
    "thumbnail": "https://www.pelis24.in/wp-content/uploads/2018/05/44.png",
    "banner": "",
    "categories": [
        "movie",
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 5",
                "Perfil 4",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        },
        {
            "id": "orden_episodios",
            "type": "bool",
            "label": "Mostrar los episodios de las series en orden descendente",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
397
plugin.video.alfa/channels/pelis24.py
Normal file
@@ -0,0 +1,397 @@
# -*- coding: utf-8 -*-
# -*- Channel Pelis24 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import sys
import urllib
import urlparse

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb

__channel__ = "pelis24"

host = "https://www.pelis24.in/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']
def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [item.clone(title="Novedades", action="peliculas", thumbnail=get_thumb('newest', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'movies/', viewmode="movie_with_plot"),

                item.clone(title="Tendencias", action="peliculas", thumbnail=get_thumb('newest', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),

                item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'genre/estrenos/', viewmode="movie_with_plot"),

                item.clone(title="Géneros", action="genresYears", thumbnail=get_thumb('genres', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host, viewmode="movie_with_plot"),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0)]

    autoplay.show_option(item.channel, itemlist)
    return itemlist
def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return sub_search(item)

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
def sub_search(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    data = scrapertools.find_single_match(data, '<header><h1>Resultados encontrados(.*?)resppages')
    # logger.info(data)
    patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />.*?'  # url, img, title
    patron += '<span class="year">([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
        if 'tvshows' not in scrapedurl:
            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
                                       action="findvideos", infoLabels={"year": year},
                                       thumbnail=scrapedthumbnail, text_color=color3))

    paginacion = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")

    if paginacion:
        itemlist.append(Item(channel=item.channel, action="sub_search",
                             title="» Siguiente »", url=paginacion,
                             thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))

    tmdb.set_infoLabels(itemlist)

    return itemlist
def peliculas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    # logger.info(data)

    patron = '<article id="post-\w+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
    patron += '<span class="quality">([^<]+)</span> </div>\s*<a href="([^"]+)">.*?'  # quality, url
    patron += '</h3><span>([^<]+)</span>'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year in matches[item.page:item.page + 30]:
        title = '%s [COLOR yellowgreen](%s)[/COLOR]' % (scrapedtitle, quality)

        itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
                                   url=scrapedurl, infoLabels={'year': year}, quality=quality,
                                   contentTitle=scrapedtitle, thumbnail=scrapedthumbnail,
                                   title=title, context="buscar_trailer"))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    return itemlist
def genresYears(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)

    if item.title == "Estrenos por Año":
        patron_todas = 'ESTRENOS</a>(.*?)</i> Géneros'
    else:
        patron_todas = '<h2>Generos</h2>(.*?)</div><aside'
    # logger.error(texto='***********uuuuuuu*****' + patron_todas)

    data = scrapertools.find_single_match(data, patron_todas)
    # logger.error(texto='***********uuuuuuu*****' + data)
    patron = '<a href="([^"]+)">([^<]+)</a> <i>([^<]+)</i>'  # url, title, videos
    # patron = '<a href="([^"]+)">([^<]+)</a>'  # url, title
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, videos_num in matches:
        title = '%s (%s)' % (scrapedtitle, videos_num.replace('.', ','))

        itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas"))

    return itemlist
def year_release(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # logger.info(data)
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'  # url, title
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
                                   url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

    return itemlist


def series(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
    # logger.info(data)

    patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '<h3 class="Title">([^<]+)</h3>'  # title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
                                   contentSerieName=scrapedtitle, show=scrapedtitle,
                                   thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')

        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    return itemlist


def temporadas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i'  # season numbers

    matches = scrapertools.find_multiple_matches(data, patron)
    if len(matches) > 1:
        for scrapedseason in matches:
            new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
            new_item.infoLabels['season'] = scrapedseason
            new_item.extra = ""
            itemlist.append(new_item)

        tmdb.set_infoLabels(itemlist, __modo_grafico__)

        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own name, append it to the item title
                i.title += " - %s" % (i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the season has its own poster, replace the show poster with it
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: int(it.infoLabels['season']))

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

        return itemlist
    else:
        return episodios(item)


def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
    patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>'  # episode title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        scrapedtitle = scrapedtitle.replace('--', '0')
        patron = '(\d+)x(\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode")
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: skip this when adding to the video library
    if not item.extra:
        # Fetch the details of every episode in the season via multiple threads
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the item title
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the episode has its own image, replace the poster with it
                i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this show to the video library" option
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist


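# Editorial note: episodios() above parses "NxM" episode slugs with the regex
# (\d+)x(\d+) and zero-pads the episode number; a minimal sketch of that step
# (_parse_episode_slug and the sample slug are assumptions for illustration):
def _parse_episode_slug(slug):
    # '10x--' style slugs mark unnumbered episodes; '--' is normalised to 0 first.
    season, episode = re.findall(r'(\d+)x(\d+)', slug.replace('--', '0'))[0]
    return season, episode.zfill(2)
# Example: _parse_episode_slug('3x7-piloto') -> ('3', '07')

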
def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    # logger.info(data)

    # patron1 = 'data-tplayernv="Opt(.*?)"><span>(.*?)</span><span>(.*?)</span>'  # option, server, lang - quality
    patron = 'href="#option-(.*?)"><span class="dt_flag"><img src="[^"]+"></span>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # urls = re.compile(patron2, re.DOTALL).findall(data)

    for option, lang in matches:
        url = scrapertools.find_single_match(data, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option)
        lang = lang.lower().strip()
        languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
                     'castellano': '[COLOR green](CAST)[/COLOR]',
                     'español': '[COLOR green](CAST)[/COLOR]',
                     'subespañol': '[COLOR red](VOS)[/COLOR]',
                     'sub': '[COLOR red](VOS)[/COLOR]',
                     'ingles': '[COLOR red](VOS)[/COLOR]'}
        if lang in languages:
            lang = languages[lang]

        server = servertools.get_server_from_url(url)
        title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (server.title(), item.quality, lang)
        # if 'google' not in url and 'directo' not in server:

        itemlist.append(item.clone(action='play', url=url, title=title, language=lang, text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)
    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist


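# Editorial note: findvideos() above first collects the option ids from the tab
# strip and then resolves each option's iframe with a second, id-specific match;
# a minimal sketch of that two-step lookup (_resolve_options is a hypothetical
# helper, not part of the channel):
def _resolve_options(html):
    links = []
    for option, lang in re.findall(r'href="#option-(.*?)">.*?</span>([^<]+)</a>', html):
        # A second pattern, parameterised on the option id, pulls the matching iframe URL.
        url = scrapertools.find_single_match(html, '<div id="option-%s" class="[^"]+"><iframe class="metaframe rptss" src="([^"]+)"' % option)
        links.append((lang.strip(), url))
    return links

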
64  plugin.video.alfa/channels/pelishd24.json  Normal file
@@ -0,0 +1,64 @@
{
    "id": "pelishd24",
    "name": "PelisHD24",
    "active": true,
    "adult": false,
    "language": ["lat", "cast", "eng"],
    "fanart": "https://pelishd24.com/wp-content/uploads/2018/11/background.png",
    "thumbnail": "https://pelishd24.com/wp-content/uploads/2018/07/pelishd24.2.png",
    "banner": "",
    "categories": [
        "movie",
        "tvshow",
        "vos",
        "direct"
    ],
    "settings": [
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino",
                "Castellano",
                "English"
            ]
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 5",
                "Perfil 4",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        },
        {
            "id": "orden_episodios",
            "type": "bool",
            "label": "Mostrar los episodios de las series en orden descendente",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
464  plugin.video.alfa/channels/pelishd24.py  Normal file
@@ -0,0 +1,464 @@
# -*- coding: utf-8 -*-
# -*- Channel PelisHD24 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import sys
import urlparse

from channels import autoplay
from lib import generictools
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb

__channel__ = "pelishd24"

host = "https://pelishd24.com/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'English': 'VOS'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'openload', 'streamcherry']


def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
                           viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb('movies', auto=True)),

                item.clone(title="Series", action="series", extra='serie', url=host + 'series/',
                           viewmode="movie_with_plot", text_blod=True, viewcontent='movies',
                           thumbnail=get_thumb('tvshows', auto=True), page=0),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0)]

    autoplay.show_option(item.channel, itemlist)
    return itemlist


def menumovies(item):
    logger.info()
    itemlist = [item.clone(title="Todas", action="peliculas", thumbnail=get_thumb('all', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'peliculas/', viewmode="movie_with_plot"),

                item.clone(title="Estrenos", action="peliculas", thumbnail=get_thumb('estrenos', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + '?s=trfilter&trfilter=1&years=2018', viewmode="movie_with_plot"),

                item.clone(title="Más Vistas", action="peliculas", thumbnail=get_thumb('more watched', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'mas-vistas/', viewmode="movie_with_plot"),

                item.clone(title="Más Votadas", action="peliculas", thumbnail=get_thumb('more voted', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host + 'peliculas-mas-votadas/', viewmode="movie_with_plot"),

                item.clone(title="Géneros", action="genres_atoz", thumbnail=get_thumb('genres', auto=True),
                           text_blod=True, page=0, viewcontent='movies',
                           url=host, viewmode="movie_with_plot"),

                item.clone(title="A-Z", action="genres_atoz", thumbnail=get_thumb('year', auto=True),
                           text_blod=True, page=0, viewcontent='movies', url=host,
                           viewmode="movie_with_plot"),

                item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                           text_blod=True, url=host, page=0, extra='buscarP')]

    return itemlist


def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return peliculas(item)

    # The exception is caught so that a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []


def peliculas(item):
    logger.info()
    itemlist = []
    action = ''
    contentType = ''
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)

    patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '</figure>(.*?)'  # type
    patron += '<h3 class="Title">([^<]+)</h3>.*?'  # title
    patron += '<span class="Year">([^<]+)</span>.*?'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, tipo, scrapedtitle, year in matches[item.page:item.page + 30]:
        title = ''
        if '/serie/' in scrapedurl:
            action = 'temporadas'
            contentType = 'tvshow'
            title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
        else:
            action = 'findvideos'
            contentType = 'movie'
            title = scrapedtitle

        itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, show=scrapedtitle,
                                   url=scrapedurl, infoLabels={'year': year}, extra='peliculas',
                                   contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
                                   title=title, context="buscar_trailer", contentType=contentType))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    return itemlist


def genres_atoz(item):
    logger.info()
    itemlist = []
    action = ''
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)

    if item.title == "A-Z":
        patron_todas = '<ul class="AZList"(.*?)</li></ul>'
        action = 'atoz'
    else:
        patron_todas = '<a href="#">GENERO</a>(.*?)</li></ul>'
        action = 'peliculas'

    data = scrapertools.find_single_match(data, patron_todas)
    patron = '<a href="([^"]+)">([^<]+)</a>'  # url, title
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action=action))

    return itemlist


def atoz(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)

    patron = '<td class="MvTbImg"> <a href="([^"]+)".*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '<strong>([^<]+)</strong> </a></td><td>([^<]+)</td>.*?'  # title, year
    patron += '<span class="Qlty">([^<]+)</span>'  # quality
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches[item.page:item.page + 30]:
        title = ''
        action = ''
        if '/serie/' in scrapedurl:
            action = 'temporadas'
            contentType = 'tvshow'
            title = scrapedtitle + '[COLOR blue] (Serie)[/COLOR]'
        else:
            action = 'findvideos'
            contentType = 'movie'
            title = "%s [COLOR yellow]%s[/COLOR]" % (scrapedtitle, quality)

        itemlist.append(item.clone(channel=__channel__, action=action, text_color=color3, contentType=contentType,
                                   url=scrapedurl, infoLabels={'year': year}, extra='peliculas',
                                   contentTitle=scrapedtitle, thumbnail='https:' + scrapedthumbnail,
                                   title=title, context="buscar_trailer", show=scrapedtitle))

    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if next_page:
            itemlist.append(item.clone(url=next_page, page=0, title="» Siguiente »", text_color=color3))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    return itemlist


def series(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)

    patron = '<article class="TPost C">\s*<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'  # img
    patron += '<h3 class="Title">([^<]+)</h3>'  # title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
                                   contentSerieName=scrapedtitle, show=scrapedtitle,
                                   thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if item.page + 30 < len(matches):
        itemlist.append(item.clone(page=item.page + 30,
                                   title="» Siguiente »", text_color=color3))
    else:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')

        if next_page:
            itemlist.append(item.clone(url=next_page, page=0,
                                       title="» Siguiente »", text_color=color3))

    return itemlist


def temporadas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="[^>]+>[^<]+<span>(.*?)</span> <i'  # season numbers

    matches = scrapertools.find_multiple_matches(data, patron)
    if len(matches) > 1:
        for scrapedseason in matches:
            new_item = item.clone(action="episodios", season=scrapedseason, extra='temporadas')
            new_item.infoLabels['season'] = scrapedseason
            new_item.extra = ""
            itemlist.append(new_item)

        tmdb.set_infoLabels(itemlist, __modo_grafico__)

        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own name, append it to the item title
                i.title += " - %s" % (i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the season has its own poster, replace the show poster with it
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: int(it.infoLabels['season']))

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

        return itemlist
    else:
        return episodios(item)


def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
    patron += host + 'episode/(.*?)/">([^<]+)</a>'  # episode title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        scrapedtitle = scrapedtitle.replace('--', '0')
        patron = '(\d+)x(\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapedname)
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode", extra='episodios')
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: skip this when adding to the video library
    if not item.extra:
        # Fetch the details of every episode in the season via multiple threads
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the item title
                i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the episode has its own image, replace the poster with it
                i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this show to the video library" option
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))

    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|#038;|\(.*?\)|\s{2}|&nbsp;", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'data-tplayernv="Opt(.*?)"><span>[^"<]+</span>(.*?)</li>'  # option, servername, lang - quality
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, quote in matches:
        patron = '<span>(.*?) -([^<]+)</span'
        match = re.compile(patron, re.DOTALL).findall(quote)
        lang, quality = match[0]
        quality = quality.strip()
        lang = lang.lower().strip()
        languages = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
                     'castellano': '[COLOR green](CAST)[/COLOR]',
                     'subtitulado': '[COLOR red](VOS)[/COLOR]'}

        if lang in languages:
            lang = languages[lang]

        url_1 = scrapertools.find_single_match(data,
                                               'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
        new_data = httptools.downloadpage(url_1).data
        new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
        new_data = scrapertools.decodeHtmlentities(new_data)
        patron1 = '<iframe width="560" height="315" src="([^"]+)"'
        match1 = re.compile(patron1, re.DOTALL).findall(new_data)

        urls = scrapertools.find_single_match(new_data, '<iframe width="560" height="315" src="([^"]+)"')
        servername = servertools.get_server_from_url(urls)
        if 'stream.pelishd24.net' in urls:
            vip_data = httptools.downloadpage(urls).data
            dejuiced = generictools.dejuice(vip_data)
            patron = '"file":"([^"]+)"'
            match = re.compile(patron, re.DOTALL).findall(dejuiced)
            for scrapedurl in match:
                urls = scrapedurl
                servername = 'gvideo'
        if 'pelishd24.com/?trhide' in urls:
            data = httptools.downloadpage(urls).data
            # logger.error(texto='****hex' + data)
            patron = '"file":"([^"]+)"'
            match = re.compile(patron, re.DOTALL).findall(data)
            for scrapedurl in match:
                urls = scrapedurl
                servername = 'gvideo'

        title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
            servername.title(), quality, lang)
        if 'embed.pelishd24.com' not in urls and 'embed.pelishd24.net' not in urls:
            itemlist.append(item.clone(action='play', title=title, url=urls, language=lang, quality=quality,
                                       text_color=color3))

        for url in match1:
            new_data = httptools.downloadpage(url).data
            new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
            new_data = scrapertools.decodeHtmlentities(new_data)
            patron1 = '\["\d+","([^"]+)",\d+]'
            match1 = re.compile(patron1, re.DOTALL).findall(new_data)
            for url in match1:
                url = url.replace('\\', '')
                servername = servertools.get_server_from_url(url)
                if 'pelishd24.net' in url or 'stream.pelishd24.com' in url:
                    vip_data = httptools.downloadpage(url).data
                    dejuiced = generictools.dejuice(vip_data)
                    patron = '"file":"([^"]+)"'
                    match = re.compile(patron, re.DOTALL).findall(dejuiced)
                    for scrapedurl in match:
                        url = scrapedurl
                        servername = 'gvideo'

                if 'ww3.pelishd24.com' in url:
                    data1 = httptools.downloadpage(url).data
                    url = scrapertools.find_single_match(data1, '"file": "([^"]+)"')
                    servername = 'gvideo'

                title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
                    servername.title(), quality, lang)

                itemlist.append(item.clone(action='play', title=title, url=url, language=lang, quality=quality,
                                           text_color=color3))

    itemlist = servertools.get_servers_itemlist(itemlist)

    itemlist.sort(key=lambda it: it.language, reverse=False)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))

    return itemlist


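# Editorial note: several branches in findvideos() above fall back to scraping a
# '"file":"..."' entry out of the (de-obfuscated) player page; a minimal sketch
# of that extraction (_direct_urls and the payload example are assumptions):
def _direct_urls(payload):
    # payload example: '{"sources":[{"file":"https://example.com/v/video.mp4","label":"720p"}]}'
    return re.findall(r'"file":"([^"]+)"', payload)

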
@@ -45,6 +45,28 @@
        "VOSE"
      ]
    },
    {
      "id": "emergency_urls",
      "type": "list",
      "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
      "default": 1,
      "enabled": true,
      "visible": true,
      "lvalues": [
        "No",
        "Guardar",
        "Borrar",
        "Actualizar"
      ]
    },
    {
      "id": "emergency_urls_torrents",
      "type": "bool",
      "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
      "default": true,
      "enabled": true,
      "visible": "!eq(-1,'No')"
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
@@ -355,7 +355,7 @@ def listado(item):
        title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title)
        title = title.replace('()', '').replace('[]', '').strip().lower().title()

        item_local.from_title = title.strip().lower().title()  # Save this label for possible title disambiguation
        item_local.from_title = title.strip().lower().title()  # Save this label for possible title disambiguation

        # Save the title according to the content type
        if item_local.contentType == "movie":
@@ -387,8 +387,8 @@ def listado(item):

        title = '%s' % curr_page

        if cnt_matches + 1 >= last_title:  # If everything on this page has already been drawn...
            cnt_matches = 0  # ...the next pass will read another page
        if cnt_matches + 1 >= last_title:  # If everything on this page has already been drawn...
            cnt_matches = 0  # ...the next pass will read another page
            next_page_url = re.sub(r'page=(\d+)', r'page=' + str(int(re.search('\d+', next_page_url).group()) + 1), next_page_url)

        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, url=next_page_url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page + 1), cnt_matches=str(cnt_matches)))
@@ -399,10 +399,10 @@ def listado(item):
def findvideos(item):
    logger.info()
    itemlist = []
    itemlist_t = []  # Full itemlist of links
    itemlist_f = []  # Itemlist of filtered links
    itemlist_t = []  # Full itemlist of links
    itemlist_f = []  # Itemlist of filtered links
    if not item.language:
        item.language = ['CAST']  # Castilian Spanish by default
        item.language = ['CAST']  # Castilian Spanish by default
    matches = []
    item.category = categoria
@@ -412,22 +412,53 @@ def findvideos(item):
    #logger.debug(item)

    matches = item.url
    if not matches:  # error
        logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web: " + item)
    if not matches:  # error
        logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web: " + str(item))
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
        return itemlist  # if there is no more data, something is broken; render what we have

    if item.emergency_urls and not item.videolibray_emergency_urls:  # Are there emergency urls?
        matches = item.emergency_urls[1]  # Restore matches
        item.armagedon = True  # Flag the situation as catastrophic
    else:
        if item.videolibray_emergency_urls:  # If called while building the video library...
            return item  # Return the Item of the call
        else:
            return itemlist  # if there is no more data, something is broken; render what we have

    #logger.debug(matches)

    # If this is a lookup to load the emergency urls into the video library...
    if item.videolibray_emergency_urls:
        item.emergency_urls = []  # Initialise emergency_urls
        item.emergency_urls.append([])  # Reserve the slot for the local .torrents
        item.emergency_urls.append(matches)  # Save matches...

    # Call the method that builds the video's general title with all the info obtained from TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
    if not item.videolibray_emergency_urls:
        item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    # Now handle the .torrent links
    for scrapedurl, quality in matches:  # read the magnets with the different qualities
    for scrapedurl, quality in matches:  # read the magnets with the different qualities
        # Make a copy of the Item to work on
        item_local = item.clone()

        item_local.url = scrapedurl
        if item.videolibray_emergency_urls:
            item.emergency_urls[0].append(scrapedurl)  # save the url and move on to the next one
            continue
        if item.emergency_urls and not item.videolibray_emergency_urls:
            item_local.torrent_alt = item.emergency_urls[0][0]  # Save the ALTERNATIVE .torrent url
            if item.armagedon:
                item_local.url = item.emergency_urls[0][0]  # ...promote the emergency url to primary
            del item.emergency_urls[0][0]  # Once handled, clean it up

        size = ''
        if not item.armagedon:
            size = generictools.get_torrent_size(item_local.url)  # Look up the size inside the .torrent
        if size:
            quality += ' [%s]' % size
        if item.armagedon:  # If catastrophic, flag it
            quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % quality

        # Add the quality and copy the duration
        item_local.quality = quality
@@ -445,9 +476,9 @@ def findvideos(item):
        item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality).strip()
        item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()

        item_local.alive = "??"  # Link quality not verified
        item_local.action = "play"  # Play the video
        item_local.server = "torrent"  # Torrent server
        item_local.alive = "??"  # Link quality not verified
        item_local.action = "play"  # Play the video
        item_local.server = "torrent"  # Torrent server

        itemlist_t.append(item_local.clone())  # Draw on screen if languages are not filtered
@@ -459,6 +490,9 @@ def findvideos(item):

    #logger.debug(item_local)

    if item.videolibray_emergency_urls:  # If all the urls have been saved already...
        return item  # ...we are done

    if len(itemlist_f) > 0:  # If there are filtered entries...
        itemlist.extend(itemlist_f)  # Render the filtered screen
    else:
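# Editorial note: the emergency-urls bookkeeping in the hunks above implies a
# two-slot layout: slot 0 collects local .torrent urls, slot 1 keeps the raw
# matches for later restoration; a small sketch with made-up values:
#
#     matches = [('https://example.com/a.torrent', '720p'),
#                ('https://example.com/b.torrent', '1080p')]
#     emergency_urls = [[], matches]          # slot 0: .torrents, slot 1: matches
#     for scrapedurl, quality in matches:
#         emergency_urls[0].append(scrapedurl)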
93  plugin.video.alfa/channels/pelisplay.json  Normal file
@@ -0,0 +1,93 @@
{
    "id": "pelisplay",
    "name": "PelisPlay",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "fanart": "https://s33.postimg.cc/d3ioghaof/image.png",
    "thumbnail": "https://www.pelisplay.tv/static/img/logo.png",
    "banner": "https://s33.postimg.cc/cyex6xlen/image.png",
    "categories": [
        "movie",
        "tvshow",
        "vos"
    ],
    "settings": [
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino"
            ]
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 5",
                "Perfil 4",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        },
        {
            "id": "orden_episodios",
            "type": "bool",
            "label": "Mostrar los episodios de las series en orden descendente",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
414  plugin.video.alfa/channels/pelisplay.py  Normal file
@@ -0,0 +1,414 @@
# -*- coding: utf-8 -*-
# -*- Channel PelisPlay -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import sys
import urllib
import urlparse

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb

__channel__ = "pelisplay"

host = "https://www.pelisplay.tv/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0

# Set the colour profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
    color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
    color1 = color2 = color3 = color4 = color5 = ""

headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload']


def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
                           viewcontent='movie', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")),

                item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshow', viewmode="tvshow_with_plot",
                           thumbnail=get_thumb("channels_tvshow.png")),

                item.clone(title="Netflix", action="flixmovies", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshows', viewmode="movie_with_plot", fanart='https://i.postimg.cc/jjN85j8s/netflix-logo.png',
                           thumbnail='http://img.app.kiwi/icon/jcbqFma-5e91cY9MlEasA-fvCRJK493MxphrqbBd8oS74FtYg00IXeOAn0ahsLprxIA'),

                item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
                           thumbnail=get_thumb('search.png'), url=host + 'buscar')]
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def menumovies(item):
    logger.info()
    itemlist = [item.clone(title="Estrenos", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas/estrenos', viewmode="movie_with_plot"),
                item.clone(title="Más Populares", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas?filtro=visitas', viewmode="movie_with_plot"),
                item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas?filtro=fecha_creacion', viewmode="movie_with_plot"),
                item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
                           viewcontent='movie', url=host, viewmode="movie_with_plot"),
                item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
                           viewcontent='movie', url=host + 'peliculas', viewmode="movie_with_plot"),
                item.clone(title="Buscar", action="search", text_blod=True, extra='buscarp',
                           thumbnail=get_thumb('search.png'), url=host + 'peliculas')]
    return itemlist


def flixmovies(item):
    logger.info()
    itemlist = [item.clone(title="Novedades", action="peliculas", text_blod=True, url=host + 'peliculas/netflix?filtro=fecha_actualizacion',
                           viewcontent='movie', viewmode="movie_with_plot"),
                # item.clone(title="Estrenos", action="peliculas", text_blod=True,
                #            viewcontent='movie', url=host + 'peliculas/estrenos', viewmode="movie_with_plot"),
                item.clone(title="Más Vistas", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas/netflix?filtro=visitas', viewmode="movie_with_plot"),
                item.clone(title="Recíen Agregadas", action="peliculas", text_blod=True,
                           viewcontent='movie', url=host + 'peliculas/netflix?filtro=fecha_creacion', viewmode="movie_with_plot"),
                item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
                           viewcontent='movie', url=host, viewmode="movie_with_plot"),
                item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='movie',
                           viewcontent='movie', url=host + 'netflix', viewmode="movie_with_plot")]
    return itemlist


def menuseries(item):
    logger.info()
    itemlist = [item.clone(title="Novedades", action="series", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshow', url=host + 'series', viewmode="tvshow_with_plot"),

                item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshow', url=host + 'series?filtro=visitas', viewmode="tvshow_with_plot"),

                item.clone(title="Recíen Agregadas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshow', url=host + 'series?filtro=fecha_actualizacion', viewmode="tvshow_with_plot"),

                item.clone(title="Géneros", action="p_portipo", text_blod=True, extra='serie',
                           viewcontent='movie', url=host + 'series', viewmode="movie_with_plot"),
                item.clone(title="Buscar", action="search", text_blod=True, extra='buscars',
                           thumbnail=get_thumb('search.png'), url=host + 'series')]

    return itemlist


def p_portipo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
    # logger.info(data)
    action = ''
    patron = '<li class="item"><a href="([^"]+)" class="category">.*?'  # url
    patron += '<div class="[^<]+<img class="[^"]+" src="/([^"]+)"></div><div class="[^"]+">([^<]+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        if item.extra == 'movie':
            action = 'peliculas'
        elif item.extra == 'serie':
            action = 'series'
        itemlist.append(item.clone(action=action,
                                   title=scrapedtitle,
                                   url=scrapedurl,
                                   thumbnail=scrapedthumbnail))
    itemlist.sort(key=lambda it: it.title)
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<img class="posterentrada" src="/([^"]+)".*?'  # img
    patron += '<a href="([^"]+)">.*?'  # url
    patron += '<p class="description_poster">.*?\(([^<]+)\)</p>.*?'  # year
    patron += '<div class="Description"> <div>([^<]+)</div>.*?'  # plot
    patron += '<strong>([^<]+)</strong></h4>'  # title

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, year, plot, scrapedtitle in matches:
        if item.infoLabels['plot'] == '':
            item.plot = plot

        itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
                             infoLabels={"year": year}, thumbnail=host + scrapedthumbnail,
                             url=scrapedurl, title=scrapedtitle, plot=plot))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')

    if pagination:
        itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
                             url=pagination, folder=True, text_blod=True, thumbnail=get_thumb("next.png")))

    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?q={0}".format(texto))
    if item.extra == 'buscarp' or item.extra == 'buscars':
        item.url = urlparse.urljoin(item.url, "?buscar={0}".format(texto))

    try:
        if item.extra == 'buscars':
            return series(item)
        return peliculas(item)
    # The exception is caught so that a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
    if pagination:
        itemlist.append(Item(channel=item.channel, action="sub_search",
                             title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png")))
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host + 'movies/'
        elif categoria == 'infantiles':
            item.url = host + "genre/animacion/"
        elif categoria == 'terror':
            item.url = host + "genre/terror/"
        else:
            return []

        itemlist = peliculas(item)
        if itemlist[-1].title == "» Siguiente »":
            itemlist.pop()

    # The exception is caught so that a failing channel does not break the "newest" listing
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def series(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<img class="portada" src="/([^"]+)"><[^<]+><a href="([^"]+)".*?'  # img, url
    patron += 'class="link-title"><h2>([^<]+)</h2>'  # title
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
                             url=scrapedurl, thumbnail=host + scrapedthumbnail,
                             contentSerieName=scrapedtitle, show=scrapedtitle,
                             action="temporadas", contentType='tvshow'))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    pagination = scrapertools.find_single_match(data, '<li><a href="([^"]+)" rel="next">')

    if pagination:
        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
                             thumbnail=get_thumb("next.png")))
    return itemlist


def temporadas(item):
    logger.info()
    itemlist = []
    from core import jsontools
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<img class="posterentrada" src="/([^"]+)" alt="\w+\s*(\w+).*?'
    patron += 'class="abrir_temporada" href="([^"]+)">'  # img, season
    matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) > 1:
        for scrapedthumbnail, temporada, url in matches:
            new_item = item.clone(action="episodios", season=temporada, url=url,
                                  thumbnail=host + scrapedthumbnail, extra='serie')
            new_item.infoLabels['season'] = temporada
            new_item.extra = ""
            itemlist.append(new_item)
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
            if i.infoLabels['title']:
                # If the season has its own name, append it to the item title
                i.title += " - %s" % (i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the season has its own poster, replace the show poster with it
                i.thumbnail = i.infoLabels['poster_path']
        itemlist.sort(key=lambda it: it.title)
        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
        return itemlist
    else:
        return episodios(item)


def episodios(item):
    logger.info()
    itemlist = []
    from core import jsontools
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
    # logger.info(data)
    post_link = '%sentradas/abrir_temporada' % host
    token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
    data_t = scrapertools.find_single_match(data, '<a data-s="[^"]+" data-t="([^"]+)"')
    data_s = scrapertools.find_single_match(data, '<a data-s="([^"]+)" data-t="[^"]+"')
    post = {'t': data_t, 's': data_s, '_token': token}
    post = urllib.urlencode(post)
    new_data = httptools.downloadpage(post_link, post=post).data
    # json_data = jsontools.load(new_data)
    # logger.info(new_data)
    patron = '"nepisodio":"([^"]+)",[^,]+,"ntemporada":"([^"]+)".*?"url_directa":"([^"]+)",.*?"titulo":"([^"]+)",'

    matches = re.compile(patron, re.DOTALL).findall(new_data)
    for episode, season, scrapedurl, scrapedname in matches:
        scrapedurl = scrapedurl.replace('\\', '')
        logger.info('###name%s' % scrapedname)

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue
        title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode", extra='serie')
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}
        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)
        itemlist.append(new_item)
    # TODO: skip this when adding to the video library
    if not item.extra:
        # Fetch the details of every episode in the season via multiple threads
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the item title
                i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
            if i.infoLabels.has_key('poster_path'):
                # If the episode has its own image, replace the poster with it
                i.thumbnail = i.infoLabels['poster_path']
    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))
    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    # "Add this show to the video library" option
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
    return itemlist


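# Editorial note: episodios() above drives the site's season endpoint with a
# form-encoded POST carrying t, s and _token scraped from the page; a minimal
# standard-library sketch of that request shape (_post_form, the endpoint and
# the field values are assumptions for illustration):
def _post_form(url, fields):
    import urllib
    import urllib2
    request = urllib2.Request(url, data=urllib.urlencode(fields))
    # The site answers with JSON describing the episodes of the requested season.
    return urllib2.urlopen(request).read()
# Example: _post_form('https://example.com/entradas/abrir_temporada',
#                     {'t': 'serie-slug', 's': '1', '_token': 'csrf-token-from-page'})

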
def findvideos(item):
|
||||
logger.info()
|
||||
from lib import generictools
|
||||
from core import jsontools
|
||||
import urllib
|
||||
import base64
|
||||
|
||||
itemlist = []
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)
|
||||
patron = 'data-player="([^"]+)"[^>]+>([^<]+)</div>.*?' # data-player, servername
|
||||
patron += '<td class="[^"]+">([^<]+)</td><td class="[^"]+">([^<]+)</td>' # quality, lang
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for data_player, servername, quality, lang in matches:
|
||||
post_link = '%sentradas/procesar_player' % host
|
||||
token = scrapertools.find_single_match(data, 'data-token="([^"]+)">')
|
||||
post= {'data':data_player, 'tipo':'videohost', '_token':token}
|
||||
post = urllib.urlencode(post)
|
||||
new_data = httptools.downloadpage(post_link, post=post).data
|
||||
json_data = jsontools.load(new_data)
|
||||
url = json_data['data']
|
||||
|
||||
if 'pelisplay.tv/embed/' in url:
|
||||
new_data = httptools.downloadpage(url).data
|
||||
url = scrapertools.find_single_match(new_data, '"file":"([^"]+)",').replace('\\', '')
|
||||
|
||||
elif 'fondo_requerido' in url:
|
||||
link = scrapertools.find_single_match(url, '=(.*?)&fondo_requerido').partition('&')[0]
|
||||
post_link = '%sprivate/plugins/gkpluginsphp.php' % host
|
||||
post= {'link':link}
|
||||
post = urllib.urlencode(post)
|
||||
new_data2 = httptools.downloadpage(post_link, post=post).data
|
||||
url = scrapertools.find_single_match(new_data2, '"link":"([^"]+)"').replace('\\', '')
|
||||
|
||||
lang = lang.lower().strip()
|
||||
idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
|
||||
'castellano': '[COLOR green](CAST)[/COLOR]',
|
||||
'subtitulado': '[COLOR red](VOS)[/COLOR]'}
|
||||
if lang in idioma:
|
||||
lang = idioma[lang]
|
||||
|
||||
title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (servername.title(), quality, lang)
|
||||
|
||||
itemlist.append(item.clone(channel=__channel__, title=title, action='play', language=lang, quality=quality, url=url))
|
||||
|
||||
|
||||
itemlist = servertools.get_servers_itemlist(itemlist)
|
||||
itemlist.sort(key=lambda it: it.language, reverse=False)
|
||||
# Requerido para FilterTools
|
||||
itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
# Requerido para AutoPlay
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
|
||||
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
|
||||
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
|
||||
thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle))
|
||||
return itemlist
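For reference, the token round-trip that findvideos() performs above can be reproduced stand-alone with the standard library; this is a sketch only, the player id is a made-up placeholder and the endpoint is the one named in the hunk:

import re, urllib, urllib2, json

def resolve_player(page_html, host):
    # Same steps as findvideos(): pull the CSRF token from the page,
    # POST it together with the data-player id, and read the final
    # url from the 'data' key of the JSON answer.
    token = re.search('data-token="([^"]+)"', page_html).group(1)
    post = urllib.urlencode({'data': 'PLAYER_ID_PLACEHOLDER', 'tipo': 'videohost', '_token': token})
    new_data = urllib2.urlopen(urllib2.Request('%sentradas/procesar_player' % host, data=post)).read()
    return json.loads(new_data)['data']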
@@ -183,7 +183,7 @@ def episodesxseasons(item):
season = item.infoLabels['season']
data=get_source(item.url)
season_data = scrapertools.find_single_match(data, 'id="pills-vertical-%s">(.*?)</div>' % season)
patron='href="([^"]+)".*?block">Capitulo(\d+) -.?([^<]+)<'
patron='href="([^"]+)".*?block">Capitulo.?(\d+) -.?([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(season_data)

infoLabels = item.infoLabels

@@ -143,10 +143,12 @@ def series_menu(item):
return itemlist


def get_source(url):

def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer, 'x-requested-with': 'XMLHttpRequest'}).data
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
return data
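Usage sketch for the new optional referer parameter of get_source() above (the URLs are placeholders):

# Plain fetch, behaviour unchanged:
data = get_source('https://example.com/serie/foo/')
# AJAX-style fetch: sends Referer plus x-requested-with, as the player endpoint expects:
data = get_source('https://example.com/player/serie/foo|1|2/', referer='https://example.com/serie/foo/')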

@@ -173,7 +175,7 @@ def list_all (item):

matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
url = host+scrapedurl+'p001/'
url = host+scrapedurl
thumbnail = scrapedthumbnail
contentTitle=scrapedtitle
title = contentTitle
@@ -349,16 +351,15 @@ def season_episodes(item):

def get_links_by_language(item, data):
logger.info()

video_list = []

language = scrapertools.find_single_match(data, 'ul id=level\d_(.*?)\s*class=')
patron = 'data-source=(.*?)data.*?srt=(.*?)data-iframe.*?Opci.*?<.*?hidden>[^\(]\((.*?)\)'
language = scrapertools.find_single_match(data, 'ul id="level\d_([^"]+)"\s*class=')
patron = 'data-source="([^"]+)"data-quality="([^"]+)"data-srt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if language in IDIOMAS:
language = IDIOMAS[language]

for url, sub, quality in matches:
for url, quality, sub in matches:
if 'http' not in url:

new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
@@ -391,15 +392,19 @@ def findvideos(item):
logger.info()
itemlist = []
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
if item.contentType == 'movie':
new_url = item.url.replace('/pelicula/', '/player/%s/' % item.contentType)
else:
base_url = scrapertools.find_single_match(item.url, '(.*?)/temporada')
new_url = base_url.replace('/serie/', '/player/serie/')
new_url += '|%s|%s/' % (item.contentSeason, item.contentEpisodeNumber)
data = get_source(new_url, referer=item.url)

patron_language ='(<ul id=level\d_.*?\s*class=.*?ul>)'
patron_language ='(<ul id="level\d_.*?"*class=.*?ul>)'
matches = re.compile(patron_language, re.DOTALL).findall(data)

for language in matches:
video_list.extend(get_links_by_language(item, language))

video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % (i.server.capitalize(), i.language,
i.quality) )
# Requerido para FilterTools
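A self-contained check of what the tightened patterns in this hunk capture, run against a hand-written fragment (all attribute values are invented):

import re

sample = ('<ul id="level1_latino" class="m"><li data-source="https://example.com/v/abc"'
          'data-quality="1080p"data-srt="https://example.com/sub.srt"></li></ul>')
language = re.search(r'ul id="level\d_([^"]+)"\s*class=', sample).group(1)
links = re.findall(r'data-source="([^"]+)"data-quality="([^"]+)"data-srt="([^"]+)"', sample)
print language   # -> latino
print links      # -> [('https://example.com/v/abc', '1080p', 'https://example.com/sub.srt')]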

@@ -41,7 +41,6 @@ def categorias(item):
# data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<li class="additional_list__item"><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -79,7 +78,6 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
patron = '<source src="([^"]+)" res="\d+" label="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
itemlist.append(item.clone(action="play", title=scrapedtitle, fulltitle = item.title, url=scrapedurl))
return itemlist

@@ -1,22 +0,0 @@
{
"id": "planetadocumental",
"name": "Planeta documental",
"language": ["*"],
"active": true,
"adult": false,
"thumbnail": "https://s8.postimg.cc/r6njedwdt/planeta_documental1.png",
"banner": "https://s8.postimg.cc/6za3m36m9/planeta_documental2.png",
"categories": [
"documentary"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}
@@ -1,142 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Planeta Documental -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from channelselector import get_thumb
from platformcode import config, logger
from channels import autoplay
from channels import filtertools


IDIOMAS = {"Latino": "LAT"}
list_language = IDIOMAS.values()

list_quality = []

list_servers = ['gvideo']

host = "https://www.planetadocumental.com"

def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Últimos documentales", action="lista",
url= host,
thumbnail=get_thumb('lastest', auto=True)))
itemlist.append(item.clone(title="Por genero", action="generos",
url= host, thumbnail=get_thumb('genres', auto=True)))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search", thumbnail=get_thumb('search', auto=True)))

return itemlist


def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'sub-menu elementor-nav-menu--dropdown(.*?)</ul')
patron = 'href="([^"]+).*?'
patron += '>([^<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(
action = "sub_list",
title = scrapedtitle,
url = scrapedurl,
))
return itemlist


def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, 'Ver Online.*?iframe src="([^"]+)')
if "/gd/" in url:
data = httptools.downloadpage(url).data
data = data.replace("file:",'"file":')
url = scrapertools.find_single_match(data, 'source.*?file":\s*"([^"]+)')
itemlist.append(item.clone(
action = "play",
server = "directo",
title = "Ver video " + item.title,
url = url
))
else:
if url:
itemlist.append(item.clone(
action = "play",
title = "Ver video " + item.title,
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist)

# Requerido para FilterTools

itemlist = filtertools.get_links(itemlist, item, list_language)

# Requerido para AutoPlay

autoplay.start(itemlist, item)
return itemlist


def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'post__thumbnail__link.*?src="([^"]+).*?'
patron += 'href="([^"]+).*?'
patron += '>([^<]+).*?'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action = "findvideos",
contentTitle = scrapedtitle.strip(),
title = scrapedtitle.strip(),
url = scrapedurl,
thumbnail = scrapedthumbnail
))
return itemlist


def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
item.url = host + "/?s=" + texto
item.extra = "busqueda"
try:
return sub_list(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []


def sub_list(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'post-thumb-img-content post-thumb.*?src="([^"]+).*?'
patron += 'href="([^"]+).*?'
patron += '>([^<]+).*?'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action = "findvideos",
contentTitle = scrapedtitle,
title = scrapedtitle.strip(),
url = scrapedurl,
thumbnail = scrapedthumbnail
))
return itemlist
@@ -43,9 +43,8 @@ def catalogo(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg> ([^"]+)</li>'
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src=([^"]+) alt=.*?</svg>\s+([^"]+) </li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad +")"
@@ -65,11 +64,10 @@ def categorias(item):
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)".*?title="([^"]+)">.*?<img itemprop="image" src="([^"]+)".*?</svg>([^"]+) </small>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/?sort=latest"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="btn btn-primary--light btn-pagination" itemprop="name" href="([^"]+)" title="Siguiente">')
if next_page_url!="":

@@ -45,7 +45,6 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="timer">(.*?)</span></div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedtitle,scrapedurl,scrapedthumbnail,scrapedtime in matches:
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
@@ -62,7 +61,6 @@ def play(item):
data = httptools.downloadpage(item.url).data
patron = '"video-setup".*?file: "(.*?)",'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
scrapedurl = str(scrapedurl)
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,

@@ -2,7 +2,6 @@

import re
import urlparse

from core import httptools
from core import scrapertools
from core.item import Item
@@ -38,27 +37,22 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []

# Descarga la página
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<div id="categoriesStraightImages">(.*?)</ul>')

# Extrae las categorias


patron = '<li class="cat_pic" data-category=".*?'
patron += '<a href="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'alt="([^"]+)"'

matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
if "?" in scrapedurl:
url = urlparse.urljoin(item.url, scrapedurl + "&o=cm")
else:
url = urlparse.urljoin(item.url, scrapedurl + "?o=cm")

itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, fanart=item.fanart,
thumbnail=scrapedthumbnail))

itemlist.sort(key=lambda x: x.title)
return itemlist

@@ -66,30 +60,21 @@ def categorias(item):
def peliculas(item):
logger.info()
itemlist = []

# Descarga la página
data = httptools.downloadpage(item.url).data
videodata = scrapertools.find_single_match(data, 'videos search-video-thumbs">(.*?)<div class="reset"></div>')

# Extrae las peliculas
patron = '<div class="phimage">.*?'
patron += '<a href="([^"]+)" title="([^"]+).*?'
patron += '<var class="duration">([^<]+)</var>(.*?)</div>.*?'
patron += 'data-mediumthumb="([^"]+)"'

matches = re.compile(patron, re.DOTALL).findall(videodata)

for url, scrapedtitle, duration, scrapedhd, thumbnail in matches:
title = scrapedtitle.replace("&amp;", "&") + " (" + duration + ")"

title = "(" + duration + ") " + scrapedtitle.replace("&amp;", "&")
scrapedhd = scrapertools.find_single_match(scrapedhd, '<span class="hd-thumbnail">(.*?)</span>')
if scrapedhd == 'HD':
title += ' [HD]'

url = urlparse.urljoin(item.url, url)
itemlist.append(
Item(channel=item.channel, action="play", title=title, url=url, fanart=item.fanart, thumbnail=thumbnail))

if itemlist:
# Paginador
patron = '<li class="page_next"><a href="([^"]+)"'
@@ -99,29 +84,16 @@ def peliculas(item):
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", fanart=item.fanart,
url=url))

return itemlist


def play(item):
logger.info()
itemlist = []

# Descarga la página
data = httptools.downloadpage(item.url).data

quality = scrapertools.find_multiple_matches(data, '"id":"quality([^"]+)"')
for q in quality:
match = scrapertools.find_single_match(data, 'var quality_%s=(.*?);' % q)
match = re.sub(r'(/\*.*?\*/)', '', match).replace("+", "")
url = ""
for s in match.split():
val = scrapertools.find_single_match(data, 'var %s=(.*?);' % s.strip())
if "+" in val:
values = scrapertools.find_multiple_matches(val, '"([^"]+)"')
val = "".join(values)

url += val.replace('"', "")
itemlist.append([".mp4 %s [directo]" % q, url])

data = scrapertools.cachePage(item.url)
patron = '"defaultQuality":true,"format":"","quality":"\d+","videoUrl":"(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl in matches:
url = scrapedurl.replace("\/", "/")
itemlist.append(item.clone(action="play", title=url, fulltitle = item.title, url=url))
return itemlist
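The quality loop in play() above stitches the final URL back together from split JavaScript variables; the same idea as a stand-alone toy (variable names and contents invented):

import re

js = ('var part0="https://cdn.example.com/"; var part1="video/720.mp4"; '
      'var quality_720p=/* noise */ part0 + part1;')
expr = re.search(r'var quality_720p=(.*?);', js).group(1)
expr = re.sub(r'(/\*.*?\*/)', '', expr).replace("+", "")   # drop JS comments and '+'
url = ""
for name in expr.split():
    # resolve each referenced variable and concatenate its string value
    url += re.search(r'var %s="(.*?)";' % name.strip(), js).group(1)
print url   # -> https://cdn.example.com/video/720.mp4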


@@ -66,16 +66,21 @@ def lista(item):
action = "menu_info"

# Extrae las entradas
patron = '<div class="video-item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)"(.*?)<div class="durations">.*?</i>([^<]+)<'
patron = '<div class="video-item.*?href="([^"]+)" '
patron += 'title="([^"]+)".*?'
patron += 'data-src="([^"]+)"'
patron += '(.*?)<div class="durations">.*?'
patron += '</i>([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
if "go.php?" in scrapedurl:
scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0])
scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
if not scrapedthumbnail.startswith("https"):
scrapedthumbnail = "https:%s" % scrapedthumbnail
else:
scrapedurl = urlparse.urljoin(host, scrapedurl)
if not scrapedthumbnail.startswith("https"):
scrapedthumbnail = host + "%s" % scrapedthumbnail
scrapedthumbnail = "https:%s" % scrapedthumbnail
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
if '>HD<' in quality:
@@ -110,7 +115,6 @@ def lista(item):
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
item.url, next_page)
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))

return itemlist


@@ -225,7 +229,6 @@ def play(item):
itemlist = []

data = get_data(item.url)

patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
if not matches:

@@ -42,7 +42,6 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
patron = '<li><a href="([^<]+)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -58,7 +57,6 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?<img data-src="(.*?)".*?<span class="duration"><i class="fa fa-clock-o"></i>([^<]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
scrapedplot = ""
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
@@ -85,4 +83,5 @@ def play(item):
scrapedurl = scrapedurl.replace("\/", "/")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
return itemlist


@@ -42,7 +42,6 @@ def catalogo(item):
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a class="pornstar_link js_mpop js-pop" href="([^"]+)".*?"([^"]+)"\s+title="([^"]+)".*?<div class="ps_info_count">\s+([^"]+)\s+Videos'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " [COLOR yellow]" + cantidad + "[/COLOR] "
@@ -61,7 +60,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<div class="category_item_wrapper">.*?<a href="([^"]+)".*?data-thumb_url="([^"]+)".*?alt="([^"]+)".*?<span class="category_count">\s+([^"]+) Videos'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"

@@ -1,7 +1,7 @@
{
"id": "rexpelis",
"name": "Rexpelis",
"active": true,
"active": false,
"adult": false,
"language": ["lat","cast"],
"thumbnail": "https://i.postimg.cc/MMJ5g9Y1/rexpelis1.png",

@@ -59,7 +59,7 @@ def list_all(item):
itemlist = []

data = get_source(item.url)
patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"'
patron = '<div class="post-thumbnail"><a href="([^"]+)" title="([^"]+)">.*?data-lazy-src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle, scrapedthumbnail in matches:

@@ -42,7 +42,6 @@ def canales (item):
data = scrapertools.get_match(data,'Top Networks</a>(.*?)</ul>')
patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -59,7 +58,6 @@ def categorias(item):
data = scrapertools.get_match(data,'More Categories</a>(.*?)</ul>')
patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -75,7 +73,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , fulltitle=scrapedtitle , plot=scrapedplot , folder=True) )

@@ -47,7 +47,6 @@ def categorias(item):
data = scrapertools.get_match(data,'<div class="tagcloud">(.*?)<p>')
patron = '<a href="(.*?)".*?>(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) )
return itemlist
@@ -60,7 +59,6 @@ def catalogo(item):
data = scrapertools.get_match(data,'>Best Porn Studios</a>(.*?)</ul>')
patron = '<a href="(.*?)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , folder=True) )
return itemlist
@@ -71,7 +69,6 @@ def anual(item):
data = httptools.downloadpage(item.url).data
patron = '<li><a href="([^<]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -85,16 +82,14 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="post-thumbnail.*?<a href="([^"]+)" title="(.*?)".*?src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace(" Porn DVD", "")
scrapedtitle = scrapedtitle.replace(" Porn DVD", "").replace("Permalink to ", "").replace(" Porn Movie", "")
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )

return itemlist


@@ -1,12 +0,0 @@
{
"id": "special",
"name": "<Terror 2018>",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://i.postimg.cc/FR2nygS0/g4567.png",
"banner": "",
"categories": [
"movie"
]
}
@@ -1,78 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel Halloween -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = 'https://www.imdb.com/list/ls027655523/?sort=list_order,asc&st_dt=&mode=detail&page='


def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
return data

def mainlist(item):
logger.info()
item.url = host
item.first = 60
item.last = 80
item.page = 1
return list_all(item)


def list_all(item):
logger.info()
from core import jsontools
itemlist = []

data = get_source('%s%s' % (host, item.page))
data = scrapertools.find_single_match(data, '"itemListElement":([^\]]+)\]')
data = data + ']'
#logger.debug(data)
movie_list = eval(data)
for movie in movie_list[item.first:item.last]:

IMDBNumber = movie['url'].replace('title','').replace('/','')

new_item = Item(channel='search', contentType='movie', action='do_search',
infoLabels={'imdb_id': IMDBNumber})

#new_item.infoLabels = tmdb.find_and_set_infoLabels(new_item)
itemlist.append(new_item)
logger.debug('id %s' % IMDBNumber)
#logger.debug(new_item)


tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

for movie in itemlist:
movie.title = movie.infoLabels['title']
movie.wanted = movie.title

if item.last + 20 < len(movie_list):
first = item.last
last = item.last + 20
page = item.page
else:
first = 0
last = 20
page = item.page + 1

itemlist.append(Item(channel=item.channel, title='Siguiente >>', action='list_all',
last=last, first=first, page=page))
return itemlist
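list_all() above parses the scraped itemListElement fragment with eval; since that fragment is JSON on the page, json.loads is a safer drop-in, sketched here with invented ids:

import json

data = '[{"url": "/title/tt1111111/"}, {"url": "/title/tt2222222/"}]'   # shape of the scraped block
movie_list = json.loads(data)   # same structure eval(data) would return, without executing code
print [m['url'].replace('title', '').replace('/', '') for m in movie_list]   # -> ['tt1111111', 'tt2222222']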
@@ -44,6 +44,28 @@
"VOSE"
]
},
{
"id": "emergency_urls",
"type": "list",
"label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
"default": 1,
"enabled": true,
"visible": true,
"lvalues": [
"No",
"Guardar",
"Borrar",
"Actualizar"
]
},
{
"id": "emergency_urls_torrents",
"type": "bool",
"label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
"default": true,
"enabled": true,
"visible": "!eq(-1,'No')"
},
{
"id": "timeout_downloadpage",
"type": "list",

@@ -372,6 +372,7 @@ def findvideos(item):
if not item.language:
item.language = ['CAST'] #Castellano por defecto
matches = []
subtitles = []
item.category = categoria

#logger.debug(item)
@@ -389,51 +390,74 @@ def findvideos(item):
if not data:
logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

#Extraemos el thumb
if not item.thumbnail:
item.thumbnail = scrapertools.find_single_match(data, patron) #guardamos thumb si no existe

#Extraemos quality, audio, year, country, size, scrapedlanguage
patron = '<\/script><\/div><ul>(?:<li><label>Fecha de estreno <\/label>[^<]+<\/li>)?(?:<li><label>Genero <\/label>[^<]+<\/li>)?(?:<li><label>Calidad <\/label>([^<]+)<\/li>)?(?:<li><label>Audio <\/label>([^<]+)<\/li>)?(?:<li><label>Fecha <\/label>.*?(\d+)<\/li>)?(?:<li><label>Pais de Origen <\/label>([^<]+)<\/li>)?(?:<li><label>Tamaño <\/label>([^<]+)<\/li>)?(<li> Idioma[^<]+<img src=.*?<br \/><\/li>)?'
try:
quality, audio, year, country, size, scrapedlanguage = scrapertools.find_single_match(data, patron)
except:
quality = ''
audio = ''
year = ''
country = ''
size = ''
scrapedlanguage = ''
if quality: item.quality = quality
if audio: item.quality += ' %s' % audio.strip()
if not item.infoLabels['year'] and year: item.infoLabels['year'] = year
if size: item.quality += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip()
if size: item.title += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip()
language = []
matches = re.compile('(\d+.png)', re.DOTALL).findall(scrapedlanguage)
for lang in matches:
if "1.png" in lang and not 'CAST' in language: language += ['CAST']
if "512.png" in lang and not 'LAT' in language: language += ['LAT']
if ("1.png" not in lang and "512.png" not in lang) and not 'VOSE' in language: language += ['VOSE']
if language: item.language = language

#Extraemos los enlaces .torrent
##Modalidad de varios archivos
patron = '<div class="fichadescargat"><\/div><div class="table-responsive"[^>]+>.*?<\/thead><tbody>(.*?)<\/tbody><\/table><\/div>'
if scrapertools.find_single_match(data, patron):
data_torrents = scrapertools.find_single_match(data, patron)
patron = '<tr><td>.*?<\/td><td><a href="([^"]+)"[^>]+><[^>]+><\/a><\/td><\/tr>'
#Modalidad de un archivo
else:
data_torrents = data
patron = '<div class="fichasubtitulos">.*?<\/div><\/li><\/ul>.*?<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data_torrents)
if not matches: #error
logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
matches = item.emergency_urls[1] #Restauramos matches de vídeos
subtitles = item.emergency_urls[2] #Restauramos matches de subtítulos
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

if not item.armagedon:
#Extraemos el thumb
if not item.thumbnail:
item.thumbnail = scrapertools.find_single_match(data, patron) #guardamos thumb si no existe

#Extraemos quality, audio, year, country, size, scrapedlanguage
patron = '<\/script><\/div><ul>(?:<li><label>Fecha de estreno <\/label>[^<]+<\/li>)?(?:<li><label>Genero <\/label>[^<]+<\/li>)?(?:<li><label>Calidad <\/label>([^<]+)<\/li>)?(?:<li><label>Audio <\/label>([^<]+)<\/li>)?(?:<li><label>Fecha <\/label>.*?(\d+)<\/li>)?(?:<li><label>Pais de Origen <\/label>([^<]+)<\/li>)?(?:<li><label>Tamaño <\/label>([^<]+)<\/li>)?(<li> Idioma[^<]+<img src=.*?<br \/><\/li>)?'
try:
quality = ''
audio = ''
year = ''
country = ''
size = ''
scrapedlanguage = ''
quality, audio, year, country, size, scrapedlanguage = scrapertools.find_single_match(data, patron)
except:
pass
if quality: item.quality = quality
if audio: item.quality += ' %s' % audio.strip()
if not item.infoLabels['year'] and year: item.infoLabels['year'] = year
if size: item.quality += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip()
if size:
item.title = re.sub(r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '', item.title) #Quitamos size de título, si lo traía
item.title += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip()

language = []
matches_lang = re.compile('(\d+.png)', re.DOTALL).findall(scrapedlanguage)
for lang in matches_lang:
if "1.png" in lang and not 'CAST' in language: language += ['CAST']
if "512.png" in lang and not 'LAT' in language: language += ['LAT']
if ("1.png" not in lang and "512.png" not in lang) and not 'VOSE' in language: language += ['VOSE']
if language: item.language = language

#Extraemos los enlaces .torrent
#Modalidad de varios archivos
patron = '<div class="fichadescargat"><\/div><div class="table-responsive"[^>]+>.*?<\/thead><tbody>(.*?)<\/tbody><\/table><\/div>'
if scrapertools.find_single_match(data, patron):
data_torrents = scrapertools.find_single_match(data, patron)
patron = '<tr><td>.*?<\/td><td><a href="([^"]+)"[^>]+><[^>]+><\/a><\/td><\/tr>'
#Modalidad de un archivo
else:
data_torrents = data
patron = '<div class="fichasubtitulos">.*?<\/div><\/li><\/ul>.*?<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data_torrents)
if not matches: #error
logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))

if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
matches = item.emergency_urls[1] #Restauramos matches de vídeos
subtitles = item.emergency_urls[2] #Restauramos matches de subtítulos
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos

else: #SERIES: ya viene con las urls
data = item.url #inicio data por compatibilidad
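For orientation, the three-slot layout of item.emergency_urls that both branches above rely on; a sketch with placeholder URLs:

# [0] local .torrent copies, [1] video matches, [2] subtitle matches
emergency_urls = [
    ['special://userdata/copy.torrent'],       # used as primary url when item.armagedon
    ['https://example.com/enlace1.torrent'],   # restored into 'matches'
    ['https://example.com/subs1.srt'],         # restored into 'subtitles'
]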
@@ -447,11 +471,22 @@ def findvideos(item):
del item.subtitle
else:
subtitle = scrapertools.find_single_match(data, patron).replace('&amp;', '&').replace('.io/', sufix).replace('.com/', sufix)
data_subtitle = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(subtitle, timeout=timeout).data)
patron = '<tbody>(<tr class="fichserietabla_b">.*?<\/tr>)<\/tbody>' #salvamos el bloque
data_subtitle = scrapertools.find_single_match(data_subtitle, patron)
patron = '<tr class="fichserietabla_b">.*?<a href="([^"]+)"'
subtitles = re.compile(patron, re.DOTALL).findall(data_subtitle) #Creamos una lista con todos los sub-títulos

try:
data_subtitle = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(subtitle, timeout=timeout).data)
except:
pass

if not data_subtitle:
if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
matches = item.emergency_urls[1] #Restauramos matches de vídeos
subtitles = item.emergency_urls[2] #Restauramos matches de subtítulos
item.armagedon = True #Marcamos la situación como catastrófica
else:
patron = '<tbody>(<tr class="fichserietabla_b">.*?<\/tr>)<\/tbody>' #salvamos el bloque
data_subtitle = scrapertools.find_single_match(data_subtitle, patron)
patron = '<tr class="fichserietabla_b">.*?<a href="([^"]+)"'
subtitles = re.compile(patron, re.DOTALL).findall(data_subtitle) #Creamos una lista con todos los sub-títulos
if subtitles:
item.subtitle = []
for subtitle in subtitles:
@@ -460,29 +495,49 @@ def findvideos(item):

#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(subtitles)
#logger.debug(data)

#Si es un lookup para cargar las urls de emergencia en la Videoteca...
if item.videolibray_emergency_urls:
item.emergency_urls = [] #Iniciamos emergency_urls
item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales
item.emergency_urls.append(matches) #Salvamos matches de los vídeos...
item.emergency_urls.append(subtitles) #Salvamos matches de los subtítulos

#Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
if not item.videolibray_emergency_urls:
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

#Ahora tratamos los enlaces .torrent
for scrapedurl in matches: #leemos los torrents con la diferentes calidades
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()

item_local.url = scrapedurl.replace('&amp;', '&').replace('.io/', sufix).replace('.com/', sufix)
if item.videolibray_emergency_urls:
item.emergency_urls[0].append(scrapedurl) #guardamos la url y pasamos a la siguiente
continue
if item.emergency_urls and not item.videolibray_emergency_urls:
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA
if item.armagedon:
item_local.url = item.emergency_urls[0][0] #... ponemos la emergencia como primaria
del item.emergency_urls[0][0] #Una vez tratado lo limpiamos

#Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent
size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]')
if not size:
size = scrapertools.find_single_match(item_local.quality, '\s*\[(\d+,?\d*?\s\w\s*[b|B])\]')
if not size and not item.armagedon:
size = generictools.get_torrent_size(scrapedurl) #Buscamos el tamaño en el .torrent
if size:
item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía
item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título
size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía
item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad
item_local.title = re.sub(r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía
item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título
item_local.quality = re.sub(r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía
item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad
if item.armagedon: #Si es catastrófico, lo marcamos
item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality

#Ahora pintamos el link del Torrent
item_local.url = scrapedurl.replace('&amp;', '&').replace('.io/', sufix).replace('.com/', sufix)
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))

#Preparamos título y calidad, quitamos etiquetas vacías
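A quick check of the widened size-tag pattern used above (\s* where the old pattern required a single space); sample titles invented:

import re

patron = r'\s*\[\d+,?\d*?\s\w\s*[b|B]\]'
print re.sub(patron, '', 'Pelicula [4,2 G B]')   # -> 'Pelicula'
print re.sub(patron, '', 'Pelicula [700 M B]')   # -> 'Pelicula'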
@@ -506,6 +561,9 @@ def findvideos(item):
#logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)

if item.videolibray_emergency_urls: #Si ya hemos guardado todas las urls...
return item #... nos vamos

if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:

@@ -45,7 +45,6 @@ def categorias(item):
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a href="([^"]+)">\s*(.*?)\s*<'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""

@@ -18,8 +18,6 @@ def mainlist(item):
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))

# itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist


@@ -30,7 +28,6 @@ def categorias(item):
data = scrapertools.get_match(data,'<h3>Categories</h3>(.*?)</ul>')
patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
@@ -45,7 +42,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="post" id="post-\d+">.*?<a href="([^"]+)" title="(.*?)"><img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace(" – Free Porn Download", "")
@@ -63,7 +59,6 @@ def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)

for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle

@@ -3,11 +3,11 @@
"name": "ThumbZilla",
"active": true,
"adult": true,
"language": "*",
"language": "en",
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg",
"thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png",
"thumbnail": "https://ci.phncdn.com/www-static/thumbzilla/images/pc/logo.png?cache=2018110203",
"banner": "",
"categories": [
"adult"
],
"settings": [
@@ -35,4 +35,3 @@
}
]
}


@@ -44,28 +44,36 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=__channel__, action="videos", title="Más Calientes", url=host,
viewmode="movie", thumbnail=get_thumb("/channels_adult.png")))
viewmode="movie", thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="Nuevas", url=host + '/newest',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))
itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/trending',

itemlist.append(Item(channel=__channel__, title="Tendencias", url=host + '/tending',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="Mejores Videos", url=host + '/top',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="Populares", url=host + '/popular',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="Videos en HD", url=host + '/hd',
action="videos", viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="Caseros", url=host + '/hd',
action="videos", viewmode="movie_with_plot", viewcontent='homemade',
thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="Categorías", action="categorias",
url=host + '/categories/', viewmode="movie_with_plot", viewcontent='movies',
thumbnail=get_thumb("channels_adult.png")))

itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host,
thumbnail=get_thumb("channels_adult.png"), extra="buscar"))
return itemlist
@@ -92,6 +100,7 @@ def search(item, texto):
def videos(item):
logger.info()
itemlist = []

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<a class="[^"]+" href="([^"]+)">' # url
@@ -99,15 +108,20 @@ def videos(item):
patron += '<span class="title">([^<]+)</span>.*?' # title
patron += '<span class="duration">([^<]+)</span>' # time
matches = scrapertools.find_multiple_matches(data, patron)

for scrapedurl, scrapedthumbnail, scrapedtitle, time in matches:
title = "[%s] %s" % (time, scrapedtitle)

itemlist.append(Item(channel=item.channel, action='play', title=title, thumbnail=scrapedthumbnail,
url=host + scrapedurl, contentTile=scrapedtitle, fanart=scrapedthumbnail))

paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />').replace('amp;', '')

if paginacion:
itemlist.append(Item(channel=item.channel, action="videos",
thumbnail=thumbnail % 'rarrow',
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))

return itemlist


@@ -116,9 +130,12 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
# logger.info(data)
patron = 'class="checkHomepage"><a href="([^"]+)".*?' # url
patron += '<span class="count">([^<]+)</span>' # title, vids

matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, vids in matches:
scrapedtitle = scrapedurl.replace('/categories/', '').replace('-', ' ').title()
title = "%s (%s)" % (scrapedtitle, vids.title())
@@ -127,17 +144,14 @@ def categorias(item):
itemlist.append(Item(channel=item.channel, action="videos", fanart=thumbnail,
title=title, url=url, thumbnail=thumbnail,
viewmode="movie_with_plot", folder=True))

return itemlist


def play(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
patron = '<li><a class="qualityButton active" data-quality="([^"]+)">([^"]+)</a></li>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl,calidad in matches:
title = "[COLOR yellow](%s)[/COLOR] %s" % (calidad, item.contentTile)
itemlist.append(item.clone(channel=item.channel, action="play", title=item.title , url=scrapedurl , folder=True) )
return itemlist
url = scrapertools.find_single_match(data, '"quality":"[^"]+","videoUrl":"([^"]+)"').replace('\\', '')
itemlist.append(item.clone(url=url, title=item.contentTile))

return itemlist

16
plugin.video.alfa/channels/tnaflix.json
Normal file
@@ -0,0 +1,16 @@
{
"id": "tnaflix",
"name": "tnaflix",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "https://www.tnaflix.com/images/favicons/tnaflix/android-icon-192x192.png",
"banner": "",
"categories": [
"adult"
],
"settings": [

]
}

116
plugin.video.alfa/channels/tnaflix.py
Normal file
@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'https://www.tnaflix.com'

def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/new/1"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular/?period=month&d=all"))
itemlist.append( Item(channel=item.channel, title="Mejor valorado" , action="peliculas", url=host + "/toprated/?d=all&period=month"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/all/top-rated/1/all"))
itemlist.append( Item(channel=item.channel, title="PornStars" , action="categorias", url=host + "/pornstars"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist


def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search.php?what=%s&tab=" % texto
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []


def catalogo(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<div class="vidcountSp">(\d+)</div>.*?<a class="categoryTitle channelTitle" href="([^"]+)" title="([^"]+)">.*?data-original="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for cantidad,scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
title = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist


def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
if item.title=="PornStars" :
data = scrapertools.get_match(data,'</i> Hall Of Fame Pornstars</h2>(.*?)</section>')
patron = '<a class="thumb" href="([^"]+)">.*?<img src="([^"]+)".*?<div class="vidcountSp">(.*?)</div>.*?<a class="categoryTitle".*?>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
scrapedplot = ""
if item.title=="Categorias" :
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
if item.title=="PornStars" :
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "?section=videos"
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist


def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a class=\'thumb no_ajax\' href=\'(.*?)\'.*?data-original=\'(.*?)\' alt="([^"]+)"><div class=\'videoDuration\'>([^<]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="llNav" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist


def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<meta itemprop="contentUrl" content="([^"]+)" />'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
|
||||
|
||||
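Every listing function in these channels closes with the same "Página Siguiente >>" idiom: scrape the next-page href, resolve it against the current url, append a blue navigation Item. A minimal sketch of that shared pattern as a helper — the helper itself is hypothetical, the channels inline this logic rather than calling one:

# Sketch only: a hedged factoring of the pagination idiom repeated above.
# add_next_page is a made-up name; it relies on the same scrapertools,
# urlparse and Item already imported by the channel.
def add_next_page(itemlist, item, data, pattern, action):
    # Look for the "next page" href with the channel-specific pattern
    next_page_url = scrapertools.find_single_match(data, pattern)
    if next_page_url != "":
        # Relative hrefs are resolved against the current page url
        next_page_url = urlparse.urljoin(item.url, next_page_url)
        itemlist.append(Item(channel=item.channel, action=action,
                             title="Página Siguiente >>", text_color="blue",
                             url=next_page_url, folder=True))
    return itemlist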
@@ -45,6 +45,28 @@
                "VOSE"
            ]
        },
        {
            "id": "emergency_urls",
            "type": "list",
            "label": "Se quieren guardar Enlaces de Emergencia por si se cae la Web?",
            "default": 1,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No",
                "Guardar",
                "Borrar",
                "Actualizar"
            ]
        },
        {
            "id": "emergency_urls_torrents",
            "type": "bool",
            "label": "Se quieren guardar Torrents de Emergencia por si se cae la Web?",
            "default": true,
            "enabled": true,
            "visible": "!eq(-1,'No')"
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
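For context, a "list"-type setting like emergency_urls stores the selected index into lvalues, and a channel reads it back through config.get_setting, the same call used elsewhere in this diff. A hedged sketch — the channel id is a placeholder, and the index-to-label mapping is taken from the lvalues above:

# Sketch: reading the emergency_urls setting back. 'somechannel' is a
# placeholder id; indices 0..3 map to "No", "Guardar", "Borrar", "Actualizar".
from platformcode import config

emergency_mode = config.get_setting('emergency_urls', 'somechannel')
if emergency_mode == 1:     # "Guardar": save emergency links for this channel
    pass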
@@ -178,7 +178,9 @@ def listado(item):
    #logger.debug(data)

    #Look for the pager url and the last page
    patron = '<a href="([^"]+=(\d+))" title="Siguiente">Siguiente<\/a>'
    patron = '<a href="([^"]+=(\d+))" title="Next">Next<\/a>'
    if not scrapertools.find_single_match(data, patron):
        patron = '<a href="([^"]+=(\d+))" title="Siguiente">Siguiente<\/a>'
    try:
        next_page_url, curr_page = scrapertools.find_single_match(data, patron)
        curr_page = int(curr_page) / len(matches)
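This hunk switches the pager pattern to the site's new English "Next" label while keeping the Spanish "Siguiente" one as a fallback when the first finds nothing. The same first-match-wins cascade can be written generically; a sketch, with a hypothetical helper name:

# Sketch: try several pager patterns in order and keep the first that matches.
def first_matching_pattern(data, patterns):
    for patron in patterns:
        if scrapertools.find_single_match(data, patron):
            return patron
    return patterns[-1]    # fall back to the last candidate

patron = first_matching_pattern(data, [
    '<a href="([^"]+=(\d+))" title="Next">Next<\/a>',
    '<a href="([^"]+=(\d+))" title="Siguiente">Siguiente<\/a>',
])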
@@ -345,56 +347,100 @@ def findvideos(item):
    if not data:
        logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
        return itemlist                                     #if there is no more data, something failed: paint what we have
        if item.emergency_urls and not item.videolibray_emergency_urls:     #Are there emergency urls?
            matches = item.emergency_urls[1]                #Restore matches
            item.armagedon = True                           #Flag the situation as catastrophic
        else:
            if item.videolibray_emergency_urls:             #If called from videolibrary creation...
                return item                                 #Return the calling Item
            else:
                return itemlist                             #if there is no more data, something failed: paint what we have

    matches = re.compile(patron, re.DOTALL).findall(data)
    if not item.armagedon:
        matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:                                         #error
        logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
        itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
        return itemlist                                     #if there is no more data, something failed: paint what we have
        item = generictools.web_intervenida(item, data)     #Check that the site has not been taken down
        if item.intervencion:                               #It has been shut down by court order
            item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)      #Call the method that paints the error
        else:
            logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))

        if item.emergency_urls and not item.videolibray_emergency_urls:    #Are there emergency urls?
            matches = item.emergency_urls[1]                #Restore matches
            item.armagedon = True                           #Flag the situation as catastrophic
        else:
            if item.videolibray_emergency_urls:             #If called from videolibrary creation...
                return item                                 #Return the calling Item
            else:
                return itemlist                             #if there is no more data, something failed: paint what we have

    #logger.debug("PATRON: " + patron)
    #logger.debug(matches)
    #logger.debug(data)

    #If this is a lookup to load the emergency urls into the videolibrary...
    if item.videolibray_emergency_urls:
        item.emergency_urls = []                            #Initialise emergency_urls
        item.emergency_urls.append([])                      #Reserve the slot for the local .torrents
        item.emergency_urls.append(matches)                 #Save the video matches...

    #Call the method that builds the video's main title with all the info obtained from TMDB
    item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
    if not item.videolibray_emergency_urls:
        item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)

    #Now process the .torrent links
    for scrapedurl in matches:                              #read the torrents with the different qualities
        if 'javascript' in scrapedurl:                      #skip the junk
    for scrapedurl in matches:                              #read the torrents with the different qualities
        if 'javascript' in scrapedurl:                      #skip the junk
            continue

        url = urlparse.urljoin(host, scrapedurl)
        #Read the next page, which is where the magnet/torrent really lives
        try:
            data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)| ", "", httptools.downloadpage(url, timeout=timeout).data)
            data = unicode(data, "utf-8", errors="replace").encode("utf-8")
        except:
            pass

        patron = "window.open\('([^']+)'"
        url = scrapertools.find_single_match(data, patron)
        if not url:                                         #error
            logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
            itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
            continue                                        #if there is no more data, something failed: move to the next one
        url = ''
        if not item.armagedon:
            url = urlparse.urljoin(host, scrapedurl)
            #Read the next page, which is where the magnet/torrent really lives
            try:
                data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)| ", "", httptools.downloadpage(url, timeout=timeout).data)
                data = unicode(data, "utf-8", errors="replace").encode("utf-8")
            except:
                pass

            patron = "window.open\('([^']+)'"
            url = scrapertools.find_single_match(data, patron)
            if not url:                                     #error
                logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
                itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))

                if item.emergency_urls and not item.videolibray_emergency_urls:    #Are there emergency urls?
                    item.armagedon = True                   #Flag the situation as catastrophic
                else:
                    continue                                #if there is no more data, something failed: move to the next one

        #Make a copy of the Item to work on
        item_local = item.clone()

        item_local.url = urlparse.urljoin(host, url)
        if item.videolibray_emergency_urls:
            item.emergency_urls[0].append(item_local.url)   #save the url and move to the next one
            continue
        if item.emergency_urls and not item.videolibray_emergency_urls:
            item_local.torrent_alt = item.emergency_urls[0][0]      #Save the ALTERNATIVE .torrent url
            if item.armagedon:
                item_local.url = item.emergency_urls[0][0]  #Restore the url
            if len(item.emergency_urls[0]) > 1:
                del item.emergency_urls[0][0]

        #Check whether it already carries a size; if not, look it up in the .torrent file
        size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]')
        if not size:
        size = scrapertools.find_single_match(item_local.quality, '\s?\[(\d+,?\d*?\s\w\s?[b|B])\]')
        if not size and not item.armagedon:
            size = generictools.get_torrent_size(item_local.url)    #Look the size up in the .torrent
        if size:
            item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title)        #Strip the size from the title, if it carried one
            item_local.title = re.sub(r'\s?\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.title)    #Strip the size from the title, if it carried one
            item_local.title = '%s [%s]' % (item_local.title, size)         #Append the size to the title
            size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
            item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality)     #Strip the size from the quality, if it carried one
            item_local.quality = '%s [%s]' % (item_local.quality, size)     #Append the size to the quality
            item_local.quality = re.sub(r'\s?\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality)    #Strip the size from the quality, if it carried one
            item_local.quality = '%s [%s]' % (item_local.quality, size)     #Append the size to the quality
        if item.armagedon:                                  #If the situation is catastrophic, mark it
            item_local.quality = '[/COLOR][COLOR hotpink][E] [COLOR limegreen]%s' % item_local.quality

        #Now paint the Torrent link
        item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))
@@ -421,6 +467,9 @@ def findvideos(item):

    #logger.debug(item_local)

    if item.videolibray_emergency_urls:                     #If we have already saved all the urls...
        return item                                         #...we leave

    if len(itemlist_f) > 0:                                 #If there are filtered entries...
        itemlist.extend(itemlist_f)                         #Paint the filtered screen
    else:
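The size pattern above, '\s?\[(\d+,?\d*?\s\w\s?[b|B])\]', was loosened so the leading space and the space before the b/B are both optional. A standalone check of what it accepts, runnable on its own:

# Sketch: behaviour of the loosened size pattern on typical quality strings.
import re

patron = '\s?\[(\d+,?\d*?\s\w\s?[b|B])\]'
for quality in ['HDTV 720p [1,4 GB]', '[950 Mb]', '4K [12 G B]']:
    m = re.search(patron, quality)
    print(m.group(1) if m else 'no size')   # -> '1,4 GB', '950 Mb', '12 G B'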
16
plugin.video.alfa/channels/tryboobs.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "tryboobs",
    "name": "tryboobs",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://tb3.fuckandcdn.com/tb/tbstatic/v30/common/tryboobs/img/logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

104
plugin.video.alfa/channels/tryboobs.py
Normal file
@@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'http://www.tryboobs.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host))
    itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/week/"))
    itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="peliculas", url=host + "/top-rated/week/"))
    itemlist.append( Item(channel=item.channel, title="Modelos" , action="modelos", url=host + "/models/model-viewed/1/"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def modelos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<a href="([^"]+)" class="th-model">.*?src="([^"]+)".*?<span class="roliks"><span>(\d+)</span>.*?<span class="title">([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="modelos" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<a href="([^"]+)" class="th-cat">.*?<img src="([^"]+)".*?<span>(\d+)</span>.*?<span class="title">([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = 'href="([^"]+)"\s*class="th-video.*?<img src="([^"]+)".*?<span class="time">([^"]+)</span>.*?<span class="title">([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,duracion,scrapedtitle in matches:
        url = scrapedurl
        contentTitle = scrapedtitle
        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<li><a class="pag-next" href="([^"]+)"><ins>Next</ins></a>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<video src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=url,
                             thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist
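Every search() in these channels logs failures by iterating sys.exc_info(), which prints the exception type, the value, and a raw traceback object on separate lines. The standard traceback module gives a more readable record; a possible alternative, not what the addon ships:

# Sketch: log the full formatted traceback instead of the raw
# sys.exc_info() tuple. Equivalent information, easier to read in the log.
import traceback

def search(item, texto):
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return peliculas(item)
    except:
        logger.error(traceback.format_exc())
        return []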
@@ -52,9 +52,6 @@ def mainlist(item):
    itemlist.append(Item(channel=item.channel, title="Generos", action="section",
                         thumbnail=get_thumb('genres', auto=True)))

    itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
                         thumbnail=get_thumb('year', auto=True)))

    itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url=host + 'search?q=',
                         thumbnail=get_thumb('search', auto=True)))
@@ -35,7 +35,7 @@ langi = langs[config.get_setting('imdb', "tvmoviedb")]
|
||||
adult_mal = config.get_setting('adult_mal', "tvmoviedb")
|
||||
mal_ck = "MzE1MDQ2cGQ5N2llYTY4Z2xwbGVzZjFzbTY="
|
||||
images_predef = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/"
|
||||
default_fan = filetools.join(config.get_runtime_path(), "fanart.jpg")
|
||||
default_fan = filetools.join(config.get_runtime_path(), "fanart1.jpg")
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
|
||||
@@ -4,6 +4,7 @@
# -*- By the Alfa Develop Group -*-

import re
import base64

from channels import autoplay
from channels import filtertools
@@ -178,8 +179,8 @@ def findvideos(item):

    data = get_source(item.url)
    video_id = scrapertools.find_single_match(data, 'getEnlaces\((\d+)\)')
    links_url = '%s%s%s' % (host,'/link/repro.php/',video_id)
    online_url = '%s%s%s' % (host, '/link/enlaces_online.php/', video_id)
    links_url = '%s%s%s' % (host,'link/repro.php/',video_id)
    online_url = '%s%s%s' % (host, 'link/enlaces_online.php/', video_id)

    # links_url options listing
@@ -223,10 +224,14 @@ def findvideos(item):
            video_id = scrapertools.find_single_match(scrapedurl, 'index.php/(\d+)/')
            new_url = '%s%s%s%s' % (host, 'ext/index-include.php?id=', video_id, '&tipo=1')
            data = get_source(new_url)
            video_url = scrapertools.find_single_match(data, '<div class=container><a href=(.*?)>')
            video_url = video_url.replace('enlace.php', 'r')
            data = httptools.downloadpage(video_url, follow_redirects=False)
            url = data.headers['location']
            video_url = scrapertools.find_single_match(data, '<div class=container><a onclick=addURL.*?href=(.*?)>')
            video_url = video_url.replace('%3D', '&')+'status'
            headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                       'Referer': item.url}
            data = httptools.downloadpage(video_url, headers=headers, ignore_response_code=True).data
            b64_url = scrapertools.find_single_match(data, "var string = '([^']+)';")+'=='
            url = base64.b64decode(b64_url)

        title = '%s '+ '[%s]' % language
        if url != '':
            itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=language,
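Appending '==' before b64decode only works when the site always returns strings whose length is 2 short of a multiple of 4; any other length raises binascii.Error("Incorrect padding"). A length-aware pad is safer; a sketch, not what the channel currently does:

# Sketch: pad a base64 string to a multiple of 4 before decoding,
# instead of blindly appending '=='.
import base64

def b64decode_padded(s):
    # -len(s) % 4 yields exactly the number of '=' characters missing
    return base64.b64decode(s + '=' * (-len(s) % 4))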
@@ -52,6 +52,9 @@ def list_movies(item, silent=False):

        head_nfo, new_item = videolibrarytools.read_nfo(nfo_path)

        if not new_item:        #If the .nfo could not be read properly, skip to the next one
            continue

        if len(new_item.library_urls) > 1:
            multicanal = True
        else:
@@ -62,7 +65,6 @@ def list_movies(item, silent=False):

        for canal_org in new_item.library_urls:
            canal = generictools.verify_channel(canal_org)
            logger.error(canal)
            try:
                channel_verify = __import__('channels.%s' % canal, fromlist=["channels.%s" % canal])
                logger.debug('El canal %s parece correcto' % channel_verify)
@@ -286,8 +288,6 @@ def list_tvshows(item):
            # logger.debug("item_tvshow:\n" + item_tvshow.tostring('\n'))

            ## check that the channels exist ##
            logger.debug(item_tvshow)
            if len(item_tvshow.library_urls) > 0:
                itemlist.append(item_tvshow)
16
plugin.video.alfa/channels/videosXYZ.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "videosXYZ",
    "name": "videosXYZ",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://free-porn-videos.xyz/wp-content/uploads/2018/10/cropped-Logo-org-Free-porn-videos.xyz-app-icon-192x192.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

72
plugin.video.alfa/channels/videosXYZ.py
Normal file
@@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://free-porn-videos.xyz'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
    itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/topics/porn-videos/"))
    itemlist.append( Item(channel=item.channel, title="Parody" , action="peliculas", url=host + "/topics/free-porn-parodies/"))
    itemlist.append( Item(channel=item.channel, title="BigTits" , action="peliculas", url=host + "/?s=big+tit"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle.replace("Permalink to Watch ", "").replace("Porn Online", "").replace("Permalink to ", "")
        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , contentTitle=scrapedtitle, plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">»</a>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl = scrapertools.find_single_match(data,'<iframe src="([^"]+)"')
    scrapedurl = scrapedurl.replace("%28", "(").replace("%29", ")")
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
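play() above extracts and cleans the iframe src but then scans the outer page with servertools.find_video_items. Feeding the iframe document to the scanner instead would let it see embeds the outer page only exposes through the frame; a possible refinement, an assumption rather than the shipped behaviour:

# Sketch: resolve the iframe first, then let servertools scan that document.
# Hypothetical variant of the play() above, same imports.
def play(item):
    logger.info()
    data = httptools.downloadpage(item.url).data
    iframe_url = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    if iframe_url:
        data = httptools.downloadpage(iframe_url).data   # scan the embed page
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = item.channel
    return itemlist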
16
plugin.video.alfa/channels/vintagetube.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "vintagetube",
    "name": "vintagetube",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.vintagexxxsex.com/images/vintagexxxsex.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

95
plugin.video.alfa/channels/vintagetube.py
Normal file
@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://www.vintagetube.club'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/tube/last-1/"))
    itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/tube/popular-1/"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search/%s" % texto
    item.url = item.url + "/popular-1/"
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="prev prev-ct">.*?<a href="(.*?)">.*?<img src="(.*?)".*?<span class="prev-tit">(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = str(scrapedtitle)
        scrapedurl = host + scrapedurl
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="prev">.*?<a href="(.*?)">.*?<img src="(.*?)">.*?<span class="prev-tit">(.*?)</span>.*?<div class="prev-dur"><span>(.*?)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
        scrapedplot = ""
        scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + str(scrapedtitle)
        scrapedurl = scrapedurl.replace("/xxx.php?tube=", "")
        scrapedurl = host + scrapedurl
        itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    current_page = scrapertools.find_single_match(data,'<li><span class="page">(.*?)</span></li>')
    next_page = int(current_page) + 1
    url = item.url
    url_page = current_page + "/"
    url = url.replace(url_page, "")
    next_page_url = url + str(next_page)+"/"
    itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl = scrapertools.find_single_match(data,'<iframe frameborder=0 scrolling="no" src=\'(.*?)\'')
    if scrapedurl == "":
        scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
        scrapedurl = scrapedurl.replace ("http:", "")
        data = httptools.downloadpage("http:" + scrapedurl).data
    else:
        data = httptools.downloadpage(scrapedurl).data
        scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
        data = httptools.downloadpage("https:" + scrapedurl).data
    media_url = scrapertools.find_single_match(data,'<source src="(.*?)"')
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="play", title=media_url, fulltitle=media_url, url=media_url,
                         thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist
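vintagetube's peliculas() builds the next-page url with a positional str.replace of "N/", which rewrites the first occurrence anywhere in the url, not necessarily the trailing page segment. A regex anchored at the end of the url is more robust; a sketch of that alternative:

# Sketch: compute the next page by rewriting the trailing "N/" segment
# instead of a positional str.replace that could hit an earlier "N/".
import re

def next_page_url(url, current_page):
    return re.sub(r'%s/$' % current_page, str(int(current_page) + 1) + '/', url)

next_page_url('http://www.vintagetube.club/tube/last-1/', '1')  # -> .../last-2/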
16
plugin.video.alfa/channels/vintagexxxsex.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "vintagexxxsex",
    "name": "vintagexxxsex",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.vintagexxxsex.com/images/vintagexxxsex.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

101
plugin.video.alfa/channels/vintagexxxsex.py
Normal file
@@ -0,0 +1,101 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://www.vintagexxxsex.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/all-top/1/"))
    itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/all-new/1/"))
    itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/all-longest/1/"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^"]+)"><i class="fa fa-tag"></i>(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedurl = host + scrapedurl
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="th">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<span class="th_nm">([^"]+)</span>.*?<i class="fa fa-clock-o"></i>([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
        contentTitle = scrapedtitle
        title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
        scrapedurl = scrapedurl.replace("/up.php?xxx=", "")
        scrapedurl = host + scrapedurl
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))

    next_page_url = scrapertools.find_single_match(data,'<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )

    # else:
    #     patron = '<li><span class="pg_nm">\d+</span></li>.*?href="([^"]+)"'
    #     next_page = re.compile(patron,re.DOTALL).findall(data)
    #     next_page = item.url + next_page[0]
    #     itemlist.append( Item(channel=item.channel, action="peliculas", title=next_page[0] , text_color="blue", url=next_page[0] ) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    scrapedurl = scrapertools.find_single_match(data,'<iframe src="(.*?)"')
    data = httptools.downloadpage(scrapedurl).data
    scrapedurl = scrapertools.find_single_match(data,'<source src="(.*?)"')
    if scrapedurl == "":
        scrapedurl = "http:" + scrapertools.find_single_match(data,'<iframe src="(.*?)"')
        data = httptools.downloadpage(scrapedurl).data
        scrapedurl = scrapertools.find_single_match(data,'file: "(.*?)"')
    itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
                         thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
    return itemlist
16
plugin.video.alfa/channels/vporn.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "vporn",
    "name": "vporn",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://th-us2.vporn.com/images/logo%20Dark%20theme.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

109
plugin.video.alfa/channels/vporn.py
Normal file
@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'https://www.vporn.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/newest/month/"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/views/month/"))
    itemlist.append( Item(channel=item.channel, title="Mejor Valoradas" , action="peliculas", url=host + "/rating/month/"))
    itemlist.append( Item(channel=item.channel, title="Favoritas" , action="peliculas", url=host + "/favorites/month/"))
    itemlist.append( Item(channel=item.channel, title="Mas Votada" , action="peliculas", url=host + "/votes/month/"))
    itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest/month/"))
    itemlist.append( Item(channel=item.channel, title="PornStar" , action="catalogo", url=host + "/pornstars/"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/search?q=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def catalogo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class=\'star\'>.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?<span> (\d+) Videos'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + cantidad + ")"
        scrapedurl = host + scrapedurl
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a class="next" href="([^"]+)">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="catalogo" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    data = scrapertools.get_match(data,'<div class="cats-all categories-list">(.*?)</div>')
    patron = '<a href="([^"]+)".*?>([^"]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedurl = host + scrapedurl
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="video">.*?<a href="([^"]+)".*?<span class="time">(.*?)</span>.*?<img src="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,time,scrapedthumbnail,scrapedtitle in matches:
        title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<a class="next.*?title="Next Page" href="([^"]+)">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<source src="([^"]+)" type="video/mp4" label="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl,scrapedtitle in matches:
        itemlist.append(item.clone(action="play", title=scrapedtitle, fulltitle = item.title, url=scrapedurl))
    return itemlist
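vporn's play() returns one item per <source> tag, so Kodi lists every rendition label. If a channel wanted to auto-pick the tallest rendition instead, sorting on the numeric label would do; a sketch, assuming labels of the form '<height>p' as scraped above:

# Sketch: pick the highest-resolution <source> instead of listing them all.
# Assumes (url, label) tuples with labels like '720p'.
def best_source(matches):
    return max(matches, key=lambda m: int(m[1].rstrip('p')))

url, label = best_source([('u240', '240p'), ('u720', '720p')])  # -> ('u720', '720p')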
16
plugin.video.alfa/channels/watchpornfree.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "watchpornfree",
    "name": "watchpornfree",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "https://watchpornfree.ws/wp-content/uploads/2018/03/Untitled-2.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

75
plugin.video.alfa/channels/watchpornfree.py
Normal file
@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

# https://playpornfree.org/ https://mangoporn.net/ https://watchfreexxx.net/ https://losporn.org/ https://xxxstreams.me/ https://speedporn.net/

host = 'https://watchpornfree.ws'

def mainlist(item):
    logger.info("")
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/movies"))
    itemlist.append( Item(channel=item.channel, title="Parodia" , action="peliculas", url=host + "/category/parodies-hd"))
    itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/category/clips-scenes"))
    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Año" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info("")
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    logger.info("")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if item.title == "Canal":
        data = scrapertools.get_match(data,'>Studios</a>(.*?)</ul>')
    if item.title == "Año":
        data = scrapertools.get_match(data,'>Years</a>(.*?)</ul>')
    if item.title == "Categorias":
        data = scrapertools.get_match(data,'>XXX Genres</div>(.*?)</ul>')
    patron = '<a href="(.*?)".*?>(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info("")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<article class="TPost B">.*?<a href="([^"]+)">.*?src="([^"]+)".*?<div class="Title">([^"]+)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)">Next »</a>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist
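watchpornfree's categorias() narrows data to one menu block before applying a generic anchor pattern; that scope-then-scrape step is what keeps the loose '<a href=...' regex from matching links across the whole page. A sketch of the same two-step idiom, using the "Years" markers from the function above:

# Sketch: scope-then-scrape. Cut the page down to one <ul> block first,
# then apply a permissive link pattern only inside it.
seccion = scrapertools.get_match(data, '>Years</a>(.*?)</ul>')
matches = re.compile('<a href="(.*?)".*?>(.*?)</a>', re.DOTALL).findall(seccion)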
16
plugin.video.alfa/channels/webpeliculasporno.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "webpeliculasporno",
    "name": "webpeliculasporno",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://www.webpeliculasporno.com/wp-content/uploads/logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

74
plugin.video.alfa/channels/webpeliculasporno.py
Normal file
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://www.webpeliculasporno.com'


def mainlist(item):
    logger.info("pelisalacarta.webpeliculasporno mainlist")
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Ultimas" , action="peliculas", url= host))
    itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="peliculas", url= host + "/?display=tube&filtre=views"))
    itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url= host + "/?display=tube&filtre=rate"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist


def search(item, texto):
    logger.info("pelisalacarta.webpeliculasporno search")
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def categorias(item):
    itemlist = []
    data = scrapertools.cache_page(item.url)
    patron = '<li class="cat-item [^>]+><a href="([^"]+)" >([^<]+)'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<li class="border-radius-5 box-shadow">.*?'
    patron += 'src="([^"]+)".*?'
    patron += '<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
        title = scrapedtitle
        contentTitle = title
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle ))
    next_page_url = scrapertools.find_single_match(data,'<li><a class="next page-numbers" href="([^"]+)">Next')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel, action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
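peliculas() above ends with a batch TMDB lookup: set_infoLabels_itemlist enriches every Item in the list in one call, matching on contentTitle. A minimal usage sketch mirroring that call — the channel id and title here are placeholders:

# Sketch: batch TMDB enrichment as used above. Items are matched on
# contentTitle; results populate item.infoLabels in place.
from core import tmdb
from core.item import Item

itemlist = [Item(channel='webpeliculasporno', action='findvideos',
                 title='Some Movie', contentTitle='Some Movie')]
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)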
16
plugin.video.alfa/channels/woodrocket.json
Normal file
@@ -0,0 +1,16 @@
{
    "id": "woodrocket",
    "name": "woodrocket",
    "active": true,
    "adult": true,
    "language": ["*"],
    "thumbnail": "http://woodrocket.com/img//logo.png",
    "banner": "",
    "categories": [
        "adult"
    ],
    "settings": [
    ]
}

75
plugin.video.alfa/channels/woodrocket.py
Normal file
@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb

host = 'http://woodrocket.com'


def mainlist(item):
    logger.info()
    itemlist = []

    itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/porn"))
    itemlist.append( Item(channel=item.channel, title="Parodias" , action="peliculas", url=host + "/parodies"))
    itemlist.append( Item(channel=item.channel, title="Shows" , action="categorias", url=host + "/series"))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories"))
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<div class="media-panel-image">.*?<img src="(.*?)".*?<a href="(.*?)">(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = host + scrapedthumbnail
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t| |<br>", "", data)
    patron = '<div class="media-panel-image">.*?<a href="([^"]+)".*?title="([^"]+)".*?<img src="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        plot = ""
        contentTitle = scrapedtitle
        thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
        title = scrapedtitle
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">»</a></li>')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    patron = '<iframe src="(.*?)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl in matches:
        data = httptools.downloadpage(scrapedurl).data
        scrapedurl = scrapertools.find_single_match(data,'"quality":"\d*","videoUrl":"(.*?)"')
        scrapedurl = scrapedurl.replace("\/", "/")
        itemlist.append(item.clone(action="play", title=scrapedurl, fulltitle = item.title, url=scrapedurl))
    return itemlist
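The videoUrl scraped in woodrocket's play() comes out of a JSON blob, hence the manual '\/' unescaping. Parsing the blob with the json module handles the escapes for free; a sketch — the blob shape is inferred from the regex above, not verified against the site:

# Sketch: let the json module handle '\/' escapes instead of str.replace.
import json

blob = '{"quality":"720","videoUrl":"https:\\/\\/cdn.example\\/v.mp4"}'
video_url = json.loads(blob)['videoUrl']   # -> https://cdn.example/v.mp4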
@@ -13,6 +13,7 @@ from platformcode import config, logger
|
||||
__channel__ = "xms"
|
||||
|
||||
host = 'https://xxxmoviestream.com/'
|
||||
host1 = 'https://www.cam4.com/'
|
||||
try:
|
||||
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
|
||||
__perfil__ = int(config.get_setting('perfil', __channel__))
|
||||
@@ -41,7 +42,6 @@ thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Últimas", url=host + '?filtre=date&cat=0',
|
||||
@@ -60,31 +60,50 @@ def mainlist(item):
|
||||
url=host + 'categories/', viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail=thumbnail % '4'))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="WebCam", action="webcamenu",
|
||||
viewmode="movie_with_plot", viewcontent='movies',
|
||||
thumbnail='https://ae01.alicdn.com/kf/HTB1LDoiaHsrBKNjSZFpq6AXhFXa9/-.jpg'))
|
||||
|
||||
itemlist.append(Item(channel=__channel__, title="Buscador", action="search", url=host, thumbnail=thumbnail % '5'))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def webcamenu(item):
|
||||
logger.info()
|
||||
itemlist = [item.clone(title="Trending Cams", action="webcam", text_blod=True, url=host1,
|
||||
viewcontent='movies', viewmode="movie_with_plot"),
|
||||
item.clone(title="Females", action="webcam", text_blod=True,
|
||||
viewcontent='movies', url=host1 + 'female', viewmode="movie_with_plot"),
|
||||
item.clone(title="Males", action="webcam", text_blod=True,
|
||||
viewcontent='movies', url=host1 + 'male', viewmode="movie_with_plot"),
|
||||
item.clone(title="Couples", action="webcam", text_blod=True,
|
||||
viewcontent='movies', url=host1 + 'couple', viewmode="movie_with_plot"),
|
||||
item.clone(title="Trans", action="webcam", text_blod=True, extra="Películas Por año",
|
||||
viewcontent='movies', url=host1 + 'transgender', viewmode="movie_with_plot")]
|
||||
return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data)
|
||||
# logger.info(data)
|
||||
patron_todos = '<div id="content">(.*?)<div id="footer"'
|
||||
data = scrapertools.find_single_match(data, patron_todos)
|
||||
|
||||
patron = 'src="([^"]+)" class="attachment-thumb_site.*?' # img
|
||||
patron += '<a href="([^"]+)" title="([^"]+)".*?' #url, title
|
||||
patron += '<div class="right"><p>([^<]+)</p>' # plot
|
||||
patron += '<a href="([^"]+)" title="([^"]+)".*?' # url, title
|
||||
patron += '<div class="right"><p>([^<]+)</p>' # plot
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedthumbnail, scrapedurl, scrapedtitle, plot in matches:
|
||||
plot = scrapertools.decodeHtmlentities(plot)
|
||||
|
||||
itemlist.append(item.clone(channel=__channel__, action="findvideos", title=scrapedtitle.capitalize(),
|
||||
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot}, fanart=scrapedthumbnail,
|
||||
viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
|
||||
url=scrapedurl, thumbnail=scrapedthumbnail, infoLabels={"plot": plot},
|
||||
fanart=scrapedthumbnail,viewmode="movie_with_plot",
|
||||
folder=True, contentTitle=scrapedtitle))
|
||||
# Extrae el paginador
|
||||
paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)">Next ›</a></li><li>')
|
||||
paginacion = urlparse.urljoin(item.url, paginacion)
|
||||
@@ -94,6 +113,36 @@ def peliculas(item):
|
||||
thumbnail=thumbnail % 'rarrow',
|
||||
title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def webcam(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|&#038;", "", data)
    patron = '<div class="profileBox">.*?<a href="/([^"]+)".*?'  # url
    patron += 'data-hls-preview-url="([^"]+)">.*?'  # video_url
    patron += 'data-username="([^"]+)".*?'  # username
    patron += 'title="([^"]+)".*?'  # title
    patron += 'data-profile="([^"]+)" />'  # img
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, video_url, username, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapedtitle.replace(' Chat gratis con webcam.', '')

        itemlist.append(item.clone(channel=__channel__, action="play", title=scrapedtitle,
                                   url=video_url, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                                   viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
    # Extract the pager link
    paginacion = scrapertools.find_single_match(data, '<span id="pagerSpan">\d+</span> <a href="([^"]+)"')
    paginacion = urlparse.urljoin(item.url, paginacion)

    if paginacion:
        itemlist.append(Item(channel=__channel__, action="webcam",
                             thumbnail=thumbnail % 'rarrow',
                             title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))

    return itemlist

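The pager handling in webcam is a two-step idiom: pull a (possibly relative) link out of the page, then resolve it against the current URL. A standalone equivalent, assuming scrapertools.find_single_match behaves like re.search returning the first group or '' on no match (markup below is illustrative):

import re
import urlparse  # Python 2, as used by this channel

def find_single_match(data, patron):
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

data = '<span id="pagerSpan">3</span> <a href="/webcams?page=4">Next</a>'
next_path = find_single_match(data, '<span id="pagerSpan">\d+</span> <a href="([^"]+)"')
print(urlparse.urljoin('https://example.org/webcams?page=3', next_path))
# -> https://example.org/webcams?page=4
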
@@ -103,10 +152,9 @@ def categorias(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = 'data-lazy-src="([^"]+)".*?'  # img
    patron += '</noscript><a href="([^"]+)".*?'  # url
    patron += '<span>([^<]+)</span></a>.*?'  # title
    patron = 'data-lazy-src="([^"]+)".*?'  # img
    patron += '</noscript><a href="([^"]+)".*?'  # url
    patron += '<span>([^<]+)</span></a>.*?'  # title
    patron += '<span class="nb_cat border-radius-5">([^<]+)</span>'  # num_vids
    matches = re.compile(patron, re.DOTALL).findall(data)

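Note the behavioral consequence of the added num_vids group in categorias: with an extra capture group, findall yields wider tuples, so any downstream unpacking must name one more variable. A quick illustrative check with made-up markup:

import re

data = '<span>Amateur</span></a><span class="nb_cat border-radius-5">128 videos</span>'
patron = '<span>([^<]+)</span></a>.*?<span class="nb_cat border-radius-5">([^<]+)</span>'
print(re.findall(patron, data, re.DOTALL))
# -> [('Amateur', '128 videos')]
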
@@ -142,16 +190,15 @@ def sub_search(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)

    patron = 'data-lazy-src="([^"]+)".*?'  # img
    patron += 'title="([^"]+)" />.*?'  # title
    patron += '</noscript><a href="([^"]+)".*?'  # url
    patron = 'data-lazy-src="([^"]+)".*?'  # img
    patron += 'title="([^"]+)" />.*?'  # title
    patron += '</noscript><a href="([^"]+)".*?'  # url
    patron += '<div class="right"><p>([^<]+)</p>'  # plot
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, plot=plot, fanart=scrapedthumbnail,
                        action="findvideos", thumbnail=scrapedthumbnail))
                                   action="findvideos", thumbnail=scrapedthumbnail))

    paginacion = scrapertools.find_single_match(
        data, "<a href='([^']+)' class=\"inactive\">\d+</a>")
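The pager regex in sub_search mixes quoting styles: the href value is single-quoted in the page markup while class="inactive" is double-quoted, which is why the pattern is written as a double-quoted Python string with an escaped inner quote. An illustrative fragment:

import re

data = "<a href='/?s=query&paged=2' class=\"inactive\">2</a>"
print(re.findall("<a href='([^']+)' class=\"inactive\">\d+</a>", data))
# -> ['/?s=query&paged=2']
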
@@ -167,15 +214,13 @@ def findvideos(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)

    patron = '<iframe src="([^"]+)".*?webkitallowfullscreen="true" mozallowfullscreen="true"></iframe>'
    patron = '<iframe src="[^"]+".*?<iframe src="([^"]+)" scrolling="no" frameborder="0"'
    matches = scrapertools.find_multiple_matches(data, patron)

    for url in matches:
        server = servertools.get_server_from_url(url)
        title = "Ver en: [COLOR yellow](%s)[/COLOR]" % server
        title = "Ver en: [COLOR yellow](%s)[/COLOR]" % server.title()

        itemlist.append(item.clone(action='play', title=title, server=server, url=url))

    return itemlist

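In findvideos, server comes back from servertools.get_server_from_url as a plain string identifier (typically lowercase); the str.title() call added here only changes the label shown to the user, while the untouched server id is what gets passed along on the cloned item. For example, with a hypothetical server id:

server = 'fembed'  # illustrative value; real ids come from get_server_from_url
print("Ver en: [COLOR yellow](%s)[/COLOR]" % server.title())
# -> Ver en: [COLOR yellow](Fembed)[/COLOR]
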
Some files were not shown because too many files have changed in this diff.