Merge remote-tracking branch 'alfa-addon/master' into fixes

This commit is contained in:
Unknown
2018-10-28 18:48:19 -03:00
26 changed files with 1671 additions and 205 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.9" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.10" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,17 +19,17 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ cinetux ¤ asialiveaction
¤ dospelis ¤ pelisfox
¤ pelisplus ¤ pelisplusco
¤ poseidonhd ¤ yts
¤ anitoons ¤ goovie ¤ playporn
¤ gnula ¤ pelisr ¤ peliculonhd
¤ thevid ¤ vidcloud ¤ xhamster
¤ descargacineclasico
[COLOR green][B]Novedades[/B][/COLOR]
¤ peliculashd ¤ peliculonhd
¤ tikiwiki ¤ vidcloud
¤ dramasjc ¤ xms
¤ repelis.live ¤ zonaworld ¤ subtorrents
¤ fex ¤ xdrive ¤ cat3plus
¤ sleazemovies
¤Agradecimientos a @diegotcba y @wrlopez por colaborar en ésta versión
¤ Agradecimientos a @diegotcba y @sculkurt por colaborar en ésta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -0,0 +1,14 @@
{
"id": "cat3plus",
"name": "Cat3plus",
"active": true,
"adult": true,
"language": [],
"thumbnail": "https://i.imgur.com/SJxXKa2.png",
"fanart": "https://i.imgur.com/ejCwTxT.jpg",
"banner": "https://i.imgur.com/bXUyk6m.png",
"categories": [
"movie",
"vo"
]
}

View File

@@ -0,0 +1,130 @@
# -*- coding: utf-8 -*-
# -*- Channel Cat3plus -*-
# -*- Created for Alfa-addon -*-
# -*- By Sculkurt -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.cat3plus.com/'
headers = [
['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
['Accept-Encoding', 'gzip, deflate'],
['Referer', host]
]
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)))
itemlist.append(item.clone(title="Años", action="years", url=host, thumbnail=get_thumb('year', auto=True)))
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)))
return itemlist
def years(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = "<a dir='ltr' href='([^']+)'>([^<]+)</a>"
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<h2 class='post-title entry-title'><a href='([^']+)'>([^(]+).*?\(([^)]+).*?"
patron += 'src="([^"]+).*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, year, img in matches:
itemlist.append(Item(channel = item.channel,
title = scrapedtitle,
url = scrapedurl,
action = "findvideos",
thumbnail = img,
contentTitle = scrapedtitle,
contentType = "movie",
infoLabels = {'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
# Extraer la marca de siguiente página
next_page = scrapertools.find_single_match(data, "<a class='blog-pager-older-link' href='([^']+)'")
if next_page != "":
itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
item.url = host + "search?q=" + texto
item.extra = "busqueda"
try:
return list_all(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<h2>\s*<a href="([^"]+)" target="_blank">.*?</a></h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
data = httptools.downloadpage(url, headers={'Referer': item.url}).data
itemlist.extend(servertools.find_video_items(data=data))
for video in itemlist:
video.channel = item.channel
video.contentTitle = item.contentTitle
video.title = video.server.capitalize()
# Opción "Añadir esta pelicula a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel = item.channel,
title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url = item.url,
action = "add_pelicula_to_library",
extra = "findvideos",
contentTitle = item.contentTitle,
thumbnail = item.thumbnail
))
return itemlist
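
A minimal, self-contained sketch of what the list_all() pattern extracts, run against an invented Blogger-style snippet; only the two regexes come from the channel, the HTML and URLs are made up.

import re

html = ("<h2 class='post-title entry-title'>"
        "<a href='http://example.com/movie-1'>Movie One (1981) HDRip</a></h2>"
        '<img src="http://example.com/thumb1.jpg" width="160">')
patron = "<h2 class='post-title entry-title'><a href='([^']+)'>([^(]+).*?\(([^)]+).*?"
patron += 'src="([^"]+).*?'
for url, title, year, img in re.compile(patron, re.DOTALL).findall(html):
    print("%s | %s | %s | %s" % (title.strip(), year, url, img))
# -> Movie One | 1981 | http://example.com/movie-1 | http://example.com/thumb1.jpg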

View File

@@ -1,73 +1,57 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
itemlist.append(item.clone(action="categorias", title="Categorías", url="http://dato.porn/categories_all", contentType="movie", viewmode="movie"))
itemlist.append(item.clone(title="Buscar...", action="search", contentType="movie", viewmode="movie"))
return itemlist
data = httptools.downloadpage(page_url).data
if 'File Not Found' in data or '404 Not Found' in data:
return False, "[Datoporn] El archivo no existe o ha sido borrado"
return True, ""
def search(item, texto):
logger.info()
item.url = "http://dato.porn/?k=%s&op=search" % texto.replace(" ", "+")
return lista(item)
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
logger.debug(data)
media_urls = scrapertools.find_multiple_matches(data, 'src: "([^"]+)",.*?label: "([^"]+)"')
#media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
# if not media_urls:
# match = scrapertools.find_single_match(data, "p,a,c,k(.*?)</script>")
# try:
# data = jsunpack.unpack(match)
# except:
# pass
# media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
def lista(item):
logger.info()
itemlist = []
# Extrae la URL
calidades = []
video_urls = []
for media_url in sorted(media_urls, key=lambda x: int(x[1][-3:])):
calidades.append(int(media_url[1][-3:]))
try:
title = ".%s %sp [datoporn]" % (media_url[0].rsplit('.', 1)[1], media_url[1][-3:])
except:
title = ".%s %sp [datoporn]" % (media_url[-4:], media_url[1][-3:])
video_urls.append([title, media_url[0]])
# Descarga la pagina
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
calidades.sort()
m3u8 = scrapertools.find_single_match(data, 'file\:"([^"]+\.m3u8)"')
if not m3u8:
m3u8 = str(scrapertools.find_multiple_matches(data, 'player.updateSrc\({src:.?"([^"]+\.m3u8)"')).replace("['", "").replace("']", "")
calidades = ['720p']
if m3u8:
video_urls.insert(0, [".m3u8 %s [datoporn]" % calidades[-1], m3u8])
# Extrae las entradas
patron = '<div class="videobox">\s*<a href="([^"]+)".*?url\(\'([^\']+)\'.*?<span>(.*?)<\/span><\/div><\/a>.*?class="title">(.*?)<\/a><span class="views">.*?<\/a><\/span><\/div> '
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, duration, scrapedtitle in matches:
if "/embed-" not in scrapedurl:
#scrapedurl = scrapedurl.replace("dato.porn/", "dato.porn/embed-") + ".html"
scrapedurl = scrapedurl.replace("datoporn.co/", "datoporn.co/embed-") + ".html"
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
scrapedtitle += ' gb'
scrapedtitle = scrapedtitle.replace(":", "'")
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
#logger.debug(scrapedurl + ' / ' + scrapedthumbnail + ' / ' + duration + ' / ' + scrapedtitle)
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg")))
# Extrae la marca de siguiente página
#next_page = scrapertools.find_single_match(data, '<a href=["|\']([^["|\']+)["|\']>Next')
next_page = scrapertools.find_single_match(data, '<a class=["|\']page-link["|\'] href=["|\']([^["|\']+)["|\']>Next')
if next_page and itemlist:
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
return itemlist
def categorias(item):
logger.info()
itemlist = []
# Descarga la pagina
data = httptools.downloadpage(item.url).data
# Extrae las entradas (carpetas)
patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\((.*?)\).*?<span>(.*?)</span>.*?<b>(.*?)</b>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, numero, scrapedtitle in matches:
if numero:
scrapedtitle = "%s (%s)" % (scrapedtitle, numero)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail))
return itemlist
return video_urls
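
Isolated from the interleaved diff above, the ordering step of the rewritten Datoporn connector is a sort on the trailing digits of the player label. A sketch with invented (url, label) pairs, assuming labels are bare heights such as "360"/"720" (the int(x[1][-3:]) key requires that):

media_urls = [("http://cdn.example/v.mp4", "720"), ("http://cdn.example/v_lo.mp4", "360")]
video_urls = []
for media_url in sorted(media_urls, key=lambda x: int(x[1][-3:])):
    title = ".%s %sp [datoporn]" % (media_url[0].rsplit('.', 1)[1], media_url[1][-3:])
    video_urls.append([title, media_url[0]])
print(video_urls)  # lowest height first; an .m3u8, when found, is inserted at index 0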

View File

@@ -8,7 +8,7 @@ from platformcode import config, logger
from channelselector import get_thumb
host = "http://gnula.nu/"
host_search = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=small&num=20&hl=es&prettyPrint=false&source=gcsc&gss=.es&sig=45e50696e04f15ce6310843f10a3a8fb&cx=014793692610101313036:vwtjajbclpq&q=%s&cse_tok=%s&googlehost=www.google.com&callback=google.search.Search.apiary10745&nocache=1519145965573&start=0"
host_search = "https://cse.google.com/cse/element/v1?rsz=filtered_cse&num=20&hl=es&source=gcsc&gss=.es&sig=c891f6315aacc94dc79953d1f142739e&cx=014793692610101313036:vwtjajbclpq&q=%s&safe=off&cse_tok=%s&googlehost=www.google.com&callback=google.search.Search.csqr6098&nocache=1540313852177&start=0"
item_per_page = 20
@@ -58,9 +58,9 @@ def sub_search(item):
break
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + item_per_page
item.url = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
patron = '(?s)clicktrackUrl":".*?q=(.*?)".*?'
patron += 'title":"([^"]+)".*?'
patron += 'cseImage":{"src":"([^"]+)"'
patron = '(?s)clicktrackUrl":\s*".*?q=(.*?)".*?'
patron += 'title":\s*"([^"]+)".*?'
patron += '"src":\s*"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
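
The endpoint swap also loosens the JSON scrape: each sub-pattern now tolerates whitespace after the colon, and the image match no longer requires the cseImage prefix. A self-contained check against an invented response fragment:

import re

frag = ('{"clicktrackUrl": "https://www.google.com/url?q=http://gnula.nu/peli-online/",'
        ' "title": "Alguna Pelicula",'
        ' "richSnippet": {"cseImage": {"src": "http://img.example/p.jpg"}}}')
patron = '(?s)clicktrackUrl":\s*".*?q=(.*?)".*?'
patron += 'title":\s*"([^"]+)".*?'
patron += '"src":\s*"([^"]+)"'
print(re.findall(patron, frag))
# -> [('http://gnula.nu/peli-online/', 'Alguna Pelicula', 'http://img.example/p.jpg')]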

View File

@@ -70,7 +70,7 @@
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 5,
"default": 10,
"enabled": true,
"visible": true,
"lvalues": [

View File

@@ -30,6 +30,7 @@ channel = "grantorrent"
dict_url_seasons = dict()
__modo_grafico__ = config.get_setting('modo_grafico', channel)
timeout = config.get_setting('timeout_downloadpage', channel)
if timeout <= 5: timeout = timeout*2
modo_serie_temp = config.get_setting('seleccionar_serie_temporada', channel)
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel)
@@ -154,10 +155,10 @@ def listado(item):
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
if timeout_search < 5:
timeout_search = 5 # Timeout un poco más largo para las búsquedas
if item.action == 'search':
timeout_search = int(timeout * 1.5) # Timeout un poco más largo para las búsquedas
if timeout_search < 10:
timeout_search = 10 # Timeout un poco más largo para las búsquedas
#Máximo num. de líneas permitidas por TMDB (40). Máx de 5 páginas por Itemlist para no degradar el rendimiento.
#Si itemlist sigue vacío después de leer 5 páginas, se pueden llegar a leer hasta 10 páginas para encontrar algo
@@ -170,12 +171,12 @@ def listado(item):
item.post = item.url
video_section = ''
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.post, timeout=timeout_search).data)
video_section = scrapertools.find_single_match(data, '<div class="contenedor-home">(.*?</div>)</div></div>')
video_section = scrapertools.find_single_match(data, '<div class="contenedor-home">(?:\s*<div class="titulo-inicial">\s*Últi.*?Añadi...\s*<\/div>)?\s*<div class="contenedor-imagen">\s*(<div class="imagen-post">.*?<\/div><\/div>)<\/div>')
except:
pass
cnt_next += 1
if not data: #Si la web está caída salimos sin dar error
if not data or 'Error 503 Backend fetch failed' in data: #Si la web está caída salimos sin dar error
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + video_section)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
if len(itemlist) > 1:
@@ -217,12 +218,12 @@ def listado(item):
cnt_next = 99 #No hay más páginas. Salir del bucle después de procesar ésta
# Preparamos un patron que pretende recoger todos los datos significativos del video
patron = '<a href="(?P<url>[^"]+)"><img.*?src="(?P<thumb>[^"]+)".*?'
patron = '<div class="imagen-post">\s*<a href="(?P<url>[^"]+)"><img.*?src="(?P<thumb>[^"]+)".*?'
if "categoria" in item.url or item.media == "search": #Patron distinto para páginas de Categorías o Búsquedas
patron += 'class="attachment-(?P<quality>.*?)-(?P<lang>[^\s]+)\s.*?'
else:
patron += 'class="bloque-superior">\s*(?P<quality>.*?)\s*<div class="imagen-idioma">\s*<img src=".*?icono_(?P<lang>[^\.]+).*?'
patron += '<div class="bloque-inferior">\s*(?P<title>.*?)\s*<\/div>\s?<div class="bloque-date">\s*(?P<date>.*?)\s*<\/div>'
patron += '<div class="bloque-inferior">\s*(?P<title>.*?)\s*<\/div>\s*<div class="bloque-date">\s*(?P<date>.*?)\s*<\/div>\s*<\/div>'
matches_alt = re.compile(patron, re.DOTALL).findall(video_section)
if not matches_alt and not '<div class="titulo-load-core">0 resultados' in data: #error
@@ -231,6 +232,7 @@ def listado(item):
item, itemlist = generictools.post_tmdb_listado(item, itemlist) #Llamamos al método para el pintado del error
return itemlist #Salimos
if video_section: data = video_section
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
if len(itemlist) > 1:
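
Combined with the settings change above (default timeout raised from 5 to 10), the effective policy is: double small configured timeouts, then give searches a 1.5x bump with a floor of 10 seconds. A condensed sketch; the helper name is illustrative, not part of the channel:

def effective_timeouts(timeout, is_search):
    if timeout <= 5:
        timeout = timeout * 2                 # small configured values are doubled
    timeout_search = timeout
    if is_search:
        timeout_search = int(timeout * 1.5)   # searches get a longer timeout...
        if timeout_search < 10:
            timeout_search = 10               # ...with a floor of 10 seconds
    return timeout, timeout_search

print(effective_timeouts(5, True))    # -> (10, 15)
print(effective_timeouts(3, False))   # -> (6, 6)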

View File

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
@@ -12,10 +13,12 @@ host = "https://watchfreexxx.net/"
def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todas", action="lista",
thumbnail='https://s18.postimg.cc/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
url =host))
itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista",
url = urlparse.urljoin(host, "category/porn-movies/")))
itemlist.append(Item(channel=item.channel, title="Escenas", action="lista",
url = urlparse.urljoin(host, "category/xxx-scenes/")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
@@ -29,34 +32,27 @@ def lista(item):
itemlist = []
if item.url == '': item.url = host
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if item.extra != 'Buscar':
patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt=(.*?) width'
else:
patron = '<div class=movie>.*?<img src=(.*?) alt=(.*?) \/>.*?href=(.*?)\/>'
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<article id=.*?<a href="([^"]+)".*?<img data-src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_1, data_2, data_3 in matches:
if item.extra != 'Buscar':
url = data_1
thumbnail = data_2
title = data_3
else:
url = data_3
thumbnail = data_1
title = data_2
url = data_1
thumbnail = data_2
title = data_3
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail))
# #Paginacion
#Paginacion
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) \/>')
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Next</a>')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png', extra=item.extra))
return itemlist
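
The rewrite drops the quote-stripping re.sub and the Buscar special case: the new theme wraps every result in an <article>, so a single pattern now serves listings and searches alike. A quick check on an invented snippet (only the two regexes are the channel's):

import re

html = ('<article id="post-11"><a href="http://site.example/movie-one/">'
        '<img data-src="http://site.example/t1.jpg" alt="Movie One"></a></article>'
        '<a href="http://site.example/page/2/">Next</a>')
patron = '<article id=.*?<a href="([^"]+)".*?<img data-src="([^"]+)" alt="([^"]+)"'
print(re.compile(patron, re.DOTALL).findall(html))   # [(url, thumbnail, title)]
print(re.search('<a href="([^"]+)">Next</a>', html).group(1))  # next page URL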

View File

@@ -0,0 +1,55 @@
{
"id": "repelislive",
"name":"Repelis.live",
"thumbnail":"https://i.postimg.cc/j5ndjr3j/repelislive.png",
"banner":"",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"version": 1,
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,246 @@
# -*- coding: utf-8 -*-
# -*- Channel Repelis.live -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from channels import filtertools
from channels import autoplay
IDIOMAS = {'Latino': 'LAT', 'Castellano':'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'rapidvideo', 'netutv']
host = "http://repelis.live/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(
Item(channel=item.channel,
title="Ultimas",
action="list_all",
url=host,
thumbnail=get_thumb("last", auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Castellano",
action="list_all",
url=host+'pelis-castellano/',
thumbnail=get_thumb("cast", auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Latino",
action="list_all",
url=host+'pelis-latino/',
thumbnail=get_thumb("lat", auto=True)))
itemlist.append(
Item(channel=item.channel,
title="VOSE",
action="list_all",
url=host+'pelis-subtitulado/',
thumbnail=get_thumb("vose", auto=True)))
itemlist.append(
Item(channel=item.channel,
title="Generos",
action="categories",
url=host,
thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(
Item(channel=item.channel,
title="Por Año",
action="categories",
url=host,
thumbnail=get_thumb('year', auto=True)
))
itemlist.append(
Item(channel=item.channel,
title="Buscar",
action="search",
url=host + '?s=',
thumbnail=get_thumb("search", auto=True)
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def categories(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.title != 'Generos':
patron = '<option value="([^"]+)">([^<]+)</option>'
else:
data = scrapertools.find_single_match(data, '</span>Categories</h3><ul>(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(Item(channel=item.channel,
action="list_all",
title=title,
url=url
))
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.title == 'Buscar':
pattern = '<div class="row"> <a href="([^"]+)" title="([^\(]+)\(.*?">.*?<img src="([^"]+)".*?'
pattern += '<p class="main-info-list">Pelicula del (\d{4})'
else:
pattern = '<div class="col-mt-5 postsh">.?<div class="poster-media-card"> <a href="([^"]+)" '
pattern += 'title="([^\(]+)\(.*?">.*?"anio".*?>(\d{4}).*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, pattern)
for url, title, year, thumb in matches:
new_item = Item(channel=item.channel,
title=title,
url=url,
action='findvideos',
contentTitle=title,
thumbnail=thumb,
infoLabels = {'year': year}
)
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)"')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="list_all",
title=">> Página siguiente",
url=next_page,
))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
trailer = ''
data = get_source(item.url)
patron = '<a href="#embed\d+".*?data-src="([^"]+)".*?"tab">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, language in matches:
data = httptools.downloadpage(url, follow_redirects=False, headers={'Referer':item.url}, only_headers=True)
url = data.headers['location']
if config.get_setting('unify'):
title = ''
else:
title = ' [%s]' % language
if 'youtube' in url:
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
else:
itemlist.append(Item(channel=item.channel,
title='%s'+title,
url=url,
action='play',
language=IDIOMAS[language],
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if trailer != '':
itemlist.append(trailer)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
if texto != '':
item.url += texto
return list_all(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(category):
logger.info()
item = Item()
try:
if category == 'peliculas':
item.url = host
elif category == 'infantiles':
item.url = host + 'category/animacion'
elif category == 'terror':
item.url = host + 'category/terror'
itemlist = list_all(item)
if itemlist[-1].title == '>> Página siguiente':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
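
The notable step in findvideos() is that repelis.live hides each hoster behind a redirecting data-src URL, so the channel requests it without following the redirect and reads the Location header. Reduced to its core (placeholder URLs; runnable only inside the addon environment):

from core import httptools

embed_url = 'http://repelis.live/some-embed'    # hypothetical, scraped from the page
page_url = 'http://repelis.live/some-movie/'    # hypothetical referer
resp = httptools.downloadpage(embed_url, follow_redirects=False,
                              headers={'Referer': page_url}, only_headers=True)
print(resp.headers['location'])  # the real hoster URL (openload, streamango, ...)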

View File

@@ -104,9 +104,9 @@ def sub_search(item):
data = httptools.downloadpage(item.url).data
token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
data = httptools.downloadpage(item.url + "&_token=" + token, headers=headers).data
logger.info("Intel33 %s" %data)
#logger.info("Intel33 %s" %data)
data_js = jsontools.load(data)["data"]["m"]
logger.info("Intel44 %s" %data_js)
#logger.info("Intel44 %s" %data_js)
for js in data_js:
itemlist.append(Item(channel = item.channel,
action = "findvideos",
@@ -139,14 +139,15 @@ def peliculas(item):
post = "page=%s&type=%s&_token=%s" %(item.page, item.type, token)
if item.slug:
post += "&slug=%s" %item.slug
logger.info("Intel11 %s" %post)
#logger.info("Intel11 %s" %post)
data = httptools.downloadpage(host + "/pagination", post=post, headers=headers).data
patron = 'href="([^"]+)".*?'
#logger.info("Intel11 %s" %data)
patron = '(?s)href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += '<p>([^<]+).*?'
patron += '<span>([^<]+)'
patron += 'text-center">([^<]+).*?'
patron += '<p>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle , scrapedyear in matches:
for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
itemlist.append(Item(channel = item.channel,
action = "findvideos",
contentTitle = scrapedtitle,

View File

@@ -0,0 +1,14 @@
{
"id": "sleazemovies",
"name": "SleazeMovies",
"active": true,
"adult": true,
"language": [],
"thumbnail": "https://i.imgur.com/x0tzGxQ.jpg",
"banner": "https://i.imgur.com/d8LsUNf.png",
"fanart": "https://i.imgur.com/NRdQvFW.jpg",
"categories": [
"movie",
"vo"
]
}

View File

@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-
# -*- Channel SleazeMovies -*-
# -*- Created for Alfa-addon -*-
# -*- By Sculkurt -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.eroti.ga/'
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)))
itemlist.append(item.clone(title="Generos", action="genero", url=host, thumbnail=get_thumb('genres', auto=True)))
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)))
return itemlist
def genero(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="cat-item.*?<a href="([^"]+)">([^<]+)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl))
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
patron = '<div class="featured-thumb"><a href="([^"]+)"><img.*?src="([^?]+).*?data-image-title="([^\(]+).*?\(([^\)]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, img, scrapedtitle, year in matches:
itemlist.append(Item(channel = item.channel,
title = scrapedtitle,
url = scrapedurl,
action = "findvideos",
thumbnail = img,
contentTitle = scrapedtitle,
contentType = "movie",
infoLabels = {'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
# Extrae la marca de siguiente página
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">Next</a></div>')
if next_page != "":
itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
item.url = host + "?s=" + texto
item.extra = "busqueda"
try:
return list_all(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
logger.debug('codigo = ' + data)
itemlist.extend(servertools.find_video_items(data=data))
for video in itemlist:
video.channel = item.channel
video.contentTitle = item.contentTitle
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel = item.channel,
title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url = item.url,
action = "add_pelicula_to_library",
extra = "findvideos",
contentTitle = item.contentTitle,
thumbnail = item.thumbnail
))
return itemlist
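
Unlike cat3plus, this channel does not match hoster links itself: it hands the raw page to servertools and lets every server connector recognise its own embed pattern. Schematically (the iframe fragment is invented; runnable only inside the addon environment):

from core import servertools

data = '<iframe src="https://openload.co/embed/abc123/"></iframe>'
for video in servertools.find_video_items(data=data):
    print("%s -> %s" % (video.server, video.url))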

View File

@@ -0,0 +1,85 @@
{
"id": "subtorrents",
"name": "SubTorrents",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://www.subtorrents.tv/wp-content/themes/SubTorrent/css/images/logo2.png",
"categories": [
"movie",
"tvshow",
"torrent",
"vos"
],
"settings": [
{
"default": true,
"enabled": true,
"id": "include_in_global_search",
"label": "Incluir en busqueda global",
"type": "bool",
"visible": true
},
{
"default": true,
"enabled": true,
"id": "modo_grafico",
"label": "Buscar información extra (TMDB)",
"type": "bool",
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"CAST",
"LAT",
"VO",
"VOS",
"VOSE"
]
},
{
"id": "timeout_downloadpage",
"type": "list",
"label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
"default": 10,
"enabled": true,
"visible": true,
"lvalues": [
"None",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
]
},
{
"id": "seleccionar_ult_temporadda_activa",
"type": "bool",
"label": "Seleccionar para Videoteca si estará activa solo la última Temporada",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": false
}
]
}

View File

@@ -0,0 +1,779 @@
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urlparse
import time
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
from lib import generictools
from channels import filtertools
from channels import autoplay
#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['torrent']
host = 'https://www.subtorrents.tv/'
sufix = '.tv/'
channel = 'subtorrents'
categoria = channel.capitalize()
color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']
__modo_grafico__ = config.get_setting('modo_grafico', channel)
modo_ultima_temp = config.get_setting('seleccionar_ult_temporadda_activa', channel) #Actualización sólo últ. Temporada?
timeout = config.get_setting('timeout_downloadpage', channel)
def mainlist(item):
logger.info()
itemlist = []
thumb_pelis_hd = get_thumb("channels_movie_hd.png")
thumb_series = get_thumb("channels_tvshow.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, url=host, title="Películas", action="submenu", thumbnail=thumb_pelis_hd, extra="peliculas"))
itemlist.append(Item(channel=item.channel, url=host, title="Series", action="submenu", thumbnail=thumb_series, extra="series"))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "?s=%s", thumbnail=thumb_buscar, extra="search"))
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
autoplay.show_option(item.channel, itemlist) #Activamos Autoplay
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def submenu(item):
logger.info()
itemlist = []
thumb_cartelera = get_thumb("now_playing.png")
thumb_latino = get_thumb("channels_latino.png")
thumb_pelis = get_thumb("channels_movie.png")
thumb_pelis_AZ = get_thumb("channels_movie_az.png")
thumb_pelis_hd = get_thumb("channels_movie_hd.png")
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_AZ = get_thumb("channels_tvshow_az.png")
thumb_buscar = get_thumb("search.png")
thumb_separador = get_thumb("next.png")
thumb_settings = get_thumb("setting_0.png")
if item.extra == "peliculas":
itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=host + "peliculas-subtituladas/?filtro=estrenos", thumbnail=thumb_cartelera, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title=" Castellano o Latino", action="listado", url=host + "peliculas-subtituladas/?filtro=estrenos&filtro2=audio-latino", thumbnail=thumb_latino, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title="Películas", action="listado", url=host + "peliculas-subtituladas", thumbnail=thumb_pelis, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title=" Castellano o Latino", action="listado", url=host + "peliculas-subtituladas/?filtro=audio-latino", thumbnail=thumb_latino, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title=" Alfabético A-Z", action="alfabeto", url=host + "peliculas-subtituladas/?s=letra-%s", thumbnail=thumb_pelis_AZ, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title="3D", action="listado", url=host + "peliculas-3d/", thumbnail=thumb_pelis, extra="peliculas"))
itemlist.append(Item(channel=item.channel, title="Calidad DVD", action="listado", url=host + "calidad/dvd-full/", thumbnail=thumb_pelis, extra="peliculas"))
if item.extra == "series":
itemlist.append(item.clone(title="Series", action="listado", url=item.url + "series/", thumbnail=thumb_series, extra="series"))
itemlist.append(item.clone(title=" Alfabético A-Z", action="alfabeto", url=item.url + "series/?s=letra-%s", thumbnail=thumb_series_AZ, extra="series"))
return itemlist
def alfabeto(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="listado", title="0-9", url=item.url % "0"))
for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']:
itemlist.append(item.clone(action="listado", title=letra, url=item.url % letra.lower()))
return itemlist
def listado(item):
logger.info()
itemlist = []
item.category = categoria
#logger.debug(item)
curr_page = 1 # Página inicial
last_page = 99999 # Última página inicial
if item.curr_page:
curr_page = int(item.curr_page) # Si viene de una pasada anterior, lo usamos
del item.curr_page # ... y lo borramos
if item.last_page:
last_page = int(item.last_page) # Si viene de una pasada anterior, lo usamos
del item.last_page # ... y lo borramos
cnt_tot = 40 # Poner el num. máximo de items por página
cnt_title = 0 # Contador de líneas insertadas en Itemlist
inicio = time.time() # Controlaremos que el proceso no exceda de un tiempo razonable
fin = inicio + 5 # Después de este tiempo pintamos (segundos)
timeout_search = timeout # Timeout para descargas
if item.extra == 'search':
timeout_search = timeout * 2 # Timeout un poco más largo para las búsquedas
if timeout_search < 5:
timeout_search = 5 # Timeout un poco más largo para las búsquedas
#Sistema de paginado para evitar páginas vacías o semi-vacías en casos de búsquedas con series con muchos episodios
title_lista = [] # Guarda la lista de series que ya están en Itemlist, para no duplicar lineas
if item.title_lista: # Si viene de una pasada anterior, la lista ya estará guardada
title_lista.extend(item.title_lista) # Se usa la lista de páginas anteriores en Item
del item.title_lista # ... limpiamos
if not item.extra2: # Si viene de Catálogo o de Alfabeto
item.extra2 = ''
next_page_url = item.url
#Máximo num. de líneas permitidas por TMDB. Máx de 10 segundos por Itemlist para no degradar el rendimiento
while cnt_title < cnt_tot * 0.5 and curr_page <= last_page and fin > time.time():
# Descarga la página
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout_search).data)
#data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
except:
pass
curr_page += 1 #Apunto ya a la página siguiente
if not data: #Si la web está caída salimos sin dar error
logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
break #si no hay más datos, algo no funciona, pintamos lo que tenemos
#Patrón para todo, menos para Series completas, incluido búsquedas en cualquier caso
patron = '<td class="vertThseccion"><img src="([^"]+)"[^>]+><a href="([^"]+)"\s*title="([^"]+)"\s*>[^<]+<\/a><\/td><td>.*?(\d+)?<\/td><td>([^<]+)?<\/td><td>([^<]+)?<\/td><\/tr>'
#Si son series completas, ponemos un patrón especializado
if item.extra == 'series':
patron = '<(td)><a href="([^"]+)"\s*title="([^"]+)"\s*><[^>]+src="[^"]+\/(\d{4})[^"]+"[^>]+>(?:(\d+))?\s*(?:(\d+))?<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches and not '<p>Lo sentimos, pero que esta buscando algo que no esta aqui. </p>' in data and not item.extra2 and not '<h2>Sin resultados</h2>' in data: #error
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Llamamos al método para el pintado del error
return itemlist #Salimos
logger.error("ERROR 02: LISTADO: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: LISTADO: Ha cambiado la estructura de la Web. Reportar el error con el log'))
break #si no hay más datos, algo no funciona, pintamos lo que tenemos
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
#Buscamos la próxima y la última página
patron_last = "<div class='pagination'>.*?<a href='([^']+\/page\/(\d+)[^']+)'\s*>(?:&raquo;)?(?:\d+)?<\/a><\/div>"
if last_page == 99999: #Si es el valor inicial, buscamos last page
try:
next_page_url, last_page = scrapertools.find_single_match(data, patron_last) #cargamos la url y la última página
last_page = int(last_page)
except: #Si no lo encuentra, lo ponemos a 1
last_page = 1
#logger.error('ERROR 03: LISTADO: Al obtener la paginación: ' + patron_last + ' / ' + next_page_url + ' / ' + str(last_page))
#logger.debug('curr_page: ' + str(curr_page) + '/ last_page: ' + str(last_page))
if last_page > 1:
next_page_url = re.sub(r'\/page\/\d+\/', '/page/%s/' % curr_page, next_page_url)
next_page_url = next_page_url.replace('&#038;', '&')
else:
next_page_url = item.url
#logger.debug('curr_page: ' + str(curr_page) + '/ last_page: ' + str(last_page) + '/ next_page_url: ' + next_page_url)
#Empezamos el procesado de matches
for scrapedlanguage, scrapedurl, scrapedtitle, year, scrapedcategory, scrapedquality in matches:
title = scrapedtitle
url = scrapedurl.replace('&#038;', '&')
title = title.replace("á", "a").replace("é", "e").replace("í", "i").replace("ó", "o").replace("ú", "u").replace("ü", "u").replace("�", "ñ").replace("ñ", "ñ").replace("&atilde;", "a").replace("&etilde;", "e").replace("&itilde;", "i").replace("&otilde;", "o").replace("&utilde;", "u").replace("&ntilde;", "ñ").replace("&#8217;", "'").replace('&#038;', '&')
#cnt_title += 1
item_local = item.clone() #Creamos copia de Item para trabajar
if item_local.tipo: #... y limpiamos
del item_local.tipo
if item_local.totalItems:
del item_local.totalItems
if item_local.category:
del item_local.category
if item_local.intervencion:
del item_local.intervencion
if item_local.viewmode:
del item_local.viewmode
item_local.extra2 = True
del item_local.extra2
item_local.text_bold = True
del item_local.text_bold
item_local.text_color = True
del item_local.text_color
title_subs = [] #creamos una lista para guardar info importante
item_local.language = [] #creamos lista para los idiomas
item_local.quality = scrapedquality #iniciamos calidad
item_local.thumbnail = ''
item_local.url = url.replace('&#038;', '&').replace('.io/', sufix).replace('.com/', sufix) #guardamos la url final
item_local.context = "['buscar_trailer']"
item_local.contentType = "movie" #por defecto, son películas
item_local.action = "findvideos"
#Analizamos el formato de series
if '/series' in scrapedurl or item_local.extra == 'series' or 'series' in scrapedcategory:
item_local.extra = 'series'
item_local.contentType = "tvshow"
item_local.action = "episodios"
item_local.season_colapse = True #Muestra las series agrupadas por temporadas
#Detectamos idiomas
if "1.png" in scrapedlanguage: item_local.language += ['CAST']
if "512.png" in scrapedlanguage or 'latino' in title.lower(): item_local.language += ['LAT']
if ("1.png" not in scrapedlanguage and "512.png" not in scrapedlanguage) or "eng" in title.lower() or "sub" in title.lower(): item_local.language += ['VOSE']
if '-3d' in scrapedurl:
title = title.replace('3D', '').replace('3d', '')
item_local.quality += ' 3D'
#Detectamos el año
item_local.infoLabels['year'] = '-'
if year:
try:
year = int(year)
if year >= 1970 and year <= 2040:
item_local.infoLabels["year"] = year
except:
pass
#Detectamos info importante a guardar para después de TMDB
if "extendida" in title.lower() or "extended" in title.lower() or "v.e." in title.lower()or "v e " in title.lower():
title_subs += ["[V. Extendida]"]
title = title.replace("Version Extendida", "").replace("(Version Extendida)", "").replace("V. Extendida", "").replace("VExtendida", "").replace("V Extendida", "").replace("V.Extendida", "").replace("V Extendida", "").replace("V.E.", "").replace("V E ", "")
if scrapertools.find_single_match(title, '[m|M].*?serie'):
title = re.sub(r'[m|M]iniserie', '', title)
title_subs += ["Miniserie"]
if scrapertools.find_single_match(title, '[s|S]aga'):
title = re.sub(r'[s|S]aga', '', title)
title_subs += ["Saga"]
if scrapertools.find_single_match(title, '[c|C]olecc'):
title = re.sub(r'[c|C]olecc...', '', title)
title_subs += ["Colección"]
#Empezamos a limpiar el título en varias pasadas
patron = '\s?-?\s?(line)?\s?-\s?$'
regex = re.compile(patron, re.I)
title = regex.sub("", title)
title = re.sub(r'\(\d{4}\s*?\)', '', title)
title = re.sub(r'\[\d{4}\s*?\]', '', title)
title = re.sub(r'[s|S]erie', '', title)
title = re.sub(r'- $', '', title)
title = re.sub(r'\d+[M|m|G|g][B|b]', '', title)
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren|\(iso\)|\(dvd.*?\)|(?:\d+\s*)?\d{3,4}p.*?$|extended|(?:\d+\s*)?bdrip.*?$|\(.*?\).*?$|iso$|unrated|\[.*?$|\d{4}$', '', title, flags=re.IGNORECASE)
#Obtenemos temporada y episodio si se trata de Episodios
if item_local.contentType == "episode":
patron = '(\d+)[x|X](\d+)'
try:
item_local.contentSeason, item_local.contentEpisodeNumber = scrapertools.find_single_match(title, patron)
except:
item_local.contentSeason = 1
item_local.contentEpisodeNumber = 0
#Si son episodios múltiples, lo extraemos
patron1 = '\d+[x|X]\d+.?(?:y|Y|al|Al)?.?\d+[x|X](\d+)'
epi_rango = scrapertools.find_single_match(title, patron1)
if epi_rango:
item_local.infoLabels['episodio_titulo'] = 'al %s' % epi_rango
title = re.sub(patron1, '', title)
else:
title = re.sub(patron, '', title)
#Terminamos de limpiar el título
title = re.sub(r'\??\s?\d*?\&.*', '', title)
title = re.sub(r'[\(|\[]\s+[\)|\]]', '', title)
title = title.replace('()', '').replace('[]', '').strip().lower().title()
item_local.from_title = title.strip().lower().title() #Guardamos esta etiqueta para posible desambiguación de título
#Salvamos el título según el tipo de contenido
if item_local.contentType == "movie":
item_local.contentTitle = title
else:
item_local.contentSerieName = title.strip().lower().title()
item_local.title = title.strip().lower().title()
item_local.quality = item_local.quality.strip()
#Guarda la variable temporal que almacena la info adicional del título a ser restaurada después de TMDB
item_local.title_subs = title_subs
#Salvamos y borramos el número de temporadas porque TMDB a veces hace tonterias. Lo pasamos como serie completa
if item_local.contentSeason and (item_local.contentType == "season" or item_local.contentType == "tvshow"):
item_local.contentSeason_save = item_local.contentSeason
del item_local.infoLabels['season']
#Ahora se filtra por idioma, si procede, y se pinta lo que vale
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist = filtertools.get_link(itemlist, item_local, list_language)
else:
itemlist.append(item_local.clone()) #Si no, pintar pantalla
cnt_title = len(itemlist) #Contador de líneas añadidas
#logger.debug(item_local)
#Pasamos a TMDB la lista completa Itemlist
tmdb.set_infoLabels(itemlist, __modo_grafico__)
#Llamamos al método para el maquillaje de los títulos obtenidos desde TMDB
item, itemlist = generictools.post_tmdb_listado(item, itemlist)
# Si es necesario añadir paginacion
if curr_page <= last_page and last_page > 1:
if last_page:
title = '%s de %s' % (curr_page-1, last_page)
else:
title = '%s' % (curr_page - 1)
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente " + title, title_lista=title_lista, url=next_page_url, extra=item.extra, extra2=item.extra2, last_page=str(last_page), curr_page=str(curr_page)))
return itemlist
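
A worked example of the title clean-up passes above, applying a subset of the filters to an invented scraped title:

import re

title = "Pelicula X (2017) Torrent Descargar 1080p"
title = re.sub(r'\(\d{4}\s*?\)', '', title)               # drop the year in parentheses
title = re.sub(r'\d+[M|m|G|g][B|b]', '', title)           # drop sizes such as 4GB
title = re.sub(r'Torrent|Descargar|(?:\d+\s*)?\d{3,4}p.*?$', '', title,
               flags=re.IGNORECASE)                       # subset of the long junk filter
title = title.replace('()', '').replace('[]', '').strip().lower().title()
print(title)  # -> Pelicula X
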
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
matches = []
item.category = categoria
#logger.debug(item)
if item.extra != 'episodios':
#Bajamos los datos de la página
data = ''
patron = '<div class="secciones"><h1>[^<]+<\/h1><br\s*\/><br\s*\/><div class="fichimagen">\s*<img class="carat" src="([^"]+)"'
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
#data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
except:
pass
if not data:
logger.error("ERROR 01: FINDVIDEOS: La Web no responde o la URL es erronea: " + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: FINDVIDEOS:. La Web no responde o la URL es erronea. Si la Web está activa, reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#Extraemos el thumb
if not item.thumbnail:
item.thumbnail = scrapertools.find_single_match(data, patron) #guardamos thumb si no existe
#Extraemos quality, audio, year, country, size, scrapedlanguage
patron = '<\/script><\/div><ul>(?:<li><label>Fecha de estreno <\/label>[^<]+<\/li>)?(?:<li><label>Genero <\/label>[^<]+<\/li>)?(?:<li><label>Calidad <\/label>([^<]+)<\/li>)?(?:<li><label>Audio <\/label>([^<]+)<\/li>)?(?:<li><label>Fecha <\/label>.*?(\d+)<\/li>)?(?:<li><label>Pais de Origen <\/label>([^<]+)<\/li>)?(?:<li><label>Tama&ntilde;o <\/label>([^<]+)<\/li>)?(<li> Idioma[^<]+<img src=.*?<br \/><\/li>)?'
try:
quality, audio, year, country, size, scrapedlanguage = scrapertools.find_single_match(data, patron)
except:
quality = ''
audio = ''
year = ''
country = ''
size = ''
scrapedlanguage = ''
if quality: item.quality = quality
if audio: item.quality += ' %s' % audio.strip()
if not item.infoLabels['year'] and year: item.infoLabels['year'] = year
if size: item.quality += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip()
if size: item.title += ' [%s]' % size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b').replace('.', ',').strip()
language = []
matches = re.compile('(\d+.png)', re.DOTALL).findall(scrapedlanguage)
for lang in matches:
if "1.png" in lang and not 'CAST' in language: language += ['CAST']
if "512.png" in lang and not 'LAT' in language: language += ['LAT']
if ("1.png" not in lang and "512.png" not in lang) and not 'VOSE' in language: language += ['VOSE']
if language: item.language = language
#Extraemos los enlaces .torrent
##Modalidad de varios archivos
patron = '<div class="fichadescargat"><\/div><div class="table-responsive"[^>]+>.*?<\/thead><tbody>(.*?)<\/tbody><\/table><\/div>'
if scrapertools.find_single_match(data, patron):
data_torrents = scrapertools.find_single_match(data, patron)
patron = '<tr><td>.*?<\/td><td><a href="([^"]+)"[^>]+><[^>]+><\/a><\/td><\/tr>'
#Modalidad de un archivo
else:
data_torrents = data
patron = '<div class="fichasubtitulos">.*?<\/div><\/li><\/ul>.*?<a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data_torrents)
if not matches: #error
logger.error("ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web " + " / PATRON: " + patron + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
else: #SERIES: ya viene con las urls
data = item.url #inicio data por compatibilidad
matches = [item.url] #inicio matches por compatibilidad
#Extraemos las urls de los subtítulos (Platformtools usa item.subtitle como sub-titulo por defecto)
patron = '<div class="fichasubtitulos">\s*<label class="fichsub">\s*<a href="([^"]+)">Subtitulos\s*<\/a>\s*<\/label>'
if scrapertools.find_single_match(data, patron) or item.subtitle:
if item.extra == 'episodios': #Si viene de Series, ya tengo la primera url
subtitle = item.subtitle
del item.subtitle
else:
subtitle = scrapertools.find_single_match(data, patron).replace('&#038;', '&').replace('.io/', sufix).replace('.com/', sufix)
data_subtitle = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(subtitle, timeout=timeout).data)
patron = '<tbody>(<tr class="fichserietabla_b">.*?<\/tr>)<\/tbody>' #salvamos el bloque
data_subtitle = scrapertools.find_single_match(data_subtitle, patron)
patron = '<tr class="fichserietabla_b">.*?<a href="([^"]+)"'
subtitles = re.compile(patron, re.DOTALL).findall(data_subtitle) #Creamos una lista con todos los sub-títulos
if subtitles and len(subtitles) > 1: #Solo se guarda si hay más de un idioma. Si no, automático
item.subtitle = []
for subtitle in subtitles:
subtitle = subtitle.replace('&#038;', '&').replace('.io/', sufix).replace('.com/', sufix)
item.subtitle.append(subtitle)
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
#Llamamos al método para crear el título general del vídeo, con toda la información obtenida de TMDB
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist)
#Ahora tratamos los enlaces .torrent
for scrapedurl in matches: #leemos los torrents con la diferentes calidades
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
#Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent
size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]')
if not size:
size = generictools.get_torrent_size(scrapedurl) #Buscamos el tamaño en el .torrent
if size:
item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía
item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título
size = size.replace('GB', 'G B').replace('Gb', 'G b').replace('MB', 'M B').replace('Mb', 'M b')
item_local.quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', item_local.quality) #Quitamos size de calidad, si lo traía
item_local.quality = '%s [%s]' % (item_local.quality, size) #Agregamos size al final de la calidad
#Ahora pintamos el link del Torrent
item_local.url = scrapedurl.replace('&#038;', '&').replace('.io/', sufix).replace('.com/', sufix)
item_local.title = '[COLOR yellow][?][/COLOR] [COLOR yellow][Torrent][/COLOR] [COLOR limegreen][%s][/COLOR] [COLOR red]%s[/COLOR]' % (item_local.quality, str(item_local.language))
#Preparamos título y calidad, quitamos etiquetas vacías
item_local.title = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.title)
item_local.title = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.title)
item_local.title = item_local.title.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").strip()
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\[\[?\s?\]?\]\[\/COLOR\]', '', item_local.quality)
item_local.quality = re.sub(r'\s?\[COLOR \w+\]\s?\[\/COLOR\]', '', item_local.quality)
item_local.quality = item_local.quality.replace("--", "").replace("[]", "").replace("()", "").replace("(/)", "").replace("[/]", "").replace(".", ",").strip()
item_local.alive = "??" #Calidad del link sin verificar
item_local.action = "play" #Visualizar vídeo
item_local.server = "torrent" #Seridor Torrent
itemlist_t.append(item_local.clone()) #Pintar pantalla, si no se filtran idiomas
# Requerido para FilterTools
if config.get_setting('filter_languages', channel) > 0: #Si hay idioma seleccionado, se filtra
itemlist_f = filtertools.get_link(itemlist_f, item_local, list_language) #Pintar pantalla, si no está vacío
#logger.debug("TORRENT: " + scrapedurl + " / title gen/torr: " + item.title + " / " + item_local.title + " / calidad: " + item_local.quality + " / content: " + item_local.contentTitle + " / " + item_local.contentSerieName)
#logger.debug(item_local)
if len(itemlist_f) > 0: #Si hay entradas filtradas...
itemlist.extend(itemlist_f) #Pintamos pantalla filtrada
else:
if config.get_setting('filter_languages', channel) > 0 and len(itemlist_t) > 0: #Si no hay entradas filtradas ...
thumb_separador = get_thumb("next.png") #... pintamos todo con aviso
itemlist.append(Item(channel=item.channel, url=host, title="[COLOR red][B]NO hay elementos con el idioma seleccionado[/B][/COLOR]", thumbnail=thumb_separador))
itemlist.extend(itemlist_t) #Pintar pantalla con todo si no hay filtrado
# Requerido para AutoPlay
autoplay.start(itemlist, item) #Lanzamos Autoplay
return itemlist
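
The size juggling in the loop above, in isolation: any size already embedded in the quality string is stripped, then the value read from the .torrent is re-appended in the spaced "G B" form. Sample strings invented:

import re

quality = 'HDRip [700 M B]'                                    # current quality tag
size = '4.5 GB'.replace('GB', 'G B').replace('.', ',')         # as read from the .torrent
quality = re.sub(r'\s\[\d+,?\d*?\s\w\s?[b|B]\]', '', quality)  # drop the old size tag
quality = '%s [%s]' % (quality, size)
print(quality)  # -> HDRip [4,5 G B]
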
def play(item): #Permite preparar la descarga de los subtítulos externos
logger.info()
itemlist = []
headers = []
import os
from core import downloadtools
if item.subtitle: #Si hay urls de sub-títulos, se descargan
headers.append(["User-Agent", httptools.random_useragent()]) #Se busca un User-Agent aleatorio
if not os.path.exists(os.path.join(config.get_setting("videolibrarypath"), "subtitles")): #Si no hay carpeta de Sub-títulos, se crea
os.mkdir(os.path.join(config.get_setting("videolibrarypath"), "subtitles"))
subtitles = []
subtitles.extend(item.subtitle)
item.subtitle = subtitles[0] #ponemos por defecto el primero
for subtitle in subtitles: #recorremos la lista
subtitle_name = scrapertools.find_single_match(subtitle, '\/\d{2}\/(.*?\.\w+)$') #se pone el nombre del Sub-título
subtitle_folder_path = os.path.join(config.get_setting("videolibrarypath"), "subtitles", subtitle_name) #Path de descarga
ret = downloadtools.downloadfile(subtitle, subtitle_folder_path, headers=headers, continuar=True, silent=True) #Descarga
itemlist.append(item.clone()) #Reproducción normal
return itemlist
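
The subtitle filename is recovered from the last path segment of the download URL, after the /MM/ date component, and saved under a "subtitles" folder in the video library. In isolation (URL and base path invented):

import os
import re

subtitle = 'https://www.subtorrents.tv/subtitulos/2018/10/Movie.One.spa.srt'
name = re.search('\/\d{2}\/(.*?\.\w+)$', subtitle).group(1)
path = os.path.join('/storage/videolibrary', 'subtitles', name)
print(path)  # -> /storage/videolibrary/subtitles/Movie.One.spa.srt
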
def episodios(item):
logger.info()
itemlist = []
item.category = categoria
#logger.debug(item)
if item.from_title:
item.title = item.from_title
if item.subtitle:
del item.subtitle
#Limpiamos num. Temporada y Episodio que ha podido quedar por Novedades
season_display = 0
if item.contentSeason:
if item.season_colapse: #Si viene del menú de Temporadas...
season_display = item.contentSeason #... salvamos el num de sesión a pintar
item.from_num_season_colapse = season_display
del item.season_colapse
item.contentType = "tvshow"
if item.from_title_season_colapse:
item.title = item.from_title_season_colapse
del item.from_title_season_colapse
if item.infoLabels['title']:
del item.infoLabels['title']
del item.infoLabels['season']
if item.contentEpisodeNumber:
del item.infoLabels['episode']
if season_display == 0 and item.from_num_season_colapse:
season_display = item.from_num_season_colapse
# Obtener la información actualizada de la Serie. TMDB es imprescindible para Videoteca
if not item.infoLabels['tmdb_id']:
tmdb.set_infoLabels(item, True)
modo_ultima_temp_alt = modo_ultima_temp
if item.ow_force == "1": #Si hay un traspaso de canal o url, se actualiza todo
modo_ultima_temp_alt = False
max_temp = 1
if item.infoLabels['number_of_seasons']:
max_temp = item.infoLabels['number_of_seasons']
y = []
if modo_ultima_temp_alt and item.library_playcounts: #Find out how many seasons the video library holds
patron = 'season (\d+)'
matches = re.compile(patron, re.DOTALL).findall(str(item.library_playcounts))
for x in matches:
y += [int(x)]
max_temp = max(y)
# Download the page
data = ''
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)|&nbsp;", "", httptools.downloadpage(item.url, timeout=timeout).data)
#data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
except: #Some processing error; bail out
pass
if not data:
logger.error("ERROR 01: EPISODIOS: La Web no responde o la URL es errónea: " + item.url)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: EPISODIOS: La Web no responde o la URL es errónea. Si la Web está activa, reportar el error con el log'))
return itemlist
patron = '<td class="capitulonombre"><img src="([^"]+)[^>]+>(?:<a href="[^>]+>)(.*?)<\/a><\/td><td class="capitulodescarga"><a href="([^"]+)[^>]+>.*?(?:<td class="capitulofecha">.*?(\d{4})?.*?<\/td>)?(?:<td class="capitulosubtitulo"><a href="([^"]+)[^>]+>.*?<\/td>)?<td class="capitulodescarga"><\/tr>'
matches = re.compile(patron, re.DOTALL).findall(data)
if not matches: #no matches: something went wrong
item = generictools.web_intervenida(item, data) #Check whether the site has been taken down
if item.intervencion: #It has indeed been shut down by court order
item, itemlist = generictools.post_tmdb_episodios(item, itemlist) #Call the method that draws the error
return itemlist #Exit
logger.error("ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web. Reportar el error con el log'))
return itemlist #no more data: something is off; draw what we have
#logger.debug("PATRON: " + patron)
#logger.debug(matches)
#logger.debug(data)
season = max_temp
#Check whether we really know the maximum number of seasons
if item.library_playcounts or (item.infoLabels['number_of_seasons'] and item.tmdb_stat):
num_temporadas_flag = True
else:
num_temporadas_flag = False
# Walk every episode, generating one local Item per episode in itemlist
for scrapedlanguage, scrapedtitle, scrapedurl, year, scrapedsubtitle in matches:
item_local = item.clone()
item_local.action = "findvideos"
item_local.contentType = "episode"
item_local.extra = "episodios"
if item_local.library_playcounts:
del item_local.library_playcounts
if item_local.library_urls:
del item_local.library_urls
if item_local.path:
del item_local.path
if item_local.update_last:
del item_local.update_last
if item_local.update_next:
del item_local.update_next
if item_local.channel_host:
del item_local.channel_host
if item_local.active:
del item_local.active
if item_local.contentTitle:
del item_local.infoLabels['title']
if item_local.season_colapse:
del item_local.season_colapse
item_local.title = ''
item_local.context = "['buscar_trailer']"
item_local.url = scrapedurl.replace('&#038;', '&').replace('.io/', sufix).replace('.com/', sufix)
if scrapedsubtitle:
item_local.subtitle = scrapedsubtitle.replace('&#038;', '&').replace('.io/', sufix).replace('.com/', sufix)
title = scrapedtitle
item_local.language = []
if "1.png" in scrapedlanguage: item_local.language += ['CAST']
if "512.png" in scrapedlanguage or 'latino' in title.lower(): item_local.language += ['LAT']
if ("1.png" not in scrapedlanguage and "512.png" not in scrapedlanguage) or "eng" in title.lower() or "sub" in title.lower(): item_local.language += ['VOSE']
try:
item_local.contentEpisodeNumber = 0
if 'miniserie' in title.lower():
item_local.contentSeason = 1
title = title.replace('miniserie', '').replace('MiniSerie', '')
elif 'completa' in title.lower():
patron = '[Tt].*?(\d+) [Cc]ompleta'
if scrapertools.find_single_match(title, patron):
item_local.contentSeason = int(scrapertools.find_single_match(title, patron))
if not item_local.contentSeason:
#Extract season and episode
patron = '(\d{1,2})[xX](\d{1,2})'
item_local.contentSeason, item_local.contentEpisodeNumber = scrapertools.find_single_match(title, patron)
item_local.contentSeason = int(item_local.contentSeason)
item_local.contentEpisodeNumber = int(item_local.contentEpisodeNumber)
except:
logger.error('ERROR al extraer Temporada/Episodio: ' + title)
item_local.contentSeason = 1
item_local.contentEpisodeNumber = 0
#If it covers multiple episodes, extract the range
patron1 = '\d+[xX]\d{1,2}.?(?:y|Y|al|Al)?(?:\d+[xX]\d{1,2})?.?(?:y|Y|al|Al)?.?\d+[xX](\d{1,2})'
epi_rango = scrapertools.find_single_match(title, patron1)
if epi_rango:
item_local.infoLabels['episodio_titulo'] = 'al %s' % epi_rango
item_local.title = '%sx%s al %s -' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2), str(epi_rango).zfill(2))
else:
item_local.title = '%sx%s -' % (str(item_local.contentSeason), str(item_local.contentEpisodeNumber).zfill(2))
if modo_ultima_temp_alt and item.library_playcounts: #If only the video library's last season is being refreshed
if item_local.contentSeason < max_temp:
break #Leave the current FOR loop
if season_display > 0:
if item_local.contentSeason > season_display:
continue
elif item_local.contentSeason < season_display:
break
itemlist.append(item_local.clone())
#logger.debug(item_local)
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber))) #sort the list
if item.season_colapse and not item.add_videolibrary: #If coming from a listing, show only Seasons
item, itemlist = generictools.post_tmdb_seasons(item, itemlist)
if not item.season_colapse: #If this is not the Seasons screen, draw everything
# TMDB pass and sorting of the list by season and episode
tmdb.set_infoLabels(itemlist, True)
#Call the method that polishes the titles fetched from TMDB
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
#logger.debug(item)
return itemlist
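# Standalone sketch (not the channel's code path) of how the loop above
# reads season/episode data out of scraped titles such as '3x05',
# 'Temporada 2 Completa' or '3x05 al 3x08':
def _parse_episode_title(title):
    import re
    season, episode, last = 1, 0, None
    match = re.search(r'(\d{1,2})[xX](\d{1,2})', title)
    if match:
        season, episode = int(match.group(1)), int(match.group(2))
    match = re.search(r'[Tt].*?(\d+) [Cc]ompleta', title)
    if match:
        season = int(match.group(1))    # 'Temporada 2 Completa' -> season 2
    match = re.search(r'\d+[xX]\d{1,2}.?(?:y|Y|al|Al)?.?\d+[xX](\d{1,2})', title)
    if match:
        last = int(match.group(1))      # multi-episode range: '3x05 al 3x08' -> 8
    return season, episode, last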
def actualizar_titulos(item):
logger.info()
item = generictools.update_title(item) #Call the method that refreshes the title via tmdb.find_and_set_infoLabels
#Hand control back to the channel's next action
return item
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url = item.url % texto
if texto != '':
return listado(item)
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + "peliculas-subtituladas/?filtro=estrenos"
item.extra = "peliculas"
item.channel = channel
item.category_new = 'newest'
itemlist = listado(item)
if ">> Página siguiente" in itemlist[-1].title:
itemlist.pop()
# Catch the exception so one failing channel does not break the Novedades channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,29 +1,32 @@
# -*- coding: utf-8 -*-
import re
import sys
import urlparse
from platformcode import logger
from core import scrapertools
from core.item import Item
from platformcode import logger
HOST = "http://es.xhamster.com/"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Útimos vídeos", url="http://es.xhamster.com/",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorías"))
itemlist.append(Item(channel=item.channel, action="votados", title="Más votados"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
url="http://xhamster.com/search.php?q=%s&qcat=video"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Útimos videos" , url=HOST, viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="categorias" , title="Categorías", url=HOST))
itemlist.append( Item(channel=item.channel, action="votados" , title="Lo mejor"))
itemlist.append( Item(channel=item.channel, action="vistos" , title="Los mas vistos"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Recomendados", url=urlparse.urljoin(HOST,"/videos/recommended")))
itemlist.append( Item(channel=item.channel, action="search" , title="Buscar", url=urlparse.urljoin(HOST,"/search?q=%s")))
return itemlist
# ACTUALLY JUST PASSES THE SEARCH URL ALONG
def search(item, texto):
def search(item,texto):
logger.info()
tecleado = texto.replace(" ", "+")
tecleado = texto.replace( " ", "+" )
item.url = item.url % tecleado
item.extra = "buscar"
try:
@@ -34,8 +37,6 @@ def search(item, texto):
for line in sys.exc_info():
logger.error("%s" % line)
return []
# SECTION IN CHARGE OF SEARCHING
def videos(item):
@@ -43,93 +44,66 @@ def videos(item):
data = scrapertools.cache_page(item.url)
itemlist = []
data = scrapertools.get_match(data, '<div class="boxC videoList clearfix">(.*?)<div id="footer">')
data = scrapertools.get_match(data,'<article.+?>(.*?)</article>')
#Pattern
patron = '(?s)<div class="thumb-list__item.*?href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)">.*?'
patron += '<div class="thumb-image-container__duration">(.+?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
# Pattern #1
patron = '<div class="video"><a href="([^"]+)" class="hRotator">' + "<img src='([^']+)' class='thumb'" + ' alt="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
folder=True))
for scrapedurl,scrapedthumbnail,scrapedtitle,duration in matches:
#logger.debug("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
fullTitle = scrapedtitle.strip() + " [" + duration + "]"
itemlist.append( Item(channel=item.channel, action="play" , title=fullTitle , url=scrapedurl, thumbnail=scrapedthumbnail, folder=True))
# Pattern #2
patron = '<a href="([^"]+)" data-click="[^"]+" class="hRotator"><img src=\'([^\']+)\' class=\'thumb\' alt="([^"]+)"/>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
folder=True))
# Paginator
patron = "<a href='([^']+)' class='last colR'><div class='icon iconPagerNextHover'></div>Próximo</a>"
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) > 0:
itemlist.append(
Item(channel=item.channel, action="videos", title="Página Siguiente", url=matches[0], thumbnail="",
folder=True, viewmode="movie"))
#Paginator
patron = '(?s)<div class="pager-container".*?<li class="next">.*?href="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches) >0:
itemlist.append( Item(channel=item.channel, action="videos", title="Página Siguiente" , url=matches[0] , thumbnail="" , folder=True, viewmode="movie") )
return itemlist
# SECTION THAT DUMPS THE CATEGORY LISTING WITH THE LINK FOR EACH PAGE
def categorias(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="lista", title="Heterosexual", url="http://es.xhamster.com/channels.php"))
itemlist.append(
Item(channel=item.channel, action="lista", title="Transexuales", url="http://es.xhamster.com/channels.php"))
itemlist.append(Item(channel=item.channel, action="lista", title="Gays", url="http://es.xhamster.com/channels.php"))
return itemlist
data = scrapertools.cache_page(item.url)
data = scrapertools.get_match(data,'(?s)<div class="all-categories">(.*?)</aside>')
patron = '(?s)<li>.*?<a href="([^"]+)".*?>([^<]+).*?</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
fullTitle = scrapedtitle.strip()
itemlist.append( Item(channel=item.channel, action="videos" , title=fullTitle , url=scrapedurl))
return itemlist
def votados(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Día",
url="http://es.xhamster.com/rankings/daily-top-videos.html", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="videos", title="Semana",
url="http://es.xhamster.com/rankings/weekly-top-videos.html", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="videos", title="Mes",
url="http://es.xhamster.com/rankings/monthly-top-videos.html", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="videos", title="De siempre",
url="http://es.xhamster.com/rankings/alltime-top-videos.html", viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Día", url=urlparse.urljoin(HOST,"/best/daily"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Semana" , url=urlparse.urljoin(HOST,"/best/weekly"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Mes" , url=urlparse.urljoin(HOST,"/best/monthly"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="De siempre" , url=urlparse.urljoin(HOST,"/best/"), viewmode="movie"))
return itemlist
def lista(item):
def vistos(item):
logger.info()
itemlist = []
data = scrapertools.downloadpageGzip(item.url)
# data = data.replace("\n","")
# data = data.replace("\t","")
if item.title == "Gays":
data = scrapertools.get_match(data,
'<div class="title">' + item.title + '</div>.*?<div class="list">(.*?)<div id="footer">')
else:
data = scrapertools.get_match(data,
'<div class="title">' + item.title + '</div>.*?<div class="list">(.*?)<div class="catName">')
patron = '(<div.*?</div>)'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
data = data.replace(match, "")
patron = 'href="([^"]+)">(.*?)</a>'
data = ' '.join(data.split())
logger.info(data)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel=item.channel, action="videos", title=scrapedtitle, url=scrapedurl, folder=True,
viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Día", url=urlparse.urljoin(HOST,"/most-viewed/daily"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Semana" , url=urlparse.urljoin(HOST,"/most-viewed/weekly"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Mes" , url=urlparse.urljoin(HOST,"/most-viewed/monthly"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="De siempre" , url=urlparse.urljoin(HOST,"/most-viewed/"), viewmode="movie"))
sorted_itemlist = sorted(itemlist, key=lambda Item: Item.title)
return sorted_itemlist
return itemlist
# GETS THE LINKS ACCORDING TO THE VIDEO PATTERNS AND JOINS THEM WITH THE SERVER
@@ -141,10 +115,11 @@ def play(item):
logger.debug(data)
patron = '"([0-9]+p)":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = re.compile(patron,re.DOTALL).findall(data)
for res, url in matches:
url = url.replace("\\", "")
logger.debug("url=" + url)
itemlist.append(["%s %s [directo]" % (res, scrapertools.get_filename_from_url(url)[-4:]), url])
logger.debug("url="+url)
itemlist.append(["%s %s [directo]" % (res, scrapertools.get_filename_from_url(url)[-4:]), url])
return itemlist
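# Sketch of what the rewritten play() extracts: resolution/URL pairs
# embedded in the page as '"720p":"https:\/\/..."', with the escaping
# stripped afterwards (made-up input, plain re shown):
def _xhamster_sources(data):
    import re
    pairs = re.findall(r'"([0-9]+p)":"([^"]+)"', data)
    return [(res, url.replace('\\', '')) for res, url in pairs]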

View File

@@ -327,6 +327,8 @@ def post_tmdb_listado(item, itemlist):
except:
pass
__modo_grafico__ = config.get_setting('modo_grafico', item.channel)
# If TMDB did not find the video, clear the year
if item_local.infoLabels['year'] == "-":
item_local.infoLabels['year'] = ''
@@ -338,7 +340,7 @@ def post_tmdb_listado(item, itemlist):
logger.error(item_local)
del item_local.infoLabels['tmdb_id'] #it may carry a wrong TMDB-ID
try:
tmdb.set_infoLabels(item_local, True) #run through TMDB again
tmdb.set_infoLabels(item_local, __modo_grafico__) #run through TMDB again
except:
pass
logger.error(item_local)
@@ -349,7 +351,7 @@ def post_tmdb_listado(item, itemlist):
year = item_local.infoLabels['year'] #save the year in case the new lookup fails
item_local.infoLabels['year'] = "-" #reset the year
try:
tmdb.set_infoLabels(item_local, True) #run through TMDB again
tmdb.set_infoLabels(item_local, __modo_grafico__) #run through TMDB again
except:
pass
if not item_local.infoLabels['tmdb_id']: #did it succeed?

View File

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid'
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
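# Sketch of how such a domain alternation is used: a URL counts as an
# ad-shortener link when any listed domain matches (plain re here; the
# module's own dispatch goes through find_in_text):
import re
_ADFLY = (r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com'
          r'|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net')
def _is_adfly(url):
    return re.search(_ADFLY, url, re.IGNORECASE) is not None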

View File

@@ -44,6 +44,9 @@ thumb_dict = {"movies": "https://s10.postimg.cc/fxtqzdog9/peliculas.png",
"recents": "https://s10.postimg.cc/649u24kp5/recents.png",
"updated" : "https://s10.postimg.cc/46m3h6h9l/updated.png",
"actors": "https://i.postimg.cc/tC2HMhVV/actors.png",
"cast": "https://i.postimg.cc/qvfP5Xvt/cast.png",
"lat": "https://i.postimg.cc/Gt8fMH0J/lat.png",
"vose": "https://i.postimg.cc/kgmnbd8h/vose.png",
"accion": "https://s14.postimg.cc/sqy3q2aht/action.png",
"adolescente" : "https://s10.postimg.cc/inq7u4p61/teens.png",
"adultos": "https://s10.postimg.cc/s8raxc51l/adultos.png",

View File

@@ -5,7 +5,7 @@ from core import httptools
from core import scrapertools
from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'}
def test_video_exists(page_url):
@@ -28,7 +28,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
time.sleep(1)
data1 = httptools.downloadpage(page_url, post = post, headers = headers).data
patron = "window.open\('([^']+)"
file = scrapertools.find_single_match(data1, patron)
file = scrapertools.find_single_match(data1, patron).replace(" ","%20")
file += "|User-Agent=" + headers['User-Agent']
video_urls = []
videourl = file

View File

@@ -22,10 +22,12 @@ def get_video_url(page_url, user="", password="", video_password=""):
packed = scrapertools.find_multiple_matches(data, "(?s)<script>\s*eval(.*?)\s*</script>")
for pack in packed:
unpacked = jsunpack.unpack(pack)
if "file" in unpacked:
videos = scrapertools.find_multiple_matches(unpacked, 'file.="(//[^"]+)')
if "tida" in unpacked:
videos = scrapertools.find_multiple_matches(unpacked, 'tid.="([^"]+)')
video_urls = []
for video in videos:
if not video.startswith("//"):
continue
video = "https:" + video
video_urls.append(["mp4 [Thevid]", video])
logger.info("Url: %s" % videos)

View File

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "https://vidcloud.co/embed/([a-z0-9]+)",
"pattern": "https://(?:vidcloud.co|vcstream.to)/embed/([a-z0-9]+)",
"url": "https://vidcloud.co/player?fid=\\1&page=embed"
}
]
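A sketch of how a pattern/url pair like the one above is applied — the real dispatch lives in Alfa's servertools; the function name here is illustrative:

def _embed_to_player(url):
    import re
    match = re.search(r'https://(?:vidcloud.co|vcstream.to)/embed/([a-z0-9]+)', url)
    if not match:
        return url
    # '\\1' in the JSON template becomes the captured video id
    return 'https://vidcloud.co/player?fid=%s&page=embed' % match.group(1)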

View File

@@ -4,7 +4,6 @@
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
@@ -23,7 +22,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
data = data.replace('\\\\', '\\').replace('\\','')
patron = '"file":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
video_urls.append(['vidcloud', url])
if not ".vtt" in url:
video_urls.append(['vidcloud', url])
return video_urls
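# The change above simply drops WebVTT subtitle tracks from the source
# list; an equivalent filter over the matched URLs (illustrative):
def _drop_vtt(urls):
    # keep media sources, skip .vtt subtitle tracks
    return [['vidcloud', url] for url in urls if '.vtt' not in url]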

View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://xdrive.cc/embed/([A-z0-9]+)",
"url": "https://xdrive.cc/embed/\\1"
}
]
},
"free": true,
"id": "xdrive",
"name": "xdrive",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://i.postimg.cc/MHyNdRPZ/xdrive.png"
}

View File

@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Connector for xdrive
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data:
return False, "[xdrive] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
videourl = scrapertools.find_multiple_matches(data, "src: '([^']+).*?label: '([^']+)")
scrapertools.printMatches(videourl)
for scrapedurl, scrapedquality in videourl:
scrapedquality = scrapedquality.replace("&uarr;","")
video_urls.append([scrapedquality + " [xdrive]", scrapedurl])
video_urls.sort(key=lambda it: int(it[0].split("P ", 1)[0]))
return video_urls
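# The sort above assumes quality labels shaped like '720P [xdrive]';
# a sketch of the key it relies on (would raise ValueError on labels
# without an uppercase 'P ' delimiter):
def _resolution_key(label):
    # '720P [xdrive]' -> 720
    return int(label.split('P ', 1)[0])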

View File

@@ -21,7 +21,6 @@ Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0;)
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0)
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8; .NET4.0C; .NE
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; SLCC2; .NET CLR 2.0.50727; InfoPath.3; .NET4.0C; .NET4.0E; .NET CLR 3.5.30729; .NET CLR 3.0.30729; MS-RTC LM 8)
Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1; .NET CLR 1.0.3705; Media Center PC 3.1; Alexa Toolbar; .NET CLR 1.1.4322; .NET CLR 2.0.50727)
Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.40607)
Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)
@@ -36,7 +35,6 @@ Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)
Mozilla/4.0 (compatible; MSIE 8.0; Linux i686; en) Opera 10.51
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; ko) Opera 10.53
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; pl) Opera 11.00
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; en) Opera 11.00
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; ja) Opera 11.00
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; en) Opera 10.62
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; fr) Opera 11.00
@@ -46,7 +44,6 @@ Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .N
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8;
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8;
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8;
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; msn Optimized
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; Zune 3.0)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)
@@ -373,7 +370,6 @@ Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1b4pre) Gecko/20090401 Fi
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1b4pre) Gecko/20090409 Firefox/3.5b4pre
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1b5pre) Gecko/20090517 Firefox/3.5b4pre (.NET CLR 3.5.30729)
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.0.16 (.NET CLR 3.5.30729)
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.3) Gecko/20100401 Mozilla/5.0 (X11; U; Linux i686; it-IT; rv:1.9.0.2) Gecko/2008092313 Ubuntu/9.25 (jaunty) Firefox
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2b4) Gecko/20091124 Firefox/3.6b4
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b1) Gecko/2007110703 Firefox/3.0b1
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9b3) Gecko/2008020514 Firefox/3.0b3