@@ -13,7 +13,7 @@ from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from platformcode import platformtools
|
||||
|
||||
host = "https://hdfull.tv"
|
||||
host = "https://hdfull.me"
|
||||
|
||||
if config.get_setting('hdfulluser', 'hdfull'):
|
||||
account = True
|
||||
@@ -39,23 +39,10 @@ def login():
|
||||
|
||||
httptools.downloadpage(host, post=post)
|
||||
|
||||
def set_host():
    """Probe the known mirrors and repoint the module-level ``host`` at the first that responds."""
    global host
    logger.info()

    for candidate in [host, 'https://hdfull.me']:
        response = httptools.downloadpage(candidate, only_headers=True)
        # NOTE: 'sucess' (sic) is the attribute name exposed by this
        # project's httptools response object — not a typo here.
        if response.sucess:
            host = candidate
            break
|
||||
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
|
||||
itemlist = []
|
||||
set_host()
|
||||
|
||||
itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas", url=host, folder=True))
|
||||
itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url=host, folder=True))
|
||||
itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
|
||||
@@ -355,14 +342,14 @@ def fichas(item):
|
||||
if str != "": title += str
|
||||
|
||||
if item.title == "Buscar...":
|
||||
tag_type = scrapertools.get_match(url, 'l.tv/([^/]+)/')
|
||||
bus = host[-4:]
|
||||
tag_type = scrapertools.find_single_match(url, '%s/([^/]+)/' %bus)
|
||||
title += " - [COLOR blue]" + tag_type.capitalize() + "[/COLOR]"
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action=action, title=title, url=url, fulltitle=title, thumbnail=thumbnail,
|
||||
show=show, folder=True, contentType=contentType, contentTitle=contentTitle,
|
||||
language =language, infoLabels=infoLabels))
|
||||
|
||||
## Paginación
|
||||
next_page_url = scrapertools.find_single_match(data, '<a href="([^"]+)">.raquo;</a>')
|
||||
if next_page_url != "":
|
||||
@@ -760,7 +747,7 @@ def agrupa_datos(data):
|
||||
|
||||
|
||||
def extrae_idiomas(bloqueidiomas):
|
||||
logger.info("idiomas=" + bloqueidiomas)
|
||||
logger.info()
|
||||
language=[]
|
||||
textoidiomas = ''
|
||||
patronidiomas = '([a-z0-9]+).png"'
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
{
|
||||
"id": "maxipelis",
|
||||
"name": "Maxipelis",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["cast", "lat"],
|
||||
"thumbnail": "http://www.maxipelis.net/wp-content/uploads/2016/12/applogo.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,152 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Alfa
|
||||
# ------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
|
||||
from core import jsontools as json
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from core import httptools
|
||||
from core import tmdb
|
||||
|
||||
host = 'http://www.maxipelis.net'
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's top-level menu: latest movies, categories and search."""
    logger.info()

    menu = [
        ("Novedades", "peliculas", host + "/pelicula"),
        ("Categorias", "categorias", host),
    ]
    itemlist = [Item(channel=item.channel, title=title, action=action, url=url)
                for title, action, url in menu]
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Channel search entry point.

    Builds the site search URL from ``texto`` (spaces become '+') and
    delegates the scraping to sub_search(). On any scraping failure the
    exception info is logged and an empty list is returned so the global
    search does not abort.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto

    try:
        return sub_search(item)
    except Exception:
        # Catch Exception instead of a bare except so SystemExit and
        # KeyboardInterrupt still propagate. 'sys' is already imported
        # at module level, so the redundant local import is dropped.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def sub_search(item):
    """Scrape one search-results page and return its entries as Items.

    A 'Next page >>' Item is appended when the site exposes a pagination
    link.
    """
    logger.info()
    itemlist = []

    html = httptools.downloadpage(item.url).data
    html = re.sub(r"\n|\r|\t| |<br>", "", html)

    patron = ('<div class="thumbnail animation-2"> <a href="([^"]+)"> <img src="([^"]+)" alt="(.*?)" />.*?'
              '<div class="contenido"><p>(.*?)</p>')
    for url, img, name, plot in scrapertools.find_multiple_matches(html, patron):
        itemlist.append(item.clone(channel=item.channel, action="findvideos", title=name, url=url, plot=plot,
                                   thumbnail=img))

    next_url = scrapertools.find_single_match(html, '<div class=\'resppages\'><a href="([^"]+)" ><span class="'
                                                    'icon-chevron-right"></span>')
    if next_url:
        itemlist.append(Item(channel=item.channel, action="sub_search", title="Next page >>", url=next_url))

    return itemlist
|
||||
|
||||
def categorias(item):
    """List the site's category links as browseable Items."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    matches = re.compile('<li class="cat-item"><a href="([^"]+)".*?>(.*?)</a>', re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for cat_url, cat_title in matches:
        # NOTE(review): the scraped href is prefixed with host — confirm the
        # site emits site-relative category links, otherwise this doubles
        # the domain in the resulting URL.
        itemlist.append(Item(channel=item.channel, action="peliculas", title=cat_title, url=host + cat_url,
                             thumbnail="", plot=""))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """Scrape one listing page of movies and return them as Items.

    Titles are decorated with year and quality when both were scraped;
    infoLabels are enriched via tmdb afterwards. A 'Siguiente >>' Item is
    appended when a pagination link exists.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = ('<div class="poster">.*?src="(.*?)" alt="(.*?)">.*?'
              '"quality">(.*?)<.*?href="(.*?)".*?<span>(\\d{4}).*?"texto">(.*?)<.*?')
    for thumb, name, quality, url, year, plot in re.compile(patron, re.DOTALL).findall(data):
        # Decorate only when both fields were actually scraped.
        if quality and year:
            title = name + " (" + year + ") " + "[COLOR red]" + quality + "[/COLOR]"
        else:
            title = name

        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                             plot=plot, contentTitle=name, infoLabels={'year': year}))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination is best effort: a missing link simply means no extra Item.
    try:
        next_pages = re.compile('<a href="([^"]+)" ><span class="icon-chevron-right"></span></a></div>',
                                re.DOTALL).findall(data)
        itemlist.append(Item(channel=item.channel, action="peliculas", title="Siguiente >>", text_color="yellow",
                             url=next_pages[0]))
    except:
        pass

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """List the playable mirrors found on a movie page.

    The server name is resolved from the link URL via servertools rather
    than trusted from the page's own table cell. When the videolibrary is
    enabled, an 'add to library' Item is appended.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<tr><td> <a class="link_a" href="([^"]+)".*?<td> (.*?)</td><td> (.*?)</td><td> (.*?)</td>'
    for url, _unused_server, calidad, idioma in scrapertools.find_multiple_matches(data, patron):
        server = servertools.get_server_from_url(url)
        title = '%s [%s] [%s] [%s]' % (item.contentTitle, server, calidad, idioma)
        itemlist.append(item.clone(action="play", title=title, fulltitle=item.title, url=url, language=idioma,
                                   contentTitle=item.contentTitle, quality=calidad, server=server))

    if config.get_videolibrary_support() and itemlist and item.extra != 'findvideos':
        itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Agregar esta pelicula a la Videoteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library", extra="findvideos",
                             contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
# def play(item):
|
||||
# logger.info()
|
||||
# itemlist = servertools.find_video_items(data=item.url)
|
||||
#
|
||||
# for videoitem in itemlist:
|
||||
# videoitem.title = item.title
|
||||
# videoitem.fulltitle = item.fulltitle
|
||||
# videoitem.thumbnail = item.thumbnail
|
||||
# videoitem.channel = item.channel
|
||||
# videoitem.
|
||||
# return itemlist
|
||||
@@ -46,20 +46,20 @@ def mainlist(item):
|
||||
url= host,
|
||||
thumbnail="http://imgur.com/fN2p6qH.png", fanart="http://imgur.com/b8OuBR2.jpg",
|
||||
contentType="movie"))
|
||||
# itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Series[/B][/COLOR]", action="scraper",
|
||||
# url= host + "/lista-de-series",
|
||||
# thumbnail="http://imgur.com/Jia27Uc.png", fanart="http://imgur.com/b8OuBR2.jpg",
|
||||
# contentType="tvshow"))
|
||||
itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Buscar[/B][/COLOR]",
|
||||
itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Series[/B][/COLOR]", action="scraper",
|
||||
url= host + "/lista-de-series",
|
||||
thumbnail="http://imgur.com/Jia27Uc.png", fanart="http://imgur.com/b8OuBR2.jpg",
|
||||
contentType="tvshow"))
|
||||
itemlist.append(itemlist[-1].clone(title="[COLOR lightskyblue][B]Buscar[/B][/COLOR]", action = "",
|
||||
thumbnail="http://imgur.com/mwTwfN7.png", fanart="http://imgur.com/b8OuBR2.jpg"))
|
||||
itemlist.append(
|
||||
itemlist[-1].clone(title="[COLOR lightskyblue][B] Buscar Película[/B][/COLOR]", action="search", url="",
|
||||
thumbnail="http://imgur.com/mwTwfN7.png", fanart="http://imgur.com/b8OuBR2.jpg",
|
||||
contentType="movie"))
|
||||
# itemlist.append(
|
||||
# itemlist[-1].clone(title="[COLOR lightskyblue][B] Buscar Serie[/B][/COLOR]", action="search", url="",
|
||||
# thumbnail="http://imgur.com/mwTwfN7.png", fanart="http://imgur.com/b8OuBR2.jpg",
|
||||
# contentType="tvshow"))
|
||||
itemlist.append(
|
||||
itemlist[-1].clone(title="[COLOR lightskyblue][B] Buscar Serie[/B][/COLOR]", action="search", url="",
|
||||
thumbnail="http://imgur.com/mwTwfN7.png", fanart="http://imgur.com/b8OuBR2.jpg",
|
||||
contentType="tvshow"))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -119,11 +119,11 @@ def scraper(item):
|
||||
patron += 'alt="([^"]+)".*?'
|
||||
patron += '">([^<]+)<.*?'
|
||||
patron += '<div class="l">(.*?)<\/a><h3>.*?'
|
||||
#patron += '<\/a><\/h3> <span>(.*?)<'
|
||||
patron += '<\/a><\/h3> <span>(.*?)<'
|
||||
action = "findvideos"
|
||||
matches = scrapertools.find_multiple_matches(bloque_enlaces, patron)
|
||||
for url, thumb, title, quality, check_idioma in matches:
|
||||
#year = year.strip()
|
||||
for url, thumb, title, quality, check_idioma, year in matches:
|
||||
year = year.strip()
|
||||
title_fan = title
|
||||
title_item = "[COLOR cornflowerblue][B]" + title + "[/B][/COLOR]"
|
||||
if item.contentType != "movie":
|
||||
@@ -140,20 +140,20 @@ def scraper(item):
|
||||
title = title
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, title=title, fulltitle=title, url=host + url, action=action, thumbnail=thumb,
|
||||
fanart="http://imgur.com/nqmJozd.jpg", extra=title_fan + "|" + title_item + "|", show=title,
|
||||
contentType=item.contentType, folder=True, language = idiomas))
|
||||
fanart="http://imgur.com/nqmJozd.jpg", extra=title_fan + "|" + title_item + "|" + year, show=title,
|
||||
contentType=item.contentType, folder=True, language = idiomas, infoLabels={"year":year}))
|
||||
## Paginación
|
||||
#tmdb.set_infoLabels(itemlist)
|
||||
#if year:
|
||||
next = scrapertools.find_single_match(data, 'href="([^"]+)" title="Siguiente página">')
|
||||
if len(next) > 0:
|
||||
url = next
|
||||
if not "http" in url:
|
||||
url = host + url
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="scraper", title="[COLOR floralwhite][B]Siguiente[/B][/COLOR]",
|
||||
url=url, thumbnail="http://imgur.com/jhRFAmk.png", fanart="http://imgur.com/nqmJozd.jpg",
|
||||
extra=item.extra, contentType=item.contentType, folder=True))
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
if year:
|
||||
next = scrapertools.find_single_match(data, 'href="([^"]+)" title="Siguiente página">')
|
||||
if len(next) > 0:
|
||||
url = next
|
||||
if not "http" in url:
|
||||
url = host + url
|
||||
itemlist.append(
|
||||
Item(channel=item.channel, action="scraper", title="[COLOR floralwhite][B]Siguiente[/B][/COLOR]",
|
||||
url=url, thumbnail="http://imgur.com/jhRFAmk.png", fanart="http://imgur.com/nqmJozd.jpg",
|
||||
extra=item.extra, contentType=item.contentType, folder=True))
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
@@ -423,11 +423,11 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='es', lock=None):
|
||||
|
||||
otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda,
|
||||
filtro=item.infoLabels.get('filtro', {}), year=item.infoLabels['year'])
|
||||
|
||||
if otmdb.get_id() and config.get_setting("tmdb_plus_info", default=False):
|
||||
# Si la busqueda ha dado resultado y no se esta buscando una lista de items,
|
||||
# realizar otra busqueda para ampliar la informacion
|
||||
otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
|
||||
if otmdb is not None:
|
||||
if otmdb.get_id() and config.get_setting("tmdb_plus_info", default=False):
|
||||
# Si la busqueda ha dado resultado y no se esta buscando una lista de items,
|
||||
# realizar otra busqueda para ampliar la informacion
|
||||
otmdb = Tmdb(id_Tmdb=otmdb.result.get("id"), tipo=tipo_busqueda, idioma_busqueda=idioma_busqueda)
|
||||
|
||||
if lock and lock.locked():
|
||||
lock.release()
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
"ignore_urls": [],
|
||||
"patterns": [
|
||||
{
|
||||
"pattern": "userscloud.com/(?:embed-|)([A-z0-9]+)",
|
||||
"pattern": "userscloud.com/(?:embed-|embed/|)([A-z0-9]+)",
|
||||
"url": "http://userscloud.com/\\1"
|
||||
}
|
||||
]
|
||||
@@ -43,4 +43,4 @@
|
||||
}
|
||||
],
|
||||
"thumbnail": "http://i.imgur.com/u4W2DgA.png?1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,18 +19,18 @@ def test_video_exists(page_url):
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
|
||||
logger.info("url=" + page_url)
|
||||
|
||||
video_urls = []
|
||||
unpacked = ""
|
||||
data = httptools.downloadpage(page_url).data
|
||||
packed = scrapertools.find_single_match(data, "function\(p,a,c,k.*?</script>")
|
||||
unpacked = jsunpack.unpack(packed)
|
||||
if packed:
|
||||
unpacked = jsunpack.unpack(packed)
|
||||
media_url = scrapertools.find_single_match(unpacked, 'src"value="([^"]+)')
|
||||
if not media_url:
|
||||
id_ = page_url.rsplit("/", 1)[1]
|
||||
rand = scrapertools.find_single_match(data, 'name="rand" value="([^"]+)"')
|
||||
post = "op=download2&id=%s&rand=%s&referer=%s&method_free=&method_premium=" % (id_, rand, page_url)
|
||||
data = httptools.downloadpage(page_url, post).data
|
||||
|
||||
media_url = scrapertools.find_single_match(data, '<div id="dl_link".*?<a href="([^"]+)"')
|
||||
|
||||
ext = scrapertools.get_filename_from_url(media_url)[-4:]
|
||||
|
||||
Reference in New Issue
Block a user