Merge pull request #191 from Intel11/actualizados

Updated
Authored by Alfa on 2017-12-30 10:18:00 -05:00; committed by GitHub
31 changed files with 292 additions and 833 deletions

View File

@@ -0,0 +1,62 @@
{
"id": "ciberpeliculashd",
"name": "Ciberpeliculashd",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s17.postimg.org/78tekxeov/ciberpeliculashd1.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}
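Each block in "settings" above becomes a per-channel option that the channel module reads back at runtime; a minimal sketch of that lookup, reusing the config.get_setting call the Python file below also relies on (the hedged comment marks the assumed behavior):

# Minimal sketch (assumed usage; signature matches ciberpeliculashd.py below).
from platformcode import config
include_global = config.get_setting("include_in_global_search", "ciberpeliculashd")
if include_global:
    pass  # presumably the channel then takes part in the global search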

View File

@@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
__channel__ = 'ciberpeliculashd'
host = "http://ciberpeliculashd.net"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host + "/?peli=1"))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "filtro", url = host, extra = "categories" ))
itemlist.append(Item(channel = item.channel, title = "Por calidad", action = "filtro", url = host, extra = "qualitys"))
itemlist.append(Item(channel = item.channel, title = "Por idioma", action = "filtro", url = host, extra = "languages"))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s="))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host + "/?peli=1"
elif categoria == 'infantiles':
item.url = host + '/categories/animacion/?peli=1'
elif categoria == 'terror':
item.url = host + '/categories/terror/?peli=1'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto + "&peli=1"
item.extra = "busca"
if texto != '':
return peliculas(item)
else:
return []
def filtro(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'uk-navbar-nav-subtitle taxonomy-menu-title">%s.*?</ul>' %item.extra
bloque = scrapertools.find_single_match(data, patron)
patron = "href='([^']+)"
patron += "'>([^<]+)"
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo in matches:
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = titulo,
url = url + "/?peli=1"
))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom')
patron = 'a href="([^"]+)".*?'
patron += 'img alt="([^"]+)".*?'
patron += '((?:http|https)://image.tmdb.org[^"]+)".*?'
patron += 'a href="([^"]+)".*?'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedurl1 in matches:
scrapedtitle = scrapedtitle.replace(" Online imagen","").replace("Pelicula ","")
year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]+)\)")
if year:
year = int(year)
else:
year = 0
fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
itemlist.append(Item(action = "findvideos",
channel = item.channel,
fulltitle = fulltitle,
thumbnail = scrapedthumbnail,
infoLabels = {'year': year},
title = scrapedtitle,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
next_page = scrapertools.find_single_match(item.url,".*?peli=")
next_page += "%s" %page
itemlist.append(Item(action = "peliculas",
channel = item.channel,
title = "Página siguiente",
url = next_page
))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'src=&quot;([^&]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
title = "Ver en: %s"
itemlist.append(item.clone(action = "play",
title = title,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# Option to add this movie to the KODI video library
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
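The "Ver en: %s" placeholders built in findvideos stay unresolved until servertools.get_servers_itemlist detects each item's server and applies the lambda; a small stand-alone sketch of that last step (server detection itself is assumed, _Item is a stand-in):

# Stand-alone sketch; _Item mimics only the fields the lambda touches.
class _Item(object):
    def __init__(self, title, server):
        self.title = title
        self.server = server

i = _Item("Ver en: %s", "openload")    # server as detected from the URL
print i.title % i.server.capitalize()  # -> Ver en: Openload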

View File

@@ -343,12 +343,14 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
def play(item):
logger.info()
itemlist = []
if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url:
if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
item.url = "http://docs.google.com/get_video_info?docid=" + id
if item.server == "okru":
item.url = "https://ok.ru/videoembed/" + id
if item.server == "youtube":
item.url = "https://www.youtube.com/embed/" + id
elif "links" in item.url or "www.cinetux.me" in item.url:
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')

View File

@@ -260,14 +260,16 @@ def findvideos(item):
item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
link = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
if link != "":
link = "http://www.divxatope1.com/" + link
logger.info("torrent=" + link)
al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
if al_url_fa == "":
al_url_fa = scrapertools.find_single_match(data,
'location\.href.*?=.*?"http:\/\/divxatope1.com/(.*?)"')
if al_url_fa != "":
al_url_fa = "http://www.divxatope1.com/" + al_url_fa
logger.info("torrent=" + al_url_fa)
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
parentContent=item))
patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'

View File

@@ -6,21 +6,18 @@ from core import httptools
from core import scrapertools
from platformcode import config, logger
host = "http://www.javtasty.com"
host = "https://www.javwhores.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/videos"))
itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/videos?o=tr"))
itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/videos?o=mv"))
itemlist.append(item.clone(action="lista", title="Ordenados por duración", url=host + "/videos?o=lg"))
itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories"))
itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
@@ -33,7 +30,7 @@ def configuracion(item):
def search(item, texto):
logger.info()
item.url = "%s/search?search_query=%s&search_type=videos" % (host, texto)
item.url = "%s/search/%s/" % (host, texto)
item.extra = texto
try:
return lista(item)
@@ -48,83 +45,66 @@ def search(item, texto):
def lista(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
action = "play"
if config.get_setting("menu_info", "javtasty"):
action = "menu_info"
# Extract the entries
patron = '<div class="well wellov well-sm".*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"(.*?)<div class="duration">(?:.*?</i>|)\s*([^<]+)<'
patron = 'div class="video-item.*?href="([^"]+)".*?'
patron += 'data-original="([^"]+)" '
patron += 'alt="([^"]+)"(.*?)fa fa-clock-o"></i>([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, quality, duration in matches:
scrapedurl = urlparse.urljoin(host, scrapedurl)
scrapedtitle = scrapedtitle.strip()
if duration:
scrapedtitle = "%s - %s" % (duration.strip(), scrapedtitle)
if '>HD<' in quality:
scrapedtitle += " [COLOR red][HD][/COLOR]"
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extract the next-page marker
next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="prevnext">')
next_page = scrapertools.find_single_match(data, 'next"><a href="([^"]+)')
if next_page:
next_page = next_page.replace("&amp;", "&")
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=host + next_page))
return itemlist
def categorias(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<div class="col-sm-4.*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"'
patron = '(?s)<a class="item" href="([^"]+)".*?'
patron += 'src="([^"]+)" '
patron += 'alt="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedurl = urlparse.urljoin(host, scrapedurl)
scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
videourl = scrapertools.find_single_match(data, "var video_sd\s*=\s*'([^']+)'")
videourl = scrapertools.find_single_match(data, "video_url:\s*'([^']+)'")
if videourl:
itemlist.append(['.mp4 [directo]', videourl])
videourl = scrapertools.find_single_match(data, "var video_hd\s*=\s*'([^']+)'")
videourl = scrapertools.find_single_match(data, "video_alt_url:\s*'([^']+)'")
if videourl:
itemlist.append(['.mp4 HD [directo]', videourl])
if item.extra == "play_menu":
return itemlist, data
return itemlist
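# Note: when called with item.extra == "play_menu" (as menu_info does below),
# play() also hands back the downloaded page, so menu_info can reuse the HTML
# without a second request.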
def menu_info(item):
logger.info()
itemlist = []
video_urls, data = play(item.clone(extra="play_menu"))
itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls))
bloque = scrapertools.find_single_match(data, '<div class="carousel-inner"(.*?)<div class="container">')
matches = scrapertools.find_multiple_matches(bloque, 'src="([^"]+)"')
for i, img in enumerate(matches):
@@ -132,5 +112,4 @@ def menu_info(item):
continue
title = "Imagen %s" % (str(i))
itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))
return itemlist

View File

@@ -3,16 +3,18 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
host = "https://www.serviporno.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="videos", title="Últimos videos", url="http://www.serviporno.com/"))
Item(channel=item.channel, action="videos", title="Últimos videos", url=host))
itemlist.append(
Item(channel=item.channel, action="videos", title="Más vistos", url="http://www.serviporno.com/mas-vistos/"))
itemlist.append(
@@ -43,15 +45,14 @@ def search(item, texto):
def videos(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<div class="wrap-box-escena">.*?'
patron = '(?s)<div class="wrap-box-escena">.*?'
patron += '<div class="box-escena">.*?'
patron += '<a href="([^"]+)" data-stats-video-id="[^"]+" data-stats-video-name="([^"]+)" data-stats-video-category="[^"]*" data-stats-list-name="[^"]*" data-stats-list-pos="[^"]*">.*?'
patron += '<img src="([^"]+)" data-src="[^"]+" alt="[^"]+" id=\'[^\']+\' class="thumbs-changer" data-thumbs-prefix="[^"]+" height="150px" width="175px" border=0 />'
matches = re.compile(patron, re.DOTALL).findall(data)
logger.info(str(matches))
patron += '<a\s*href="([^"]+)".*?'
patron += 'data-stats-video-name="([^"]+)".*?'
patron += '<img\s*src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, thumbnail in matches:
url = urlparse.urljoin(item.url, url)
itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail))
@@ -106,10 +107,9 @@ def categorias(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
url = scrapertools.get_match(data, "url: '([^']+)',\s*framesURL:")
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, "sendCdnInfo.'([^']+)")
itemlist.append(
Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail,
plot=item.plot, folder=False))
return itemlist

View File

@@ -1,12 +0,0 @@
{
"id": "teledocumentales",
"name": "Teledocumentales",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "teledocumentales.png",
"thumbnail": "teledocumentales.png",
"categories": [
"documentary"
]
}

View File

@@ -1,109 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="ultimo", title="Últimos Documentales",
url="http://www.teledocumentales.com/", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="ListaCat", title="Listado por Genero",
url="http://www.teledocumentales.com/"))
return itemlist
def ultimo(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
# Extract the entries
patron = '<div class="imagen"(.*?)<div style="clear.both">'
matches = re.compile(patron, re.DOTALL).findall(data)
print "manolo"
print matches
for match in matches:
scrapedtitle = scrapertools.get_match(match, '<img src="[^"]+" alt="([^"]+)"')
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
scrapedurl = scrapertools.get_match(match, '<a href="([^"]+)"')
scrapedthumbnail = scrapertools.get_match(match, '<img src="([^"]+)" alt="[^"]+"')
scrapedplot = scrapertools.get_match(match, '<div class="excerpt">([^<]+)</div>')
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedplot, fanart=scrapedthumbnail))
# Extract the next-page marker
try:
next_page = scrapertools.get_match(data, '<a class="next" href="([^"]+)">')
itemlist.append(Item(channel=item.channel, action="ultimo", title=">> Página siguiente",
url=urlparse.urljoin(item.url, next_page), viewmode="movie_with_plot"))
except:
pass
return itemlist
def ListaCat(item):
logger.info()
url = item.url
data = scrapertools.cachePage(url)
# Extract the entries (folders)
patron = '<div id="menu_horizontal">(.*?)<div class="cuerpo">'
matches = re.compile(patron, re.DOTALL).findall(data)
logger.info("hay %d matches" % len(matches))
itemlist = []
for match in matches:
data2 = match
patron = '<li class="cat-item cat-item-.*?<a href="(.*?)".*?>(.*?)</a>.*?</li>'
matches2 = re.compile(patron, re.DOTALL).findall(data2)
logger.info("hay %d matches2" % len(matches2))
for match2 in matches2:
scrapedtitle = match2[1].replace("&#8211;", "-").replace("&amp;", "&").strip()
scrapedurl = match2[0]
scrapedthumbnail = match2[0].replace(" ", "%20")
scrapedplot = ""
itemlist.append(Item(channel=item.channel, action="ultimo", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, fanart=scrapedthumbnail,
viewmode="movie_with_plot"))
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
urlvideo = scrapertools.get_match(data, '<!-- end navigation -->.*?<iframe src="([^"]+)"')
data = scrapertools.cachePage(urlvideo)
url = scrapertools.get_match(data, 'iframe src="([^"]+)"')
itemlist = servertools.find_video_items(data=url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.channel = item.channel
return itemlist

View File

@@ -82,11 +82,11 @@ def series(item):
for scrapedurl, scrapedthumbnail, scrapedepisodes, year, scrapedtitle in matches:
scrapedepisodes = scrapedepisodes.strip()
year = year.strip()
contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s (%s)" %(contentTitle, scrapedepisodes)
contentSerieName = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s (%s)" %(contentSerieName, scrapedepisodes)
if "series" in scrapedurl:
itemlist.append(Item(channel=item.channel, action="temporadas", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, show=contentTitle,
thumbnail=scrapedthumbnail, contentSerieName=contentSerieName,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic data for every title using multiple threads
tmdb.set_infoLabels(itemlist, True)
@@ -123,6 +123,22 @@ def temporadas(item):
url = HOST_TVSHOWS_TPL
))
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title =""))
itemlist.append(item.clone(action = "add_serie_to_library",
channel = item.channel,
extra = "episodios",
title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url = item.url
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += capitulos(tempitem)
return itemlist
@@ -138,13 +154,13 @@ def capitulos(item):
for scrapedurl, scrapedtitle, scrapeddate in matches:
scrapedtitle = scrapedtitle + " (%s)" %scrapeddate
episode = scrapertools.find_single_match(scrapedurl, "capitulo-([0-9]+)")
query = item.show + " " + str(item.infoLabels["season"]) + "x" + episode.rjust(2, "0")
query = item.contentSerieName + " " + scrapertools.find_single_match(scrapedtitle, "\w+")
item.infoLabels["episode"] = episode
itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle.decode("unicode-escape"),
query = query.replace(" ","+"),
url = scrapedurl.replace("\\","")
))
tmdb.set_infoLabels(itemlist)
return itemlist
@@ -237,8 +253,8 @@ def peliculas(item):
contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
infoLabels={"year": year}, text_color=color1, query = query))
thumbnail=scrapedthumbnail, contentTitle=contentTitle, query = query,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic data for every movie using multiple threads
tmdb.set_infoLabels(itemlist)
@@ -281,6 +297,8 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
patron = '(?s)id="online".*?server="([^"]+)"'
mserver = scrapertools.find_single_match(data, patron)
if not item.query:
item.query = scrapertools.find_single_match(item.url, "peliculas.*?/[0-9]+/([^/]+)").replace("-","+")
url_m = "http://olimpo.link/?q=%s&server=%s" %(item.query, mserver)
patron = 'class="favicon.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
patron += '\[([^\]]+)\].*?\[([^\]]+)\]'

View File

@@ -23,7 +23,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")
# Default headers, used when nothing else is specified
default_headers = dict()
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"

View File

@@ -268,8 +268,9 @@ def save_tvshow(item, episodelist):
# Create tvshow.nfo, if it does not exist, with the head_nfo, the series info and watched-episode marks
logger.info("Creando tvshow.nfo: " + tvshow_path)
head_nfo = scraper.get_nfo(item)
item_tvshow = Item(title=item.contentTitle, channel="videolibrary", action="get_seasons",
item.infoLabels['mediatype'] = "tvshow"
item.infoLabels['title'] = item.contentSerieName
item_tvshow = Item(title=item.contentSerieName, channel="videolibrary", action="get_seasons",
fanart=item.infoLabels['fanart'], thumbnail=item.infoLabels['thumbnail'],
infoLabels=item.infoLabels, path=path.replace(TVSHOWS_PATH, ""))
item_tvshow.library_playcounts = {}
@@ -294,7 +295,6 @@ def save_tvshow(item, episodelist):
if item.channel != "downloads":
item_tvshow.active = 1  # so it refreshes daily when videolibrary_service runs
filetools.write(tvshow_path, head_nfo + item_tvshow.tojson())
if not episodelist:
@@ -439,7 +439,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
news_in_playcounts["season %s" % e.contentSeason] = 0
# Mark the series as unwatched
# logger.debug("serie " + serie.tostring('\n'))
news_in_playcounts[serie.contentTitle] = 0
news_in_playcounts[serie.contentSerieName] = 0
else:
logger.info("Sobreescrito: %s" % json_path)

View File

@@ -8,7 +8,7 @@
"url": "https://www.bitporno.com/e/\\1"
},
{
"pattern": "raptu.com/(?:\\?v=|embed/|e/)([A-z0-9]+)",
"pattern": "raptu.com/(?:\\?v=|embed/|e/|v/)([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
}
]

View File

@@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
videourl = scrapertools.find_multiple_matches(data, 'file":"([^"]+).*?label":"([^"]+)')
videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?data-res="([^"]+)')
scrapertools.printMatches(videourl)
for scrapedurl, scrapedquality in videourl:
if "loadthumb" in scrapedurl:

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:divxstage|cloudtime).[^/]+/video/([^\"' ]+)",
"url": "http://www.cloudtime.to/embed/?v=\\1"
}
]
},
"free": true,
"id": "divxstage",
"name": "divxstage",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_divxstage.png"
}

View File

@@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
host = "http://www.cloudtime.to"
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url.replace('/embed/?v=', '/video/')).data
if "This file no longer exists" in data:
return False, "El archivo no existe<br/>en divxstage o ha sido borrado."
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
if "divxstage.net" in page_url:
page_url = page_url.replace("divxstage.net", "cloudtime.to")
data = httptools.downloadpage(page_url).data
video_urls = []
videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
if not videourls:
videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
for videourl in videourls:
if videourl.endswith(".mpd"):
id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
videourl = "http://www.cloudtime.to/download.php%3Ffile=mm" + "%s.mp4" % id
videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
ext = scrapertools.get_filename_from_url(videourl)[-4:]
videourl = videourl.replace("%3F", "?") + \
"|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
video_urls.append([ext + " [cloudtime]", videourl])
return video_urls

View File

@@ -9,8 +9,6 @@ from platformcode import logger
def test_video_exists(page_url):
if 'googleusercontent' in page_url:
return True, ""
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
if "no+existe" in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
@@ -22,6 +20,8 @@ def test_video_exists(page_url):
return False, "[gvideo] Se ha producido un error en el reproductor de google"
if "No+se+puede+procesar+este" in response.data:
return False, "[gvideo] No se puede procesar este video"
if response.code == 429:
return False, "[gvideo] Demasiadas conexiones al servidor, inténtelo después"
return True, ""

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "idowatch.net/(?:embed-)?([a-z0-9]+)",
"url": "http://idowatch.net/\\1.html"
}
]
},
"free": true,
"id": "idowatch",
"name": "idowatch",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_idowatch.png"
}

View File

@@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
if "File Not Found" in data:
return False, "[Idowatch] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
mediaurl = scrapertools.find_single_match(data, ',{file:(?:\s+|)"([^"]+)"')
if not mediaurl:
matches = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
matchjs = jsunpack.unpack(matches).replace("\\", "")
mediaurl = scrapertools.find_single_match(matchjs, ',{file:(?:\s+|)"([^"]+)"')
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(mediaurl)[-4:] + " [idowatch]", mediaurl])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "nosvideo.com/(?:\\?v=|vj/video.php\\?u=|)([a-z0-9]+)",
"url": "http://nosvideo.com/vj/videomain.php?u=\\1==530"
},
{
"pattern": "nosupload.com(/\\?v\\=[a-z0-9]+)",
"url": "http://nosvideo.com\\1"
}
]
},
"free": true,
"id": "nosvideo",
"name": "nosvideo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,41 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
if "404 Page no found" in data:
return False, "[nosvideo] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
# Fetch the URL
data = scrapertools.cache_page(page_url)
urls = scrapertools.find_multiple_matches(data, ":'(http:\/\/.+?(?:v.mp4|.smil))")
urls = set(urls)
for media_url in urls:
if ".smil" in media_url:
data = scrapertools.downloadpage(media_url)
rtmp = scrapertools.find_single_match(data, '<meta base="([^"]+)"')
playpath = scrapertools.find_single_match(data, '<video src="([^"]+)"')
media_url = rtmp + " playpath=" + playpath
filename = "rtmp"
else:
filename = scrapertools.get_filename_from_url(media_url)[-4:]
video_urls.append([filename + " [nosvideo]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
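# For ".smil" results the final media_url is assembled as
# "<rtmp base> playpath=<video src>", the single-string form RTMP players
# accept; an illustration with hypothetical values:
#   rtmp      = "rtmp://cdn.nosvideo.com/vod"   # from <meta base="...">
#   playpath  = "mp4:files/abc/v.mp4"           # from <video src="...">
#   media_url = rtmp + " playpath=" + playpath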

View File

@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(nowdownload.\\w{2}]/dl/[a-z0-9]+)",
"url": "http://www.\\1"
}
]
},
"free": false,
"id": "nowdownload",
"name": "nowdownload",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_nowdownload.png"
}

View File

@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
'''
<a href="http://f02.nowdownload.co/dl/91efaa9ec507ef4de023cd62bb9a0fe2/50ab76ac/6711c9c90ebf3_family.guy.s11e02.italian.subbed.hdtv.xvid_gannico.avi" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>
'''
data = scrapertools.cache_page(page_url)
logger.debug("data:" + data)
try:
url = scrapertools.get_match(data,
'<a href="([^"]*)" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>')
except:
# $.get("/api/token.php?token=7e1ab09df2775dbea02506e1a2651883");
token = scrapertools.get_match(data, '(/api/token.php\?token=[^"]*)')
logger.debug("token:" + token)
d = scrapertools.cache_page("http://www.nowdownload.co" + token)
url = scrapertools.get_match(data, 'expiryText: \'<a class="btn btn-danger" href="([^"]*)')
logger.debug("url_1:" + url)
data = scrapertools.cache_page("http://www.nowdownload.co" + url)
logger.debug("data:" + data)
# <a href="http://f03.nowdownload.co/dl/8ec5470153bb7a2177847ca7e1638389/50ab71b3/f92882f4d33a5_squadra.antimafia_palermo.oggi.4x01.episodio.01.ita.satrip.xvid_upz.avi" class="btn btn-success">Click here to download !</a>
url = scrapertools.get_match(data, '<a href="([^"]*)" class="btn btn-success">Click here to download !</a>')
logger.debug("url_final:" + url)
video_urls = [url]
return video_urls

View File

@@ -1,32 +0,0 @@
{
"active": true,
"free": true,
"id": "pcloud",
"name": "pcloud",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
if "Invalid link" in data: return False, "[pCloud] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = scrapertools.cache_page(page_url)
media_url = scrapertools.find_single_match(data, '"downloadlink":.*?"([^"]+)"')
media_url = media_url.replace("\\", "")
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [pCloud]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,49 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://stagevu.com/video/[A-Z0-9a-z]+)",
"url": "\\1"
},
{
"pattern": "http://stagevu.com.*?uid\\=([A-Z0-9a-z]+)",
"url": "http://stagevu.com/video/\\1"
},
{
"pattern": "http://[^\\.]+\\.stagevu.com/v/[^/]+/(.*?).avi",
"url": "http://stagevu.com/video/\\1"
}
]
},
"free": true,
"id": "stagevu",
"name": "stagevu",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import scrapertools
from platformcode import logger
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
# Download the video page
data = scrapertools.cache_page(page_url)
# Look for the video in two different ways
patronvideos = '<param name="src" value="([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
video_urls = [["[stagevu]", matches[0]]]
else:
patronvideos = 'src="([^"]+stagevu.com/[^i][^"]+)"'  # Form src="XXXstagevu.com/ with something other than "i" next, to skip images and includes
matches = re.findall(patronvideos, data)
if len(matches) > 0:
video_urls = [["[stagevu]", matches[0]]]
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "stormo.tv/(?:videos/|embed/)([0-9]+)",
"url": "http://stormo.tv/embed/\\1"
}
]
},
"free": true,
"id": "stormo",
"name": "stormo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/mTYCw5E.png"
}

View File

@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if "video_error.mp4" in response.data:
return False, "[Stormo] El archivo no existe o ha sido borrado"
if response.code == 451:
return False, "[Stormo] El archivo ha sido borrado por problemas legales."
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info(" url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
media_url = scrapertools.find_single_match(data, "file\s*:\s*['\"]([^'\"]+)['\"]")
if media_url.endswith("/"):
media_url = media_url[:-1]
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [stormo]", media_url])
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "turbovideos.net/embed-([a-z0-9A-Z]+)",
"url": "http://turbovideos.net/embed-\\1.html"
}
]
},
"free": true,
"id": "turbovideos",
"name": "turbovideos",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}

View File

@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
if "embed" not in page_url:
page_url = page_url.replace("http://turbovideos.net/", "http://turbovideos.net/embed-") + ".html"
data = scrapertools.cache_page(page_url)
logger.info("data=" + data)
data = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
logger.info("data=" + data)
data = jsunpack.unpack(data)
logger.info("data=" + data)
video_urls = []
# {file:"http://ultra.turbovideos.net/73ciplxta26xsbj2bqtkqcd4rtyxhgx5s6fvyzed7ocf4go2lxjnd6e5kjza/v.mp4",label:"360"
media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)",label:"([^"]+)"')
for media_url, label in media_urls:
if not media_url.endswith("srt"):
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " " + label + " [turbovideos]", media_url])
return video_urls

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -7,35 +8,28 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
if "This video has been removed from public access" in data:
if "This video has been removed from public access" in data or "Video not found." in data:
return False, "El archivo ya no esta disponible<br/>en VK (ha sido borrado)"
else:
return True, ""
return True, ""
# Returns an array of possible video URLs from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
try:
oid, id = scrapertools.find_single_match(page_url, 'oid=([^&]+)&id=(\d+)')
except:
oid, id = scrapertools.find_single_match(page_url, 'video(\d+)_(\d+)')
from core import httptools
headers = {'User-Agent': 'Mozilla/5.0'}
url = "http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s" % (oid, id)
data = httptools.downloadpage(url, headers=headers).data
matches = scrapertools.find_multiple_matches(data, '<source src="([^"]+)" type="video/(\w+)')
for media_url, ext in matches:
calidad = scrapertools.find_single_match(media_url, '(\d+)\.%s' % ext)
video_urls.append(["." + ext + " [vk:" + calidad + "]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
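# The two patterns above cover both VK URL shapes; hypothetical examples of
# what each yields:
#   "video_ext.php?oid=12345&id=67890" -> oid "12345", id "67890"
#   "vk.com/video12345_67890"          -> oid "12345", id "67890"
# Either pair then feeds the al_video.php request built from them.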