Merge pull request #213 from Intel11/actualizados

Actualizados
This commit is contained in:
Alfa
2018-03-02 16:07:30 -05:00
committed by GitHub
13 changed files with 61 additions and 547 deletions

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
@@ -20,12 +21,12 @@ except:
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero" ))
itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<"))
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "/favorites" ))
itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Por género", action = "generos_years", url = host, extra = "Genero", thumbnail = get_thumb("genres", auto = True) ))
itemlist.append(Item(channel = item.channel, title = "Por año", action = "generos_years", url = host, extra = ">Año<", thumbnail = get_thumb("year", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Favoritas", action = "peliculas", url = host + "/favorites", thumbnail = get_thumb("favorites", auto = True) ))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s="))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
return itemlist
def newest(categoria):

View File

@@ -146,8 +146,6 @@ def scraper(item):
except:
pass
for item_tmdb in itemlist:
logger.info(str(item_tmdb.infoLabels['tmdb_id']))
return itemlist

View File

@@ -7,7 +7,8 @@ from core.item import Item
from platformcode import config, logger
host = "http://gnula.nu/"
host_search = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=small&num=10&hl=es&prettyPrint=false&source=gcsc&gss=.es&sig=45e50696e04f15ce6310843f10a3a8fb&cx=014793692610101313036:vwtjajbclpq&q=%s&cse_tok=AOdTmaBgzSiy5RxoV4cZSGGEr17reWoGLg:1519145966291&googlehost=www.google.com&callback=google.search.Search.apiary10745&nocache=1519145965573&start=0"
host_search = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=small&num=20&hl=es&prettyPrint=false&source=gcsc&gss=.es&sig=45e50696e04f15ce6310843f10a3a8fb&cx=014793692610101313036:vwtjajbclpq&q=%s&cse_tok=%s&googlehost=www.google.com&callback=google.search.Search.apiary10745&nocache=1519145965573&start=0"
item_per_page = 20
def mainlist(item):
@@ -28,7 +29,16 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url %texto
data = httptools.downloadpage(host).data
url_cse = scrapertools.find_single_match(data, '<form action="([^"]+)"') + "?"
bloque = scrapertools.find_single_match(data, '<form action=.*?</form>').replace('name="q"', "")
matches = scrapertools.find_multiple_matches(bloque, 'name="([^"]+).*?value="([^"]+)')
post = "q=" + texto + "&"
for name, value in matches:
post += name + "=" + value + "&"
data = httptools.downloadpage(url_cse + post).data
cse_token = scrapertools.find_single_match(data, "var cse_token='([^']+)'")
item.url = host_search %(texto, cse_token)
try:
return sub_search(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -42,35 +52,31 @@ def search(item, texto):
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)clicktrackUrl":".*?q=(.*?)".*?'
patron += 'title":"([^"]+)".*?'
patron += 'cseImage":{"src":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
scrapedtitle = scrapedtitle.decode("unicode-escape").replace(" online", "").replace("<b>", "").replace("</b>", "")
if "ver-" not in scrapedurl:
continue
year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
contentTitle = scrapedtitle.replace("(%s)" %year,"").replace("Ver","").strip()
itemlist.append(Item(action = "findvideos",
channel = item.channel,
contentTitle = contentTitle,
infoLabels = {"year":year},
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl
))
if itemlist:
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + 10
npage = (page / 10) + 1
item_page = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
itemlist.append(Item(action = "sub_search",
channel = item.channel,
title = "[COLOR green]Página %s[/COLOR]" %npage,
url = item_page
))
while True:
data = httptools.downloadpage(item.url).data
if len(data) < 500 :
break
page = int(scrapertools.find_single_match(item.url, ".*?start=(\d+)")) + item_per_page
item.url = scrapertools.find_single_match(item.url, "(.*?start=)") + str(page)
patron = '(?s)clicktrackUrl":".*?q=(.*?)".*?'
patron += 'title":"([^"]+)".*?'
patron += 'cseImage":{"src":"([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scrapertools.find_single_match(scrapedurl, ".*?online/")
scrapedtitle = scrapedtitle.decode("unicode-escape").replace(" online", "").replace("<b>", "").replace("</b>", "")
if "ver-" not in scrapedurl:
continue
year = scrapertools.find_single_match(scrapedtitle, "\d{4}")
contentTitle = scrapedtitle.replace("(%s)" %year,"").replace("Ver","").strip()
itemlist.append(Item(action = "findvideos",
channel = item.channel,
contentTitle = contentTitle,
infoLabels = {"year":year},
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl,
))
return itemlist
@@ -125,7 +131,7 @@ def peliculas(item):
def findvideos(item):
logger.info("item=" + item.tostring())
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')

View File

@@ -1,22 +0,0 @@
{
"id": "pelisadicto",
"name": "Pelisadicto",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "pelisadicto.png",
"banner": "pelisadicto.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,220 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
    """Root menu of the channel: latest additions, genre index and search."""
    logger.info()
    site = "http://pelisadicto.com"
    return [
        Item(channel=item.channel, title="Últimas agregadas", action="agregadas",
             url=site, viewmode="movie_with_plot"),
        Item(channel=item.channel, title="Listado por género", action="porGenero",
             url=site),
        Item(channel=item.channel, title="Buscar", action="search", url=site),
    ]
def porGenero(item):
    """Build the genre menu.

    Every genre maps to the listing URL
    ``http://pelisadicto.com/genero/<name>/1`` (page 1) handled by
    ``agregadas``.  The "Adulto" entry is only offered when the adult
    mode setting is enabled.

    Returns:
        list[Item]: one menu entry per genre, in display order.
    """
    logger.info()
    # Display order matters: this is exactly the order the original
    # hand-written menu rendered the entries in.
    genres = ["Acción", "Adulto", "Animación", "Aventura", "Biográfico",
              "Ciencia Ficción", "Cine Negro", "Comedia", "Corto", "Crimen",
              "Deporte", "Documental", "Drama", "Familiar", "Fantasía",
              "Guerra", "Historia", "Misterio", "Música", "Musical",
              "Romance", "Terror", "Thriller", "Western"]
    itemlist = []
    for genre in genres:
        # Adult content stays hidden unless the user explicitly enabled it.
        if genre == "Adulto" and config.get_setting("adult_mode") == 0:
            continue
        itemlist.append(Item(channel=item.channel, action="agregadas", title=genre,
                             url="http://pelisadicto.com/genero/%s/1" % genre,
                             viewmode="movie_with_plot"))
    return itemlist
def search(item, texto):
    """Entry point used by the global search.

    Args:
        item: menu Item; its ``url`` is overwritten with the search URL.
        texto: user query; spaces are encoded as ``+``.

    Returns:
        list[Item]: search results via ``agregadas``, or ``[]`` on any
        scraping error — the exception is logged and swallowed so one
        broken channel cannot abort the global search.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://pelisadicto.com/buscar/%s" % texto
    try:
        return agregadas(item)
    except Exception:
        # Narrowed from a bare `except:`: still best-effort, but no
        # longer traps SystemExit/KeyboardInterrupt.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def agregadas(item):
    """Scrape a listing page and return one Item per movie card.

    Also reused by search().  When the listing URL ends in a numeric
    page number, a "next page" entry is appended.
    """
    logger.info()
    itemlist = []
    '''
    # Descarga la pagina
    if "?search=" in item.url:
        url_search = item.url.split("?search=")
        data = scrapertools.cache_page(url_search[0], url_search[1])
    else:
        data = scrapertools.cache_page(item.url)
    logger.info("data="+data)
    '''
    data = scrapertools.cache_page(item.url)
    # logger.info("data="+data)
    # Extract the entries; newlines and double spaces are stripped first
    # so the pattern can match across the whole <ul> block.
    fichas = re.sub(r"\n|\s{2}", "", scrapertools.get_match(data, '<ul class="thumbnails">(.*?)</ul>'))
    # Sample card the pattern below targets:
    # <li class="col-xs-6 col-sm-2 CALDVD"><a href="/pelicula/101-dalmatas" ...>
    #   <img class="poster" ... src="/img/peliculas/101-dalmatas.jpg" alt="101 dálmatas"/>...</a></li>
    patron = 'href="([^"]+)".*?'  # url
    patron += 'src="([^"]+)" '  # thumbnail
    patron += 'alt="([^"]+)'  # title
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    for url, thumbnail, title in matches:
        url = urlparse.urljoin(item.url, url)
        thumbnail = urlparse.urljoin(url, thumbnail)
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url,
                             thumbnail=thumbnail, show=title))
    # Pagination: the listing URL ends with the current page number
    # (e.g. ".../genero/Drama/1"); bump it to build the next-page link.
    try:
        # Sample pager markup:
        # <ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li>...</ul>
        current_page_number = int(scrapertools.get_match(item.url, '/(\d+)$'))
        item.url = re.sub(r"\d+$", "%s", item.url)
        next_page_number = current_page_number + 1
        next_page = item.url % (next_page_number)
        itemlist.append(Item(channel=item.channel, action="agregadas", title="Página siguiente >>", url=next_page,
                             viewmode="movie_with_plot"))
    except:
        # No numeric suffix (e.g. a search URL) -> single page, no pager item.
        pass
    return itemlist
def findvideos(item):
    """Extract the plot and every mirror row from a movie page.

    Returns one playable Item per (quality, language, server) row found
    in the movie's links table; the plot is attached to each of them.
    """
    logger.info()
    itemlist = []
    plot = ""
    data = re.sub(r"\n|\s{2}", "", scrapertools.cache_page(item.url))
    # Sample: <!-- SINOPSIS --> <h2>Sinopsis de 101 dálmatas</h2> <p>Pongo y Perdita...</p>
    patron = "<!-- SINOPSIS --> "
    patron += "<h2>[^<]+</h2> "
    patron += "<p>([^<]+)</p>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    if matches:
        plot = matches[0]
    # Download the page again (unstripped this time) for the links table.
    data = scrapertools.cache_page(item.url)
    patron = '<tr>.*?'
    patron += '<td><img src="(.*?)".*?<td>(.*?)</td>.*?<td>(.*?)</td>.*?<a href="(.*?)".*?</tr>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
        idioma = ""
        # The numbered flag image encodes the audio language.
        if "/img/1.png" in scrapedidioma: idioma = "Castellano"
        if "/img/2.png" in scrapedidioma: idioma = "Latino"
        if "/img/3.png" in scrapedidioma: idioma = "Subtitulado"
        title = item.title + " [" + scrapedcalidad + "][" + idioma + "][" + scrapedserver + "]"
        itemlist.append(
            Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl, thumbnail="",
                 plot=plot, show=item.show))
    return itemlist
def play(item):
    """Resolve item.url into playable video items, propagating metadata."""
    logger.info()
    resolved = servertools.find_video_items(data=item.url)
    for video in resolved:
        video.title = item.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return resolved

View File

@@ -171,7 +171,7 @@ def episodios(item):
# post = "page=%s&x=34&y=14" % urllib.quote(item.url)
# response = httptools.downloadpage(url, post, follow_redirects=False).data
# url = scrapertools.find_single_match(response, '<meta http-equiv="refresh".*?url=([^"]+)"')
# data = httptools.downloadpage(item.url).data
data = httptools.downloadpage(item.url).data
data = jsontools.load(data)
@@ -214,14 +214,12 @@ def episodios(item):
if dict_episodes[numero]["plot"] == "":
dict_episodes[numero]["plot"] = j.get("overviewcapitul", "")
# logger.debug("\n\n\n dict_episodes: %s " % dict_episodes)
for key, value in dict_episodes.items():
list_no_duplicate = list(set(value["quality"]))
title = "%s %s [%s]" % (key, value["title"], "][".join(list_no_duplicate))
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url,
Item(channel=item.channel, action="findvideos", title=title, url=dict_episodes[numero]["url"],
thumbnail=item.thumbnail, fanart=item.fanart, show=item.show, data=value,
contentSerieName=item.contentTitle, contentSeason=value["season"],
contentEpisodeNumber=value["episode"]))

View File

@@ -14,7 +14,7 @@ from core import tmdb
from core.item import Item, InfoLabels
from platformcode import config, logger
host = "https://pepecine.tv"
host = "https://pepecine.info"
perpage = 20
def mainlist1(item):
@@ -29,7 +29,7 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel,
title="Ultimas",
url=host+'/tv-peliculas-online',
url=host+'/peliculas-tv-online',
action='list_latest',
indexp=1,
type='movie'))

View File

@@ -1,7 +1,7 @@
{
"id": "repelis",
"name": "Repelis",
"active": true,
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "repelis.png",
@@ -21,4 +21,4 @@
"visible": true
}
]
}
}

View File

@@ -1,23 +0,0 @@
{
"id": "seriesadicto",
"name": "Seriesadicto",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "seriesadicto.png",
"banner": "seriesadicto.png",
"categories": [
"tvshow",
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,224 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
    """Root menu of the channel: alphabetical index and search."""
    logger.info()
    return [
        Item(channel=item.channel, action="letras", title="Todas por orden alfabético",
             url="http://seriesadicto.com/", folder=True),
        Item(channel=item.channel, action="search", title="Buscar..."),
    ]
def search(item, texto):
    """Entry point used by the global search.

    Args:
        item: menu Item; its ``url`` is overwritten with the search URL.
        texto: user query; spaces are encoded as ``+``.

    Returns:
        list[Item]: results via ``series``, or ``[]`` on any scraping
        error — the exception is logged and swallowed so one broken
        channel cannot abort the global search.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://seriesadicto.com/buscar/" + texto
    try:
        return series(item)
    except Exception:
        # Narrowed from a bare `except:`: still best-effort, but no
        # longer traps SystemExit/KeyboardInterrupt.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def letras(item):
    """Build the A-Z index menu from the "Por inicial" navigation block."""
    logger.info()
    itemlist = []
    # Download the page and isolate the "by initial" <ul> block.
    data = scrapertools.cachePage(item.url)
    data = scrapertools.find_single_match(data, '<li class="nav-header">Por inicial</li>(.*?)</ul>')
    logger.info("data=" + data)
    patronvideos = '<li><a rel="nofollow" href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        plot = ""
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action='series', title=title, url=url, thumbnail=thumbnail, plot=plot))
    return itemlist
def series(item):
    """List every series card on an index or search-results page."""
    logger.info()
    itemlist = []
    '''
    <li class="col-xs-6 col-sm-4 col-md-2">
    <a href="/serie/justicia-ciega-blind-justuce" title="Ver Justicia ciega ( Blind Justuce ) Online" class="thumbnail thumbnail-artist-grid">
    <img style="width: 120px; height: 180px;" src="/img/series/justicia-ciega-blind-justuce-th.jpg" alt="Justicia ciega ( Blind Justuce )"/>
    '''
    data = scrapertools.cachePage(item.url)
    logger.info("data=" + data)
    # Pattern targets the sample card markup quoted above.
    patron = '<li class="col-xs-6[^<]+'
    patron += '<a href="([^"]+)"[^<]+'
    patron += '<img style="[^"]+" src="([^"]+)" alt="([^"]+)"'
    logger.info("patron=" + patron)
    matches = re.compile(patron, re.DOTALL).findall(data)
    logger.info("matches=" + repr(matches))
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        title = scrapertools.htmlclean(scrapedtitle.strip())
        # Relative links/thumbnails are resolved against the page URL.
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
                 plot=plot, show=title, folder=True))
    return itemlist
def episodios(item):
    """List every episode of a series page.

    Each table row carries the episode link, its title and a block of
    language flag images; the language names are appended to the title
    and stored in the item's ``language`` attribute.  When videolibrary
    support is enabled, extra entries to add the series to the library
    and to download all episodes are appended at the end.
    """
    logger.info()
    itemlist = []
    '''
    <tr>
    <td class="sape"><i class="glyphicon glyphicon-film"></i> <a href="/capitulo/saving-hope/1/2/82539" class="color4">Saving Hope 1x02</a></td>
    <td><div class="vistodiv" title="82539"><a title="Marcar como Visto"><span class="visto visto-no"></span></a></div></td>
    <td>
    <img src="/img/3.png" border="0" height="14" width="22" />&nbsp;<img src="/img/4.png" border="0" height="14" width="22" />&nbsp; </td>
    </tr>
    '''
    data = scrapertools.cachePage(item.url)
    # Pattern targets the sample row markup quoted above; the third
    # capture group is the raw block of language flag images.
    patron = '<tr[^<]+'
    patron += '<td class="sape"><i[^<]+</i[^<]+<a href="([^"]+)"[^>]+>([^<]+)</a></td[^<]+'
    patron += '<td><div[^<]+<a[^<]+<span[^<]+</span></a></div></td[^<]+'
    patron += '<td>(.*?)</td'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl, scrapedtitle, bloqueidiomas in matches:
        idiomas, language = extrae_idiomas(bloqueidiomas)
        title = scrapedtitle.strip() + " (" + idiomas + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
                 plot=plot, show=item.show, folder=True, language=language))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show))
        itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
                             action="download_all_episodes", extra="episodios", show=item.show))
    return itemlist
def extrae_idiomas(bloqueidiomas):
    """Parse the flag-image block of an episode row.

    Returns a tuple ``(texto, language)`` where *texto* is the
    slash-joined names of the recognised codes ("1".."4") and
    *language* holds one ``codigo_a_idioma`` result per matched code
    (unknown codes yield an empty string entry).
    """
    logger.info("idiomas=" + bloqueidiomas)
    codes = re.compile('([a-z0-9]+).png"', re.DOTALL).findall(bloqueidiomas)
    language = [codigo_a_idioma(code) for code in codes]
    known = [codigo_a_idioma(code) for code in codes if code in ("1", "2", "3", "4")]
    textoidiomas = "/".join(known)
    return textoidiomas, language
def codigo_a_idioma(codigo):
    """Map a numeric flag code ("1".."4") to its language name.

    Unknown codes map to the empty string.
    """
    nombres = {"1": "Español", "2": "Latino", "3": "VOSE", "4": "VO"}
    return nombres.get(codigo, "")
def findvideos(item):
    """List every mirror of an episode page as playable Items.

    Each table row encodes the language (numbered flag image), the
    server name and the external link; servers are resolved afterwards
    through servertools.
    """
    logger.info()
    itemlist = []
    '''
    <tr class="lang_3 no-mobile">
    <td><img src="/img/3.png" border="0" height="14" width="22" /></td>
    <td>Nowvideo</td>
    <td class="enlacevideo" title="82539"><a href="http://www.nowvideo.eu/video/4fdc641896fe8" rel="nofollow" target="_blank" class="btn btn-primary btn-xs bg2"><i class="glyphicon glyphicon-play"></i> Reproducir</a></td>
    </td>
    </tr>
    '''
    # Download the page; pattern targets the sample row quoted above.
    data = scrapertools.cachePage(item.url)
    patron = '<tr class="lang_[^<]+'
    patron += '<td><img src="/img/(\d).png"[^<]+</td[^<]+'
    patron += '<td>([^<]+)</td[^<]+'
    patron += '<td class="enlacevideo"[^<]+<a href="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for idioma, servername, scrapedurl in matches:
        title = "Mirror en " + servername + " (" + codigo_a_idioma(idioma) + ")"
        language = codigo_a_idioma(idioma)
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
                 plot=plot, folder=False, language=language))
    # Attach the matching server handler to every mirror.
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist
def play(item):
    """Resolve item.url into playable video items, labelling each by server."""
    logger.info()
    resolved = servertools.find_video_items(data=item.url)
    for video in resolved:
        filename = scrapertools.get_filename_from_url(video.url)
        video.title = "Enlace encontrado en " + video.server + " (" + filename + ")"
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return resolved

View File

@@ -90,7 +90,7 @@ thumb_dict = {"movies": "https://s10.postimg.org/fxtqzdog9/peliculas.png",
}
def set_genre(string):
logger.info()
#logger.info()
genres_dict = {'accion':['accion', 'action', 'accion y aventura', 'action & adventure'],
'adultos':['adultos', 'adultos +', 'adulto'],
@@ -131,7 +131,7 @@ def set_genre(string):
return string
def remove_format(string):
logger.info()
#logger.info()
#logger.debug('entra en remove: %s' % string)
string = string.rstrip()
string = re.sub(r'(\[|\[\/)(?:color|COLOR|b|B|i|I).*?\]|\[|\]|\(|\)|\:|\.', '', string)
@@ -140,7 +140,7 @@ def remove_format(string):
def simplify(string):
logger.info()
#logger.info()
#logger.debug('entra en simplify: %s'%string)
string = remove_format(string)
string = string.replace('-',' ').replace('_',' ')
@@ -155,7 +155,7 @@ def simplify(string):
return string
def add_languages(title, languages):
logger.info()
#logger.info()
if isinstance(languages, list):
for language in languages:
@@ -165,7 +165,7 @@ def add_languages(title, languages):
return title
def set_color(title, category):
logger.info()
#logger.info()
color_scheme = {'otro': 'white'}
@@ -199,7 +199,7 @@ def set_color(title, category):
return title
def set_lang(language):
logger.info()
#logger.info()
cast =['castellano','espanol','cast','esp','espaol', 'es','zc', 'spa', 'spanish', 'vc']
lat=['latino','lat','la', 'espanol latino', 'espaol latino', 'zl', 'mx', 'co', 'vl']
@@ -237,7 +237,7 @@ def set_lang(language):
def title_format(item):
logger.info()
#logger.info()
lang = False
valid = True
@@ -349,7 +349,7 @@ def title_format(item):
else:
simple_language = ''
item.language = simple_language
#item.language = simple_language
# Damos formato al año si existiera y lo agregamos
# al titulo excepto que sea un episodio
@@ -446,7 +446,7 @@ def title_format(item):
return item
def thumbnail_type(item):
logger.info()
#logger.info()
# Se comprueba que tipo de thumbnail se utilizara en findvideos,
# Poster o Logo del servidor

View File

@@ -27,7 +27,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for ext, encoded, code, quality in matches:
media_url = decode(encoded, int(code))
media_url = media_url.replace("@","")
if not media_url.startswith("http"):
media_url = "http:" + media_url
video_urls.append([".%s %sp [streamango]" % (ext, quality), media_url])

View File

@@ -32,7 +32,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
for ext, encoded, code, quality in matches:
media_url = decode(encoded, int(code))
media_url = media_url.replace("@","")
if not media_url.startswith("http"):
media_url = "http:" + media_url
video_urls.append([".%s %sp [streamcherry]" % (ext, quality), media_url])