various fixes

allcalidad: fix findvideos
asialiveaction: fix links + autoplay
cuelgame: disabled; the site hosts a lot of content that is not just video
jkanime: fix for changes on the website
pedropolis: domain change + autoplay
vidup: fix
Intel1
2018-08-25 12:25:27 -05:00
parent aaf6855de0
commit 0dd8d14c05
7 changed files with 195 additions and 364 deletions


@@ -142,10 +142,13 @@ def findvideos(item):
contentTitle = scrapertools.find_single_match(data, 'orig_title.*?>([^<]+)<').strip()
if contentTitle != "":
item.contentTitle = contentTitle
-patron = '(?s)fmi(.*?)thead'
-bloque = scrapertools.find_single_match(data, patron)
-match = scrapertools.find_multiple_matches(bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
-for url in match:
+bloque = scrapertools.find_single_match(data, '(?s)<div class="bottomPlayer">(.*?)<script>')
+match = scrapertools.find_multiple_matches(bloque, '(?is)data-Url="([^"]+).*?data-postId="([^"]+)')
+for dataurl, datapostid in match:
+page_url = host + "wp-admin/admin-ajax.php"
+post = "action=get_more_top_news&postID=%s&dataurl=%s" %(datapostid, dataurl)
+data = httptools.downloadpage(page_url, post=post).data
+url = scrapertools.find_single_match(data, '(?i)src="([^"]+)')
titulo = "Ver en: %s"
text_color = "white"
if "goo.gl" in url:


@@ -3,6 +3,8 @@
import re
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -12,27 +14,25 @@ from platformcode import config, logger
host = "http://www.asialiveaction.com"
IDIOMAS = {'Japones': 'Japones'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['gvideo', 'openload','streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0))
itemlist.append(Item(channel=item.channel, action="lista", title="Series",
url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -41,7 +41,6 @@ def category(item):
itemlist = list()
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.cat == 'abc':
data = scrapertools.find_single_match(data, '<span>Orden Alfabético</span>.*?</ul>')
elif item.cat == 'genre':
@@ -50,31 +49,23 @@ def category(item):
data = scrapertools.find_single_match(data, '<span>Año</span>.*?</ul>')
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<span>Calidad</span>.*?</ul>')
patron = "<li>([^<]+)<a href='([^']+)'>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
if scrapedtitle != 'Próximas Películas':
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', first=0))
return itemlist
def search_results(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
patron +=">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtype, scrapedthumbnail, scrapedyear, scrapedtitle ,scrapedurl in matches:
title="%s [%s]" % (scrapedtitle,scrapedyear)
new_item= Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail)
if scrapedtype.strip() == 'Serie':
@@ -85,12 +76,10 @@ def search_results(item):
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
new_item.type = 'pl'
itemlist.append(new_item)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
@@ -98,57 +87,49 @@ def search(item, texto):
if texto != '':
return search_results(item)
def episodios(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-patron ='<div id="ep(\d+)" class="eps"> <section class="section-post online"><div class="player">.*?'
-patron += 'src="([^"]+)"/><a href="([^"]+)" target='
-matches = re.compile(patron,re.DOTALL).findall(data)
+data = data.replace('"ep0','"epp"')
+patron = '(?is)<div id="ep(\d+)".*?'
+patron += 'src="([^"]+)".*?'
+patron += 'href="([^"]+)" target="_blank"'
+matches = scrapertools.find_multiple_matches(data, patron)
for scrapedepi, scrapedthumbnail, scrapedurl in matches:
url = scrapedurl
title="1x%s - %s" % (scrapedepi, item.contentSerieName)
itemlist.append(item.clone(action='findvideos', title=title, url=url, thumbnail=scrapedthumbnail, type=item.type,
infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
url=item.url, action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName))
return itemlist
def lista(item):
logger.info()
next = True
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = scrapertools.find_single_match(data, "itemprop='headline'>.*?</h2>.*?</ul>")
patron = '<span class="([^"]+)">.*?<figure class="poster-bg"><header><span>(\d{4})</span></header><img src="([^"]+)" />'
patron += '<footer>(.*?)</footer></figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'
matches = scrapertools.find_multiple_matches(data, patron)
first = int(item.first)
last = first + 19
if last > len(matches):
last = len(matches)
next = False
for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle ,scrapedurl in matches[first:last]:
patron_quality="<span>(.+?)</span>"
quality = scrapertools.find_multiple_matches(scrapedquality, patron_quality)
qual=""
for calidad in quality:
qual=qual+"["+calidad+"] "
title="%s [%s] %s" % (scrapedtitle,scrapedyear,qual)
new_item= Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':scrapedyear})
@@ -158,34 +139,26 @@ def lista(item):
else:
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
if scrapedtype == item.type or item.type == 'cat':
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
#pagination
url_next_page = item.url
first = last
if next:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
return itemlist
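
lista pages client-side: it regex-collects every entry on the page once, then slices a fixed window out of the match list and re-queues itself with first=last while items remain. The same pattern in isolation (names illustrative):

    def paginate(matches, first, window=19):
        # One page is matches[first:first+window]; has_next tells the caller
        # whether to append a "Siguiente >>" item carrying first=last.
        first = int(first)
        last = min(first + window, len(matches))
        return matches[first:last], last, last < len(matches)

    page, last, has_next = paginate(list(range(45)), 0)
    # page -> items 0..18, last -> 19, has_next -> True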
def findvideos(item):
logger.info()
itemlist = []
dl_links = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
### grab the gvideo links
patron = 'class="Button Sm fa fa-download mg"></a><a target="_blank" rel="nofollow" href="([^"]+)"'
-matches = re.compile(patron, re.DOTALL).findall(data)
+matches = scrapertools.find_multiple_matches(data, patron)
for dl_url in matches:
g_data = httptools.downloadpage(dl_url).data
video_id = scrapertools.find_single_match(g_data, 'jfk-button jfk-button-action" href="([^"]+)">')
@@ -194,22 +167,26 @@ def findvideos(item):
g_data = httptools.downloadpage(g_url, follow_redirects=False, only_headers=True).headers
url = g_data['location']
dl_links.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))
if item.type == 'pl':
new_url = scrapertools.find_single_match(data, '<div class="player">.*?<a href="([^"]+)" target')
data = httptools.downloadpage(new_url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="btn.*?" data-video="([^"]+)">'
-matches = re.compile(patron, re.DOTALL).findall(data)
+matches = scrapertools.find_multiple_matches(data, patron)
for video_id in matches:
url_data = httptools.downloadpage('https://tinyurl.com/%s' % video_id, follow_redirects=False)
url = url_data.headers['location']
itemlist.append(Item(channel=item.channel, title = '%s', url=url, action='play', infoLabels=item.infoLabels))
patron = '<iframe src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
itemlist.append(item.clone(title = '%s', url=url, action='play'))
itemlist.extend(dl_links)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
-return itemlist
+# Required for FilterTools
+itemlist = filtertools.get_links(itemlist, item, list_language)
+# Required for AutoPlay
+autoplay.start(itemlist, item)
+return itemlist
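
The tinyurl IDs pulled from data-video are resolved without following the redirect: only the Location header is read, which is what httptools.downloadpage(..., follow_redirects=False, only_headers=True) does above. A stdlib sketch of the same trick:

    import httplib

    def resolve_tinyurl(video_id):
        # HEAD is enough: the 301/302 Location header is the real hoster URL.
        conn = httplib.HTTPSConnection("tinyurl.com")
        conn.request("HEAD", "/" + video_id)
        return conn.getresponse().getheader("location", "")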


@@ -1,7 +1,7 @@
{
"id": "cuelgame",
"name": "Cuelgame",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "cuelgame.png",


@@ -1,60 +1,46 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger
host = "https://jkanime.net"
def mainlist(item):
logger.info()
itemlist = list()
-itemlist.append(
-Item(channel=item.channel, action="ultimos_capitulos", title="Últimos Capitulos", url="http://jkanime.net/"))
-itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos", url="http://jkanime.net/"))
-itemlist.append(Item(channel=item.channel, action="letras", title="Listado Alfabetico", url="http://jkanime.net/"))
-itemlist.append(Item(channel=item.channel, action="generos", title="Listado por Genero", url="http://jkanime.net/"))
+itemlist.append(Item(channel=item.channel, action="ultimas_series", title="Últimas Series", url=host))
+itemlist.append(Item(channel=item.channel, action="ultimos_episodios", title="Últimos Episodios", url=host))
+itemlist.append(Item(channel=item.channel, action="p_tipo", title="Listado Alfabetico", url=host, extra="Animes por letra"))
+itemlist.append(Item(channel=item.channel, action="p_tipo", title="Listado por Genero", url=host, extra="Animes por Genero"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
return itemlist
-def ultimos_capitulos(item):
+def ultimas_series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
-data = scrapertools.get_match(data, '<ul class="ratedul">.+?</ul>')
-data = data.replace('\t', '')
-data = data.replace('\n', '')
-data = data.replace('/thumbnail/', '/image/')
-patron = '<img src="(http://cdn.jkanime.net/assets/images/animes/.+?)" .+?href="(.+?)">(.+?)<.+?span>(.+?)<'
-matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedthumb, scrapedurl, scrapedtitle, scrapedepisode in matches:
-title = scrapedtitle.strip() + scrapedepisode
-url = urlparse.urljoin(item.url, scrapedurl)
-thumbnail = scrapedthumb
-plot = ""
-logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
+data = scrapertools.find_single_match(data, 'Últimos capitulos agregados.*?/div><!-- .content-box -->')
+patron = '<a title="([^"]+).*?'
+patron += 'href="([^"]+)".*?'
+patron += 'src="([^"]+)'
+matches = scrapertools.find_multiple_matches(data, patron)
+for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
itemlist.append(
-Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
-show=scrapedtitle.strip(), fulltitle=title))
+Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+show=scrapedtitle))
+tmdb.set_infoLabels(itemlist)
return itemlist
def search(item, texto):
logger.info()
if item.url == "":
item.url = "http://jkanime.net/buscar/%s/"
item.url = host + "/buscar/%s/"
texto = texto.replace(" ", "+")
item.url = item.url % texto
try:
@@ -67,117 +53,66 @@ def search(item, texto):
return []
-def ultimos(item):
+def ultimos_episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul class="latestul">(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
#data = scrapertools.find_single_match(data, '<ul class="latestul">(.*?)</ul>')
patron = '<a class="odd" title="([^"]+).*?'
patron += 'href="([^"]+)".*?'
patron += 'img src="([^"]+)".*?'
patron += 'Episodio.*?(\d+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedepisode in matches:
title = scrapedtitle + " - Episodio " + scrapedepisode
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot))
Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist
-def generos(item):
+def p_tipo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<div class="genres">(.*?)</div>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
data = scrapertools.find_single_match(data, '<h3>%s(.*?)</ul>' %item.extra)
patron = 'href="([^"]+)".*?'
patron += 'title.*?>([^<]+)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
-title = scrapedtitle
-url = urlparse.urljoin(item.url, scrapedurl)
-thumbnail = ""
-plot = ""
-logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
-itemlist.append(
-Item(channel=item.channel, action="series", title=title, url=url, thumbnail=thumbnail, plot=plot,
-viewmode="movie_with_plot"))
-return itemlist
-def letras(item):
-logger.info()
-itemlist = []
-data = httptools.downloadpage(item.url).data
-data = scrapertools.get_match(data, '<ul class="animelet">(.*?)</ul>')
-patron = '<a href="([^"]+)">([^<]+)</a>'
-matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedurl, scrapedtitle in matches:
-title = scrapedtitle
-url = urlparse.urljoin(item.url, scrapedurl)
-thumbnail = ""
-plot = ""
-logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
-itemlist.append(
-Item(channel=item.channel, action="series", title=title, url=url, thumbnail=thumbnail, plot=plot,
-viewmode="movie_with_plot"))
+if "Por Genero" not in scrapedtitle:
+itemlist.append(
+Item(channel=item.channel, action="series", title=scrapedtitle, url=host + scrapedurl,
+viewmode="movie_with_plot"))
return itemlist
def series(item):
logger.info()
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<table class="search[^<]+'
patron += '<tr[^<]+'
patron += '<td[^<]+'
patron += '<a href="([^"]+)"><img src="([^"]+)"[^<]+</a>[^<]+'
patron += '</td>[^<]+'
patron += '<td><a[^>]+>([^<]+)</a></td>[^<]+'
patron += '<td[^>]+>([^<]+)</td>[^<]+'
patron += '<td[^>]+>([^<]+)</td>[^<]+'
patron += '</tr>[^<]+'
patron += '<tr>[^<]+'
patron += '<td>(.*?)</td>'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '(?is)let-post.*?src="([^"]+).*?'
patron += 'alt="([^"]+).*?'
patron += 'href="([^"]+).*?'
patron += '<p>([^\<]+).*?'
patron += 'eps-num">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
itemlist = []
-for scrapedurl, scrapedthumbnail, scrapedtitle, line1, line2, scrapedplot in matches:
-title = scrapedtitle.strip() + " (" + line1.strip() + ") (" + line2.strip() + ")"
-extra = line2.strip()
-url = urlparse.urljoin(item.url, scrapedurl)
-thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
-thumbnail = thumbnail.replace("thumbnail", "image")
+for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedplot, scrapedepisode in matches:
+title = scrapedtitle + " (" + scrapedepisode + ")"
+scrapedthumbnail = scrapedthumbnail.replace("thumbnail", "image")
plot = scrapertools.htmlclean(scrapedplot)
-logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
-itemlist.append(
-Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
-plot=plot, extra=extra, show=scrapedtitle.strip()))
+itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
+plot=scrapedplot, show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
try:
-siguiente = scrapertools.get_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
-scrapedurl = urlparse.urljoin(item.url, siguiente)
+siguiente = scrapertools.find_single_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
+scrapedurl = item.url + siguiente
scrapedtitle = ">> Pagina Siguiente"
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(
Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedplot, folder=True, viewmode="movie_with_plot"))
@@ -187,7 +122,7 @@ def series(item):
def get_pages_and_episodes(data):
-results = re.findall('href="#pag([0-9]+)">[0-9]+ - ([0-9]+)', data)
+results = scrapertools.find_multiple_matches(data, 'href="#pag([0-9]+)".*?>[0-9]+ - ([0-9]+)')
if results:
return int(results[-1][0]), int(results[-1][1])
return 1, 0
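
The rewritten regex reads the last pagination tab, whose label carries the running episode count, so one match yields both totals. For example:

    import re

    data = 'href="#pag1">1 - 10</a> href="#pag2">11 - 20</a> href="#pag3">21 - 26</a>'
    results = re.findall('href="#pag([0-9]+)".*?>[0-9]+ - ([0-9]+)', data)
    # results[-1] == ('3', '26')  ->  3 pages, 26 episodes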
@@ -196,14 +131,11 @@ def get_pages_and_episodes(data):
def episodios(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
-scrapedplot = scrapertools.get_match(data, '<meta name="description" content="([^"]+)"/>')
+scrapedplot = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"/>')
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="separedescrip">.*?src="([^"]+)"')
-idserie = scrapertools.get_match(data, "ajax/pagination_episodes/(\d+)/")
+idserie = scrapertools.find_single_match(data, "ajax/pagination_episodes/(\d+)/")
logger.info("idserie=" + idserie)
if " Eps" in item.extra and "Desc" not in item.extra:
caps_x = item.extra
@@ -212,69 +144,55 @@ def episodios(item):
paginas = capitulos / 10 + (capitulos % 10 > 0)
else:
paginas, capitulos = get_pages_and_episodes(data)
logger.info("idserie=" + idserie)
for num_pag in range(1, paginas + 1):
numero_pagina = str(num_pag)
headers = {"Referer": item.url}
-data2 = scrapertools.cache_page("http://jkanime.net/ajax/pagination_episodes/%s/%s/" % (idserie, numero_pagina),
-headers=headers)
-# logger.info("data2=" + data2)
+data2 = httptools.downloadpage(host + "/ajax/pagination_episodes/%s/%s/" % (idserie, numero_pagina),
+headers=headers).data
patron = '"number"\:"(\d+)","title"\:"([^"]+)"'
-matches = re.compile(patron, re.DOTALL).findall(data2)
-# http://jkanime.net/get-backers/1/
+matches = scrapertools.find_multiple_matches(data2, patron)
for numero, scrapedtitle in matches:
title = scrapedtitle.strip()
-url = urlparse.urljoin(item.url, numero)
-thumbnail = scrapedthumbnail
+url = item.url + numero
plot = scrapedplot
-logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
-itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
-fanart=thumbnail, plot=plot, fulltitle=title))
+itemlist.append(item.clone(action="findvideos", title=title, url=url, plot=plot))
if len(itemlist) == 0:
try:
# porestrenar = scrapertools.get_match(data,
# '<div[^<]+<span class="labl">Estad[^<]+</span[^<]+<span[^>]+>Por estrenar</span>')
itemlist.append(Item(channel=item.channel, action="findvideos", title="Serie por estrenar", url="",
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot,
server="directo", folder=False))
except:
pass
return itemlist
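
episodios now fetches every page of the site's episode pager through httptools instead of the deprecated scrapertools.cache_page. A standalone sketch of that loop (urllib2 in place of httptools; the endpoint and the Referer requirement are taken from the hunk above):

    import re
    import urllib2

    HOST = "https://jkanime.net"

    def fetch_episodes(serie_url, idserie, paginas):
        episodes = []
        for num_pag in range(1, paginas + 1):
            url = HOST + "/ajax/pagination_episodes/%s/%s/" % (idserie, num_pag)
            req = urllib2.Request(url, headers={"Referer": serie_url})
            data = urllib2.urlopen(req).read()
            # Entries come back as {"number":"12","title":"..."} pairs.
            episodes += re.findall(r'"number"\:"(\d+)","title"\:"([^"]+)"', data)
        return episodes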
def findvideos(item):
logger.info()
itemlist = []
-data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
-list_videos = scrapertools.find_multiple_matches(data, '<iframe class="player_conte" src="([^"]+)"')
aux_url = []
+data = httptools.downloadpage(item.url).data
+list_videos = scrapertools.find_multiple_matches(data, '<iframe class="player_conte" src="([^"]+)"')
index = 1
for e in list_videos:
if e.startswith("https://jkanime.net/jk.php?"):
if e.startswith(host + "/jk"):
headers = {"Referer": item.url}
data = httptools.downloadpage(e, headers=headers).data
url = scrapertools.find_single_match(data, '<embed class="player_conte".*?&file=([^\"]+)\"')
if not url:
url = scrapertools.find_single_match(data, 'source src="([^\"]+)\"')
if not url:
url = scrapertools.find_single_match(data, '<iframe class="player_conte" src="([^\"]+)\"')
if "jkanime" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if url:
itemlist.append(item.clone(title="Enlace encontrado en server #%s" % index, url=url, action="play"))
itemlist.append(item.clone(title="Enlace encontrado en server #" + str(index) + " (%s)", url=url, action="play"))
index += 1
else:
-aux_url.append(e)
-itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
+aux_url.append(item.clone(title="Enlace encontrado (%s)", url=e, action="play"))
+itemlist.extend(aux_url)
+itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
for videoitem in itemlist:
videoitem.fulltitle = item.fulltitle
videoitem.channel = item.channel
videoitem.thumbnail = item.thumbnail
return itemlist


@@ -14,6 +14,26 @@
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino"
]
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",


@@ -8,6 +8,8 @@ import sys
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -19,7 +21,7 @@ from channelselector import get_thumb
__channel__ = "pedropolis"
host = "http://pedropolis.com/"
host = "http://pedropolis.tv/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -44,10 +46,16 @@ parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")),
@@ -57,31 +65,27 @@ def mainlist(item):
item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
thumbnail=get_thumb('search.png'), url=host)]
autoplay.show_option(item.channel, itemlist)
return itemlist
def menumovies(item):
logger.info()
itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'movies/',
itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'pelicula/',
viewcontent='movies', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
-viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
-item.clone(title="Más Valoradas", action="peliculas", text_blod=True, viewcontent='movies',
-url=host + 'calificaciones/?get=movies', viewmode="movie_with_plot"),
-item.clone(title="Géneros", action="generos", text_blod=True, viewmode="movie_with_plot",
-viewcontent='movies', url=host)]
+viewcontent='movies', url=host + 'tendencias/?get=movie', viewmode="movie_with_plot"),
+item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
+viewcontent='movies', url=host, viewmode="movie_with_plot"),
+item.clone(title="Por género", action="p_portipo", text_blod=True, extra="Categorías",
+viewcontent='movies', url=host, viewmode="movie_with_plot")]
return itemlist
def menuseries(item):
logger.info()
itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
-viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot"),
+viewcontent='tvshows', url=host + 'serie/', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"),
@@ -92,6 +96,22 @@ def menuseries(item):
return itemlist
def p_portipo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '(?is)%s.*?</ul>' %item.extra)
patron = 'href="([^"]+).*?'
patron += '>([^"<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action = "peliculas",
title = scrapedtitle,
url = scrapedurl
))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
@@ -99,14 +119,11 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?' # img, title
patron = '<div class="poster"> <img src="([^"]+)" alt="([^"]+)">.*?' # img, title
patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?' # rating
patron += '<span class="quality">([^<]+)</span></div><a href="([^"]+)">.*?' # calidad, url
patron += '<span class="quality">([^<]+)</span></div> <a href="([^"]+)">.*?' # calidad, url
patron += '<span>([^<]+)</span>' # year
matches = scrapertools.find_multiple_matches(data, patron)
# Pagination
if item.next_page != 'b':
if len(matches) > 19:
@@ -120,65 +137,28 @@ def peliculas(item):
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches:
if 'Proximamente' not in quality:
scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
'Español Latino', '').strip()
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)
itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
infoLabels={"year":year, "rating":rating}, thumbnail=scrapedthumbnail,
url=scrapedurl, next_page=next_page, quality=quality, title=title))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if url_next_page:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
url=url_next_page, next_page=next_page, folder=True, text_blod=True,
thumbnail=get_thumb("next.png")))
for no_plot in itemlist:
if no_plot.infoLabels['plot'] == '':
thumb_id = scrapertools.find_single_match(no_plot.thumbnail, '.*?\/\d{2}\/(.*?)-')
thumbnail = "/%s.jpg" % thumb_id
filtro_list = {"poster_path": thumbnail}
filtro_list = filtro_list.items()
no_plot.infoLabels={'filtro':filtro_list}
tmdb.set_infoLabels_item(no_plot, __modo_grafico__)
if no_plot.infoLabels['plot'] == '':
data = httptools.downloadpage(no_plot.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
no_plot.fanart = scrapertools.find_single_match(data,
"<meta property='og:image' content='([^']+)' />").replace(
'w780', 'original')
no_plot.plot = scrapertools.find_single_match(data, '<div itemprop="description" '
'class="wp-content">.*?<p>(['
'^<]+)</p>')
no_plot.plot = scrapertools.htmlclean(no_plot.plot)
no_plot.infoLabels['director'] = scrapertools.find_single_match(data,
'<div class="name"><a href="[^"]+">([^<]+)</a>')
no_plot.infoLabels['rating'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>(['
'^<]+)</strong>')
no_plot.infoLabels['votes'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>['
'^<]+</strong>\s(.*?) votos</b>')
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return sub_search(item)
# Catch the exception so the global search is not interrupted when a channel fails
except:
import sys
@@ -189,20 +169,18 @@ def search(item, texto):
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />' # url, img, title
bloque = scrapertools.find_single_match(data, 'Resultados encontrados.*?class="widget widget_fbw_id')
patron = '(?is)<a href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += 'alt="([^"]+)" />.*?' # url, img, title
patron += '<span class="[^"]+">([^<]+)</span>.*?' # tipo
patron += '<span class="year">([^"]+)</span>.*?<div class="contenido"><p>([^<]+)</p>' # year, plot
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year, plot in matches:
patron += '<span class="year">([^"]+)' # year
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:
title = scrapedtitle
-if tipo == 'Serie':
+if tipo == ' Serie ':
contentType = 'tvshow'
action = 'temporadas'
title += ' [COLOR red](' + tipo + ')[/COLOR]'
@@ -210,18 +188,14 @@ def sub_search(item):
contentType = 'movie'
action = 'findvideos'
title += ' [COLOR green](' + tipo + ')[/COLOR]'
itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar',
action=action, infoLabels={"year": year}, contentType=contentType,
thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if paginacion:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png")))
return itemlist
@@ -253,42 +227,17 @@ def newest(categoria):
return itemlist
def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
data = scrapertools.find_single_match(data, 'Genero</a><ulclass="sub-menu">(.*?)</ul></li><li id')
patron = '<li id="[^"]+" class="menu-item.*?<a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if scrapedtitle != 'Proximamente':
title = "%s" % scrapedtitle
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=title,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot"))
itemlist.sort(key=lambda it: it.title)
return itemlist
def series(item):
logger.info()
url_next_page = ''
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">' # img, title, url
patron = '<div class="poster"> <img src="([^"]+)"'
patron += ' alt="([^"]+)">.*?'
patron += '<a href="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
if item.next_page != 'b':
if len(matches) > 19:
url_next_page = item.url
@@ -301,45 +250,27 @@ def series(item):
matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
if len(matches_next_page) > 0:
url_next_page = urlparse.urljoin(item.url, matches_next_page[0])
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapedtitle.replace('&#8217;', "'")
itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
url=scrapedurl, thumbnail=scrapedthumbnail,
contentSerieName=scrapedtitle, show=scrapedtitle,
next_page=next_page, action="temporadas", contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if url_next_page:
itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page,
next_page=next_page, thumbnail=get_thumb("next.png")))
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
item.fanart = scrapertools.find_single_match(data,
"<meta property='og:image' content='([^']+)' />").replace(
'w780', 'original')
item.plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2><div class="wp-content"><p>([^<]+)</p>')
item.plot = scrapertools.htmlclean(item.plot)
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<span class="title">([^<]+)<i>.*?' # season
patron += '<img src="([^"]+)"></a></div>' # img
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 1:
for scrapedseason, scrapedthumbnail in matches:
@@ -349,7 +280,6 @@ def temporadas(item):
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
@@ -359,14 +289,11 @@ def temporadas(item):
if i.infoLabels.has_key('poster_path'):
# If the season has its own poster, use it instead of the show's
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
else:
return episodios(item)
@@ -375,36 +302,28 @@ def temporadas(item):
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(data)
patron = '<div class="imagen"><a href="([^"]+)">.*?' # url
patron += '<div class="numerando">(.*?)</div>.*?' # numerando cap
patron += '<a href="[^"]+">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+) - (\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
contentType="episode", extra='serie')
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
# TODO: skip this step when adding to the video library
if not item.extra:
# Fetch the data for every episode of the season using multiple threads
@@ -416,31 +335,25 @@ def episodios(item):
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, use it instead of the poster
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# "Add this series to the video library" option
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<div id="option-(\d+)" class="[^"]+"><iframe.*?src="([^"]+)".*?</iframe>' # lang, url
patron = '<div id="option-(\d+)".*?<iframe.*?src="([^"]+)".*?</iframe>' # lang, url
matches = re.compile(patron, re.DOTALL).findall(data)
for option, url in matches:
lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?</b>-->(\w+)' % option)
lang = lang.lower()
@@ -451,17 +364,13 @@ def findvideos(item):
'ingles': '[COLOR red](VOS)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
# resolve short-url redirects when one matches
if "bit.ly" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone(channel=__channel__, url=url, title=item.contentTitle,
action='play', language=lang))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
for x in itemlist:
if x.extra != 'directo':
x.thumbnail = item.thumbnail
@@ -469,10 +378,14 @@ def findvideos(item):
if item.extra != 'serie' and item.extra != 'buscar':
x.title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
x.server.title(), x.quality, x.language)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle))
return itemlist
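
findvideos pairs each option-N iframe with the language label printed in the matching tab via a second, parameterized regex. Reduced to its core (patterns copied from this hunk):

    import re

    def extract_links(data):
        links = []
        pairs = re.findall(r'<div id="option-(\d+)".*?<iframe.*?src="([^"]+)".*?</iframe>',
                           data, re.DOTALL)
        for option, url in pairs:
            # The tab list repeats the option id, so the language can be looked up by id.
            lang = re.search(r'<li><a class="options" href="#option-%s">.*?</b>-->(\w+)' % option, data)
            links.append((url, lang.group(1).lower() if lang else ""))
        return links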


@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -15,17 +17,15 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
-data = httptools.downloadpage(page_url).data
-key = scrapertools.find_single_match(data, "var thief\s*=\s*'([^']+)'")
-data_vt = httptools.downloadpage("http://vidup.tv/jwv/%s" % key).data
-vt = scrapertools.find_single_match(data_vt, 'file\|direct\|(.*?)\|')
# Extract the URL
video_urls = []
-media_urls = scrapertools.find_multiple_matches(data, '\{"file"\:"([^"]+)","label"\:"([^"]+)"\}')
-for media_url, label in media_urls:
-ext = scrapertools.get_filename_from_url(media_url)[-4:]
-media_url += "?direct=false&ua=1&vt=%s" % vt
-video_urls.append(["%s (%s) [vidup]" % (ext, label), media_url])
-for video_url in video_urls:
-logger.info("%s - %s" % (video_url[0], video_url[1]))
+post = {}
+post = urllib.urlencode(post)
+url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
+data = httptools.downloadpage("https://vidup.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data
+bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
+matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
+for res, media_url in matches:
+video_urls.append(
+[scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [vidup.tv]", media_url])
video_urls.reverse()
return video_urls
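
The rewritten resolver no longer scrapes the player page for a "thief" key: it lets page_url redirect (reading only the Location header), extracts the embed id, and POSTs to vidup's serve API, which answers with a qualities map. A standalone sketch (urllib2 in place of httptools; an empty body forces the POST method, mirroring the urlencode({}) above; note [A-Za-z0-9] where the hunk's [A-z0-9] range also matches a few punctuation characters):

    import re
    import urllib2

    def get_qualities(embed_url):
        # embed_url is the Location header of the original page_url,
        # e.g. https://vidup.io/embed/<id> (hypothetical id).
        video_id = re.search(r"embed/([A-Za-z0-9]+)", embed_url).group(1)
        req = urllib2.Request("https://vidup.io/api/serve/video/" + video_id, data="")  # empty body -> POST
        data = urllib2.urlopen(req).read()
        bloque = re.search(r'qualities":\{(.*?)\}', data).group(1)
        # [(label, media_url), ...]; the channel reverses the list afterwards.
        return re.findall(r'"([^"]+)":"([^"]+)"', bloque)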