Merge pull request #231 from Intel11/actualizados

Actualizados
This commit is contained in:
Alfa
2018-03-28 16:50:09 -05:00
committed by GitHub
7 changed files with 522 additions and 765 deletions

View File

@@ -4,6 +4,7 @@
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
@@ -58,17 +59,16 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
patron += '<td>(\d{4})</td>.*?Qlty>(.*?)</span>'
patron += '<td>(\d{4})</td>'
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h2 class=Title>(.*?)<\/h2>.*?<span class=Year>(.*?)<\/span>.*?Qlty>(.*?)<\/span>'
patron += '<h3 class=Title>(.*?)<\/h3>.*?<span class=Year>(.*?)<\/span>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
if "|" in scrapedtitle:
@@ -79,14 +79,13 @@ def list_all(item):
contentTitle = re.sub('\(.*?\)','', contentTitle)
title = '%s [%s] [%s]'%(contentTitle, year, quality)
title = '%s [%s]'%(contentTitle, year)
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
quality = quality,
infoLabels={'year':year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
@@ -132,16 +131,16 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
data = scrapertools.decodeHtmlentities(data)
patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
url= scrapedurl
opt_data = scrapertools.find_single_match(data,'%s><span>.*?<strong>\d+<.*?</span>.*?<span>('
'.*?)</span>'%option).split('-')
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class=Video>.*?src=(.*?) frameborder')
opt_data = scrapertools.find_single_match(data,'%s><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
language = opt_data[0].strip()
quality = opt_data[1].strip()
if url != '' and 'youtube' not in url:
@@ -151,7 +150,10 @@ def findvideos(item):
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
itemlist.append(trailer)
try:
itemlist.append(trailer)
except:
pass
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)

View File

@@ -0,0 +1,34 @@
{
"id": "doramasmp4",
"name": "DoramasMP4",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://s14.postimg.org/ibh4znkox/doramasmp4.png",
"banner": "",
"categories": [
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,222 @@
# -*- coding: utf-8 -*-
# -*- Channel DoramasMP4 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
def get_source(url):
    """Download *url* and strip quotes/whitespace noise so scrape regexes stay simple."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def mainlist(item):
    """Build the channel's root menu: Doramas, Películas and Buscar entries."""
    logger.info()
    # Register this channel's servers/qualities with the autoplay subsystem.
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
                         thumbnail=get_thumb('doramas', auto=True), type='dorama'))
    itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
                         url=host + 'catalogue?type[]=pelicula', thumbnail=get_thumb('movies', auto=True),
                         type='movie'))
    itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?q=',
                         thumbnail=get_thumb('search', auto=True)))
    # Appends the autoplay configuration entry when the user has it enabled.
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def doramas_menu(item):
    """Submenu for doramas: full catalogue and latest-episodes listings."""
    logger.info()
    entries = [
        Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue',
             thumbnail=get_thumb('all', auto=True), type='dorama'),
        Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
             url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'),
    ]
    return entries
def list_all(item):
    """List catalogue entries (movies or doramas) for item.url, with pagination."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    # get_source() strips double quotes from the page, hence the unquoted
    # attribute values in this pattern.
    patron = '<a class=item_episode href=(.*?) title=.*?<img src=(.*?) title=.*?title>(.*?)'
    patron += '</div> <div class=options> <span>(.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype in matches:
        url = scrapedurl
        scrapedtype = scrapedtype.lower()
        scrapedtitle = scrapedtitle
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
                        thumbnail=thumbnail, type=scrapedtype)
        # Movies go straight to findvideos; doramas list their episodes first.
        if scrapedtype != 'dorama':
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        else:
            new_item.contentSerieName=scrapedtitle
            new_item.action = 'episodes'
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginacion
    if itemlist != []:
        # The "next" link is relative, so rebuild the base URL per content type.
        if item.type != 'dorama':
            page_base = host+'catalogue?type[]=pelicula'
        else:
            page_base = host + 'catalogue'
        # NOTE(review): 'Netx' appears to mirror a typo in the site's own
        # markup — confirm against live HTML before "correcting" it.
        next_page = scrapertools.find_single_match(data, '<a href=([^ ]+) aria-label=Netx>')
        if next_page != '':
            itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
                                 url=page_base+next_page, thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png',
                                 type=item.type))
    return itemlist
def latest_episodes(item):
    """List the most recently published episodes across all doramas."""
    logger.info()
    data = get_source(item.url)
    patron = '<a class=episode href=(.*?) title=.*?<img src=(.*?) title=.*?title>(.*?)</div>.*?episode>(.*?)</div>'
    itemlist = [
        Item(channel=item.channel, action='findvideos', url=ep_url, thumbnail=ep_thumb,
             title='%s %s' % (ep_title, ep_num), contentSerieName=ep_title, type='episode')
        for ep_url, ep_thumb, ep_title, ep_num in re.compile(patron, re.DOTALL).findall(data)
    ]
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def episodes(item):
    """List a dorama's episodes from its detail page.

    Fix: the original mutated a single shared ``item.infoLabels`` dict inside
    the loop and passed the same object to every Item, so all entries could
    end up aliasing the last episode's season/episode numbers. Each Item now
    gets its own shallow copy.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<li class=link_episode><a itemprop=url href=(.*?) title=.*?itemprop=name>(.*?)'
    patron += '</span></a><meta itemprop=episodeNumber content=(.*?) /></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedep in matches:
        # Fresh copy per episode — avoids aliasing one mutable dict across items.
        infoLabels = dict(item.infoLabels)
        infoLabels['season'] = 1
        infoLabels['episode'] = scrapedep
        # Fall back to a generic title when the site gives no episode name.
        if scrapedtitle != '':
            title = scrapedtitle
        else:
            title = 'episodio %s' % scrapedep
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
                             contentEpisodeNumber=scrapedep, type='episode', infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def _resolve_gvideo(url, referer):
    """Resolve an 'sgl.php' gateway URL to the direct gvideo file URL (may return '')."""
    data_gvideo = httptools.downloadpage(url, headers={'referer': referer}).data
    return scrapertools.find_single_match(data_gvideo, "'file':'(.*?)','type'")


def findvideos(item):
    """Collect playable links for a movie/episode page.

    If the page turns out to be a dorama landing page rather than a movie it
    is re-dispatched to episodes(). Links come from two places: servers
    embedded in the page HTML and the site's JSON stream API; API results are
    de-duplicated against the embedded ones.

    Fix: the 'sgl.php' resolution logic was duplicated verbatim in both
    branches — factored into _resolve_gvideo().
    """
    logger.info()
    itemlist = []
    duplicated = []
    data = get_source(item.url)
    if item.type != 'episode' and '<meta property=article:section content=Pelicula>' not in data:
        # Not a movie page: treat it as a dorama and list its episodes instead.
        item.type = 'dorama'
        item.contentSerieName = item.contentTitle
        item.contentTitle = ''
        return episodes(item)
    else:
        itemlist.extend(servertools.find_video_items(data=data))
        for video_item in itemlist:
            if 'sgl.php' in video_item.url:
                video_item.url = _resolve_gvideo(video_item.url, item.url)
            duplicated.append(video_item.url)
            video_item.channel = item.channel
            video_item.infoLabels = item.infoLabels
            video_item.language = IDIOMAS['sub']
        # Second source: the stream API, keyed by ids embedded in the page JS.
        patron = 'var item = {id: (\d+), episode: (\d+),'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for id, episode in matches:
            data_json = jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' % (id, episode)).data)
            sources = data_json['options']
            for src in sources:
                url = sources[src]
                if 'sgl.php' in url:
                    url = _resolve_gvideo(url, item.url)
                new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
                                infoLabels=item.infoLabels)
                if url != '' and url not in duplicated:
                    itemlist.append(new_item)
                    duplicated.append(url)
    try:
        itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    except:
        pass
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
def search(item, texto):
    """Channel/global search entry point.

    Returns the catalogue listing filtered by *texto*. Fix: an empty query
    now returns an empty list instead of implicitly returning None, which
    broke callers that iterate the result.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.type = 'search'
    if texto != '':
        return list_all(item)
    return []

View File

@@ -1,33 +0,0 @@
{
"id": "grantorrent",
"name": "GranTorrent",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "grantorrent.jpg",
"banner": "grantorrent.png",
"fanart": "grantorrent.png",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra (TMDB)",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,273 +0,0 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "https://grantorrent.com/"
dict_url_seasons = dict()
__modo_grafico__ = config.get_setting('modo_grafico', 'grantorrent')
def mainlist(item):
    """Root menu for the GranTorrent channel: movies and TV shows."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Peliculas", action="peliculas",
             thumbnail=get_thumb("movies", auto=True)),
        Item(channel=item.channel, title="Series", action="series",
             thumbnail=get_thumb("tvshows", auto=True)),
    ]
    return menu
def peliculas(item):
    """Movies submenu: latest additions and search."""
    logger.info()
    search_thumb = get_thumb("search.png")
    menu = [item.clone(channel=item.channel, title="Novedades", action="listado", url=host)]
    menu.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host, media="película",
                           thumbnail=search_thumb))
    return menu
def series(item):
    """TV-shows submenu: latest additions and search."""
    logger.info()
    search_thumb = get_thumb("search.png")
    menu = [item.clone(channel=item.channel, title="Novedades", action="listado", url=host + "series/")]
    menu.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host + "series/",
                           media="serie", thumbnail=search_thumb))
    return menu
def search(item, texto):
    """Search the site for *texto* and return result items.

    Any scraping failure is logged and swallowed (returns []) so a broken
    channel does not abort the global search. Fix: narrowed the bare
    ``except:`` to ``except Exception`` so KeyboardInterrupt/SystemExit still
    propagate.
    """
    logger.info("texto:" + texto)
    texto = texto.replace(" ", "+")
    itemlist = []
    try:
        url = "%s?s=%s" % (item.url, texto)
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url).data)
        # logger.debug("data %s \n\n" % data)
        video_section = scrapertools.find_single_match(data, '<div class="contenedor-imagen">(.*?</div>)</div></div>')
        pattern = '<a href="(?P<url>[^"]+)"><img.*?src="(?P<thumb>[^"]+)".*?class="bloque-inferior">' \
                  '\s*(?P<title>.*?)\s*</div>'
        matches = re.compile(pattern, re.DOTALL).findall(video_section)
        for url, thumb, title in matches:
            # Series results need the season/episode listing first.
            if item.media == "serie":
                action = "episodios"
            else:
                action = "findvideos"
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumb,
                                 contentTitle=title, contentType="movie"))
        return itemlist
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def listado(item):
    """List movies or series from a catalogue page, with pagination."""
    logger.info()
    itemlist = []
    # Collapse whitespace so the multi-line HTML matches single patterns.
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # logger.debug("data %s \n\n" % data)
    video_section = scrapertools.find_single_match(data, '<br><div class="contenedor-home">(.*?</div>)</div></div>')
    # logger.debug("data %s \n\n" % video_section)
    pattern = '<a href="(?P<url>[^"]+)"><img.*?src="(?P<thumb>[^"]+)".*?.*?class="bloque-superior">\s*' \
              '(?P<quality>.*?)\s*<div class="imagen-idioma">\s*<img src=".*?icono_(?P<lang>[^\.]+).*?<div class=' \
              '"bloque-inferior">\s*(?P<title>.*?)\s*</div><div class="bloque-date">\s*(?P<date>.*?)\s*</div>'
    matches = re.compile(pattern, re.DOTALL).findall(video_section)
    for url, thumb, quality, lang, title, date in matches:
        title = scrapertools.htmlclean(title)
        title = re.sub(r"\s{2}", " ", title)
        # NOTE(review): if quality is empty on the first match, title2 is
        # referenced before assignment (UnboundLocalError) — confirm whether
        # the site ever omits the quality badge.
        if "/series" in item.url:
            if quality:
                title2 = "%s [%s]" % (title, quality)
            itemlist.append(Item(channel=item.channel, action="episodios", title=title2, url=url, thumbnail=thumb,
                                 quality=quality, contentTitle=title, contentType="tvshow"))
        else:
            if quality:
                title2 = "%s [%s]" % (title, quality)
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title2, url=url, thumbnail=thumb,
                                 quality=quality, contentTitle=title, contentType="movie"))
    pagination = scrapertools.find_single_match(data, '<div class="nav-links">(.*?)</ul>')
    if pagination:
        next_page = scrapertools.find_single_match(pagination, "class='page-numbers current'.*?<a.*?href='([^']+)'")
        # logger.debug("next %s" % next_page)
        if next_page:
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=next_page,
                                 thumbnail=get_thumb("next.png")))
    return itemlist
def episodios(item):
    """Build the episode list for a show, merging all seasons collected by get_episodes()."""
    logger.info()
    itemlist = []
    dict_data = dict()
    # get_episodes() recursively walks every season page and aggregates links.
    dict_data, item = get_episodes(item, dict_data)
    for key in dict_data.keys():
        d = dict_data[key]
        # Join all qualities for this episode into one bracketed label.
        quality = "[%s]" % "][".join(d["quality"])
        # Password-protected RAR releases are flagged as unplayable.
        d["s_e"] = re.sub(r"\(Contrase.*?\)\s*", "NO REPRODUCIBLE-RAR", d["s_e"])
        title = "%s [%s] %s" % (d["s_e"], d["lang"], quality)
        # logger.debug("%s" % d["s_e"])
        # Full-season packs carry "Temporada N"; individual episodes "SxE".
        if "temporada" in d["s_e"].lower():
            regex = re.compile('temporada\s*', re.I)
            d["s_e"] = regex.sub("", d["s_e"])
            season = scrapertools.find_single_match(d["s_e"], "(\d+)")
            episode = 1
        else:
            # &#215; is the HTML entity for the multiplication sign between S and E.
            season, episode = scrapertools.find_single_match(d["s_e"], "(\d+)&#215;(\d+)")
        itemlist.append(item.clone(action="findvideos", title=title, thumbnail=item.thumbnail, url=d["url"],
                                   server="torrent", contentSeason=season, contentEpisodeNumber=episode,
                                   contentType="tvshow", fulltitle=item.title, quality=d["quality"], lang=d["lang"]))
    # order list
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    return itemlist
def get_episodes(item, dict_data):
    """Scrape one show page into dict_data, recursing into other season pages.

    dict_data maps "s_e + lang" keys to {url: [...], lang, s_e, quality: [...]}
    with quality[i] paired with url[i]. Season URLs discovered on the page are
    tracked in the module-global dict_url_seasons (False = not yet visited).

    NOTE(review): dict_url_seasons is never cleared between shows, so state
    from a previous listing may leak into the next one — confirm intent.
    """
    global dict_url_seasons
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # logger.debug("data %s \n\n" % data)
    if item.contentTitle != "":
        # Prefer the site's original title/year for later TMDB matching.
        title = scrapertools.find_single_match(data, '<h3 class="bold">.*?original:\s*(.*?)[.]</h3>')
        year = scrapertools.find_single_match(data, '<h3 class="bold">\s*Estreno:\s*(\d+)[.]</h')
        # logger.debug("title es %s" % title)
        if title:
            item.contentTitle = title
            item.show = title
        if year:
            item.infoLabels['year'] = year
    links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
    # logger.debug("data %s \n\n" % links_section)
    pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<s_e>.*?)</td><td>(?P<quality>.*?)</td><td>' \
              '<a class="link" href="(?P<url>[^"]+)"'
    matches = re.compile(pattern, re.DOTALL).findall(links_section)
    for lang, s_e, quality, url in matches:
        if s_e + lang not in dict_data:
            dict_data[s_e + lang] = {"url": [url], "lang": lang, "s_e": s_e,
                                     "quality": [quality]}
        else:
            # Only record unseen qualities; keep url/quality lists in lockstep.
            if quality not in dict_data[s_e+lang]["quality"]:
                dict_data[s_e + lang]["quality"].append(quality)
                dict_data[s_e + lang]["url"].append(url)
    url_to_check = scrapertools.find_single_match(links_section, '</table><p><a.*?href="([^"]+)".*?>\s*Temporada.*?</a>')
    # logger.debug("url es %s " % url_to_check)
    # if url doesn't exist we add it into the dict
    if url_to_check and url_to_check not in dict_url_seasons:
        dict_url_seasons[url_to_check] = False
    # Visit every season URL not yet processed, marking it before recursing.
    for key, value in dict_url_seasons.items():
        if not value:
            item.url = key
            dict_url_seasons[key] = True
            dict_data, item = get_episodes(item, dict_data)
    # logger.debug("URL_LIST es %s " % dict_url_seasons)
    return dict_data, item
def findvideos(item):
    """Return torrent links: scraped from the page for movies, or carried on item.url for episodes."""
    logger.info()
    itemlist = []
    if item.contentType == "movie":
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
        # logger.debug("data %s \n\n" % data)
        if item.contentTitle != "":
            # Refresh title/year from the detail page for TMDB lookups.
            title = scrapertools.find_single_match(data, '<div class="titulo_page_exit">(.*?)[.]</div>')
            year = scrapertools.find_single_match(data, '<div class="ano_page_exit">(\d+)</div>')
            logger.debug("title es %s" % title)
            if title:
                item.contentTitle = title
                item.show = title
            if year:
                item.infoLabels['year'] = year
        links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
        # logger.debug("data %s \n\n" % data)
        pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<quality>.*?)</td><td>(?P<size>.*?)</td><td>' \
                  '<a class="link" href="(?P<url>[^"]+)"'
        matches = re.compile(pattern, re.DOTALL).findall(links_section)
        for lang, quality, size, url in matches:
            title = "[%s] [%s] (%s)" % (lang, quality, size)
            itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
                                       fulltitle=item.title))
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass
    else:
        # Episodes: item.url and item.quality are parallel lists built by episodios().
        for index, url in enumerate(item.url):
            title = "%sx%s [%s] [%s]" % (item.contentSeason, item.contentEpisodeNumber, item.lang, item.quality[index])
            itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
                                       quality=item.quality[index]))
    return itemlist

View File

@@ -7,18 +7,14 @@ from channels import filtertools
from core import httptools
from core import scrapertools
from core import tmdb
from core import jsontools
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
from core import servertools
host = "http://www.pelisplus.tv/"
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
patrones = ['<img src="([^"]+)" alt=".*?" class="picture-movie">',
'<span>Sinopsis:<\/span>.([^<]+)<span class="text-detail-hide"><\/span>.<\/p>']
IDIOMA = {'latino': 'Latino'}
list_language = IDIOMA.values()
@@ -36,6 +32,13 @@ list_servers = [
]
def get_source(url):
    """Fetch *url* and collapse quotes/whitespace so the scrape patterns stay simple."""
    logger.info()
    page = httptools.downloadpage(url).data
    page = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", page)
    return page
def mainlist(item):
logger.info()
@@ -44,100 +47,218 @@ def mainlist(item):
itemlist.append(
item.clone(title="Peliculas",
action="menupeliculas",
thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
extra='peliculas/'
action="sub_menu",
thumbnail=get_thumb('movies', auto=True),
))
itemlist.append(
item.clone(title="Series",
action="menuseries",
thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
fanart='https://s27.postimg.org/iahczwgrn/series.png',
extra='peliculas/'
action="sub_menu",
thumbnail=get_thumb('tvshows', auto=True),
))
itemlist.append(
item.clone(title="Documentales",
action="lista",
url=host + 'documentales/pag-1',
thumbnail='https://s16.postimg.org/7xjj4bmol/documental.png',
fanart='https://s16.postimg.org/7xjj4bmol/documental.png',
extra='documentales/'
))
item.clone(title="Buscar", action="search", url=host + 'busqueda/?s=',
thumbnail=get_thumb('search', auto=True),
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menupeliculas(item):
def sub_menu(item):
logger.info()
itemlist = []
content = item.title.lower()
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'peliculas/pag-1',
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
extra='peliculas/'
))
itemlist.append(item.clone(title="Ultimas",
action="lista",
url=host + 'estrenos/pag-1',
thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
extra='estrenos/'
action="list_all",
url=host + '%s/ultimas-%s/' % (content, content),
thumbnail=get_thumb('all', auto=True),
))
itemlist.append(item.clone(title="Generos",
action="generos",
url=host + 'peliculas/pag-1',
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
extra='documentales/'
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + 'busqueda/?s=',
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png',
extra='peliculas/'
url=host + '%s/' % content,
thumbnail=get_thumb('genres', auto=True),
))
return itemlist
def menuseries(item):
def list_all(item):
    """List movies/series posters from a catalogue page, with pagination."""
    logger.info()
    itemlist=[]
    data = get_source(item.url)
    patron = '(?:</a>|Posters>)<a href=(.*?) class=Posters.*?data-title=(.*?) data.*?src=(.*?) alt'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        url = scrapedurl
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        thumbnail = scrapedthumbnail
        # Use the TMDB poster path embedded in the thumbnail URL to narrow
        # the TMDB search to the exact poster.
        filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w154", "")
        filtro_list = {"poster_path": filtro_thumb}
        filtro_list = filtro_list.items()
        new_item=(
            Item(channel=item.channel,
                 action='findvideos',
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 infoLabels={'filtro': filtro_list},
                 context=autoplay.context
                 ))
        # Movies go straight to findvideos; series list their seasons first.
        if 'serie' not in url:
            new_item.contentTitle = scrapedtitle
            new_item.action = 'findvideos'
        else:
            new_item.contentSerieName = scrapedtitle
            new_item.action = 'seasons'
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination
    if itemlist != []:
        next_page = scrapertools.find_single_match(data, '<li><a href=([^ ]+) rel=next>&raquo;</a>')
        if next_page != '':
            itemlist.append(item.clone(action="list_all",
                                       title='Siguiente >>>',
                                       url=host+next_page,
                                       thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'
                                       ))
    return itemlist
def generos(item):
logger.info()
itemlist = []
data = get_source(item.url)
if 'series' not in item.url:
clean_genre = 'PELÍCULAS DE'
else:
clean_genre = 'SERIES DE'
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + "series/pag-1",
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
extra='series/'
))
patron = '<h2 class=Heading--carousel> %s(.*?) <a class=Heading-link title=View All href=(.*?)><' % clean_genre
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist.append(item.clone(title="Generos",
action="generos",
url=host + 'series/pag-1',
thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
extra='series/'
))
for scrapedtitle, scrapedurl in matches:
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + 'busqueda/?s=',
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png',
extra='series/'
))
url = scrapedurl
title = scrapedtitle
if 'agregadas' not in title.lower():
itemlist.append(
Item(channel=item.channel,
action="list_all",
title=title,
url=url,
))
return itemlist
def seasons(item):
    """List a show's seasons; with item.extra == 'seasons', expand straight to episodes.

    The expansion path is used by the videolibrary to collect every episode
    in one call. Fixes: (1) contentSeasonNumber was referenced after the loop
    and raised NameError when no seasons matched — now initialized; (2) one
    shared infoLabels dict was mutated across iterations — each Item now gets
    its own copy.
    """
    logger.info()
    itemlist = []
    templist = []
    data = get_source(item.url)
    serie_id = scrapertools.find_single_match(data, '<div class=owl-carousel data-serieid=(.*?)>')
    patron = 'class=js-season-item> SEASON<span>(.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    contentSeasonNumber = ''
    for season in matches:
        contentSeasonNumber = season
        # Fresh copy per season so each Item keeps its own season number.
        infoLabels = dict(item.infoLabels)
        infoLabels['season'] = season
        itemlist.append(Item(channel=item.channel, action="episodes", title='Temporada %s' % season,
                             serie_id=serie_id, contentSeasonNumber=contentSeasonNumber,
                             serie_url=item.url, infoLabels=infoLabels))
    if item.extra == 'seasons':
        for tempitem in itemlist:
            templist += episodes(tempitem)
    else:
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                     url=item.url,
                     action="add_serie_to_library",
                     extra="seasons",
                     contentSerieName=item.contentSerieName,
                     contentSeasonNumber=contentSeasonNumber
                     ))
    if item.extra == 'seasons':
        return templist
    else:
        return itemlist
def episodes(item):
    """List one season's episodes via the site's JSON API.

    Fix: the original mutated a single shared ``item.infoLabels`` dict and
    passed the same object to every Item, so entries could alias the last
    episode number. Each Item now gets its own shallow copy.
    """
    logger.info()
    itemlist = []
    url = host + 'api/episodes?titleId=%s&seasonNumber=%s' % (item.serie_id, item.contentSeasonNumber)
    data = jsontools.load(httptools.downloadpage(url).data)
    episode_list = data['titles']
    for episode in episode_list:
        url = item.serie_url + episode['friendlyTitle4Url']
        thumbnail = episode['url_image']
        plot = episode['shortDescription']
        contentEpisodeNumber = episode['tvSeasonEpisodeNumber']
        title = '%sx%s - %s' % (item.contentSeasonNumber, contentEpisodeNumber, episode['title'])
        # Fresh copy per episode — avoids aliasing one mutable dict across items.
        infoLabels = dict(item.infoLabels)
        infoLabels['episode'] = contentEpisodeNumber
        itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail,
                             plot=plot, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Collect playable links embedded in the page and wire them for filter/autoplay."""
    logger.info()
    itemlist = []
    data = get_source(item.url)
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.language = IDIOMA['latino']
        videoitem.title = '[%s] [%s]' % (videoitem.server, videoitem.language)
        videoitem.infoLabels = item.infoLabels
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    # Offer the "add to videolibrary" entry for movies browsed interactively
    # (extra == 'findvideos' marks calls coming from the videolibrary itself).
    if item.contentType == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url, action="add_pelicula_to_library", extra="findvideos",
                     contentTitle=item.contentTitle))
    return itemlist
@@ -149,7 +270,7 @@ def search(item, texto):
try:
if texto != '':
return lista(item)
return list_all(item)
else:
return []
except:
@@ -159,380 +280,22 @@ def search(item, texto):
return []
def lista(item):
    """List catalogue or search results; item.extra picks series vs movies routing."""
    logger.info()
    itemlist = []
    # Route series to the seasons listing, movies straight to findvideos.
    if 'series/' in item.extra:
        accion = 'temporadas'
        tipo = 'tvshow'
    else:
        accion = 'findvideos'
        tipo = 'movie'
    data = httptools.downloadpage(item.url).data
    # Search result pages use different markup than catalogue pages.
    if item.action != 'search':
        patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \
                 'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
        # 'actual' is the current page number, scraped from the pager widget.
        actual = scrapertools.find_single_match(data,
                                                '<a href="https:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
                                                'class="page bicon last"><<\/a>')
    else:
        patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon ' \
                 'online-play"><\/i>.*?\n<h2 class="title title-.*?">.*?\n<a href="([^"]+)" title="([^"]+)">.*?>'
        actual = ''
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        thumbnail = scrapedthumbnail
        filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w154", "")
        filtro_list = {"poster_path": filtro_thumb}  # Nombre del campo a filtrar y valor en los resultados de la api
        # de tmdb
        filtro_list = filtro_list.items()
        if item.action != 'search':
            new_item=(
                Item(channel=item.channel,
                     contentType=tipo,
                     action=accion,
                     title=title,
                     url=scrapedurl,
                     thumbnail=thumbnail,
                     fulltitle=scrapedtitle,
                     infoLabels={'filtro': filtro_list},
                     extra=item.extra,
                     context=autoplay.context
                     ))
            if 'serie' in scrapedurl:
                new_item.contentSerieName=scrapedtitle
            else:
                new_item.contentTitle = scrapedtitle
            itemlist.append(new_item)
        else:
            # Global search arrives without extra: derive it from the result URL.
            if item.extra=='':
                item.extra = scrapertools.find_single_match(url, 'serie|pelicula')+'s/'
                if 'series/' in item.extra:
                    accion = 'temporadas'
                    tipo = 'tvshow'
                else:
                    accion = 'findvideos'
                    tipo = 'movie'
            item.extra = item.extra.rstrip('s/')
            # Only keep results matching the requested media type.
            if item.extra in url:
                new_item=(
                    Item(channel=item.channel,
                         contentType=tipo,
                         action=accion,
                         title=scrapedtitle,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=scrapedtitle,
                         infoLabels={'filtro': filtro_list},
                         extra=item.extra,
                         context=autoplay.context
                         ))
                if 'serie' in scrapedurl:
                    new_item.contentSerieName=scrapedtitle
                else:
                    new_item.contentTitle = scrapedtitle
                itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination only applies to catalogue browsing, not search results.
    if item.action != 'search' and actual != '':
        if itemlist != []:
            next_page = str(int(actual) + 1)
            next_page_url = item.extra + 'pag-' + next_page
            if not next_page_url.startswith("http"):
                next_page_url = host + next_page_url
            itemlist.append(
                Item(channel=item.channel,
                     action="lista",
                     title='Siguiente >>>',
                     url=next_page_url,
                     thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png',
                     extra=item.extra
                     ))
    return itemlist
def temporadas(item):
    """List a show's seasons; with item.extra == 'temporadas', expand to episodes (videolibrary path)."""
    logger.info()
    itemlist = []
    templist = []
    data = httptools.downloadpage(item.url).data
    patron = '<span class="ico accordion_down"><\/span>Temporada([^<]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle in matches:
        infoLabels = item.infoLabels
        url = item.url
        title = 'Temporada ' + scrapedtitle.strip(' \r\n')
        thumbnail = scrapertools.find_single_match(data, '<img src="([^"]+)" alt="" class="picture-movie">')
        plot = scrapertools.find_single_match(data,
                                              '<span>Sinopsis:<\/span>.([^<]+).<span class="text-detail-hide"><\/span>')
        fanart = scrapertools.find_single_match(data, '<img src="([^"]+)"/>.*?</a>')
        contentSeasonNumber = scrapedtitle.strip(' \r\n')
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=title,
                 fulltitle=item.title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 fanart=fanart,
                 extra=scrapedtitle.rstrip('\n'),
                 contentSerieName=item.contentSerieName,
                 contentSeasonNumber=contentSeasonNumber,
                 infoLabels={'season': contentSeasonNumber},
                 context=item.context
                 ))
    if item.extra == 'temporadas':
        # Videolibrary path: flatten straight into the full episode list.
        for tempitem in itemlist:
            templist += episodios(tempitem)
    else:
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
        # NOTE(review): contentSeasonNumber is only bound inside the loop —
        # this raises NameError when no seasons matched; confirm and guard.
        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                     url=item.url,
                     action="add_serie_to_library",
                     extra="temporadas",
                     contentSerieName=item.contentSerieName,
                     contentSeasonNumber=contentSeasonNumber
                     ))
    if item.extra == 'temporadas':
        return templist
    else:
        return itemlist
def episodios(item):
    """List the episodes of one season.

    Keeps only the links whose URL belongs to the requested season
    (item.extra carries the season number scraped by temporadas()).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<span class="ico season_play"><\/span>([^<]+)<\/a>.<a href="([^"]+)" class="season-online enabled">'
    temporada = 'temporada/' + item.extra.strip(' ')
    matches = re.compile(patron, re.DOTALL).findall(data)
    # Same series thumbnail for every episode: scrape it once, not per match.
    thumbnail = scrapertools.find_single_match(data, '<img src="([^"]+)" alt="" class="picture-movie">')
    for scrapedtitle, scrapedurl in matches:
        if temporada not in scrapedurl:
            continue
        url = scrapedurl
        contentSeasonNumber = re.findall(r'temporada.*?(\d+)', url)
        capitulo = re.findall(r'Capitulo \d+', scrapedtitle)
        if not capitulo or not contentSeasonNumber:
            # Malformed entry (no "Capitulo NN" in the title or no season in
            # the URL): skip it instead of raising IndexError.
            continue
        contentEpisodeNumber = re.findall(r'\d+', capitulo[0])[0]
        # BUGFIX: the original reused ONE dict (item.infoLabels) for every
        # Item, so all episodes ended up tagged with the last episode number.
        # Each Item gets its own copy now.
        infoLabels = dict(item.infoLabels)
        infoLabels['episode'] = contentEpisodeNumber
        title = contentSeasonNumber[0] + 'x' + contentEpisodeNumber + ' - ' + scrapedtitle
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 fulltitle=item.title,
                 url=url,
                 thumbnail=thumbnail,
                 plot='',
                 fanart='',
                 extra=scrapedtitle,
                 contentSeasonNumber=item.contentSeasonNumber,
                 infoLabels=infoLabels,
                 context=item.context
                 ))
    if item.extra != 'temporadas':
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
        itemlist = fail_tmdb(itemlist)
    return itemlist
def fail_tmdb(itemlist):
    """Fallback when TMDb returned nothing: scrape plot (and fanart, if
    missing) straight from each item's own page using the module-level
    `patrones` regexes."""
    logger.info()
    for entry in itemlist:
        if entry.infoLabels['plot'] != '':
            continue
        data = httptools.downloadpage(entry.url).data
        if entry.fanart == '':
            entry.fanart = scrapertools.find_single_match(data, patrones[0])
        raw_plot = scrapertools.find_single_match(data, patrones[1])
        entry.plot = scrapertools.remove_htmltags(raw_plot)
    return itemlist
def generos(item):
    """Build the genre menu, attaching a known artwork URL when the genre
    name appears in the local thumbnail table (empty strings otherwise)."""
    tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
               "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
               "Drama": "https://s16.postimg.org/94sia332d/drama.png",
               "Accion": "https://s3.postimg.org/y6o9puflv/accion.png",
               "Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
               "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
               "Animacion": "https://s13.postimg.org/5on877l87/animacion.png",
               "Ciencia Ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
               "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
               "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
               "Musica": "https://s29.postimg.org/bbxmdh9c7/musical.png",
               "Western": "https://s23.postimg.org/lzyfbjzhn/western.png",
               "Fantasia": "https://s13.postimg.org/65ylohgvb/fantasia.png",
               "Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
               "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
               "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
               "Historia": "https://s15.postimg.org/fmc050h1n/historia.png",
               "Pelicula De La Television": "https://s9.postimg.org/t8xb14fb3/delatv.png",
               "Foreign": "https://s29.postimg.org/jdc2m158n/extranjera.png"}
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = '<i class="s-upper" id="([^"]+)"><\/i>.<span>([^<]+)<\/span>'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        # Thumbnail and fanart share the same artwork; '' when unknown.
        art = tgenero.get(scrapedtitle, '')
        itemlist.append(
            Item(channel=item.channel,
                 action="lista",
                 title=scrapedtitle,
                 fulltitle=item.title,
                 url=scrapedurl + 'pag-1',
                 thumbnail=art,
                 fanart=art,
                 extra=scrapedurl.replace('http://www.pelisplus.tv/', '')
                 ))
    return itemlist
def get_vip(url):
    """Resolve the 'vip' mirror links of a player page.

    Swaps 'reproductor' for 'vip' in the page URL, extracts the elreyxhd
    resolver links and follows each resolver endpoint's redirect (the
    endpoints answer with a 302 whose Location is the real video URL).
    Returns a list of bare Items carrying only those final URLs.
    """
    logger.info()
    itemlist = []
    url = url.replace('reproductor', 'vip')
    data = httptools.downloadpage(url).data
    video_urls = scrapertools.find_multiple_matches(data, '<a href="(.*?)".*?>')
    # Locals renamed: the original used `id` (shadows the builtin) and
    # `item` (shadows this module's channel-Item convention).
    for link in video_urls:
        if 'elreyxhd' not in link:
            continue
        if 'plus' in link:
            # Movie link: id/tipo/idioma are encoded in the URL itself.
            video_id, tipo, lang = scrapertools.find_single_match(link, 'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
            new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (video_id, tipo, lang)
            final_url = httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
            itemlist.append(Item(url=final_url))
        else:
            # Episode link: fetch the page and probe every x= variant.
            video_id = scrapertools.find_single_match(link, 'episodes\/(\d+)')
            data_vip = httptools.downloadpage(link).data
            matches = re.compile('<a href="(.*?)">', re.DOTALL).findall(data_vip)
            for candidate in matches:
                x = scrapertools.find_single_match(candidate, r"&x=(\d)&")
                if x != '':
                    new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=%s&sv=si' % (video_id, x)
                    final_url = httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
                    itemlist.append(Item(url=final_url))
    return itemlist
def findvideos(item):
    """Collect the playable links for a movie/episode page.

    Gathers the 'vip' mirror links plus every server link detected in the
    embedded player page, tags them for FilterTools/AutoPlay, relabels
    YouTube trailers and moves them to the end of the list.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    video_page = scrapertools.find_single_match(data, "<iframe width='100%' height='500' src='(.*?)' frameborder='0'")
    itemlist.extend(get_vip(video_page))
    data = httptools.downloadpage(video_page).data
    # Dead code removed: the original also built `duplicados = []` and a
    # patron/matches pair that were never used.
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.infoLabels = item.infoLabels
        if videoitem.quality == '' or videoitem.language == '':
            videoitem.quality = 'default'
            videoitem.language = 'Latino'
        videoitem.action = 'play'
        videoitem.fulltitle = item.title
        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            # '(%s)' placeholder is filled with the server name below.
            videoitem.title = item.contentTitle + ' (%s)'
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
            # Relabel trailers and push them to the end of the list.
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]
        n = n + 1
    # BUGFIX: the original tested "'youtube' in itemlist[-1]" — membership in
    # the Item object itself, not its URL — and indexed an unguarded list.
    if item.extra == 'findvideos' and itemlist and 'youtube' in itemlist[-1].url:
        # NOTE(review): pop(1) (not pop(-1)) kept as in the original — verify intent.
        itemlist.pop(1)
    # Requerido para FilterTools
    tmdb.set_infoLabels_itemlist(itemlist, True)
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    if 'serie' not in item.url:
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle
                     ))
    return itemlist
def play(item):
    """Prepare the resolved link for playback: promote the content artwork
    to the player thumbnail and hand the item back as a one-element list."""
    item.thumbnail = item.contentThumbnail
    return [item]
def newest(categoria):
logger.info()
itemlist = []
item = Item()
item.extra = 'estrenos/'
try:
if categoria in ['peliculas','latino']:
item.url = host + 'estrenos/pag-1'
item.url = host + 'peliculas/ultimas-agregadas/'
elif categoria == 'infantiles':
item.url = host + 'peliculas/animacion/pag-1'
item.url = host + 'peliculas/animacion/'
elif categoria == 'terror':
item.url = host + 'peliculas/terror/pag-1'
item.url = host + 'peliculas/terror/'
elif categoria == 'documentales':
item.url = host + 'documentales/pag-1'
item.extra = 'documentales/'
item.url = host + 'documentales/'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
@@ -543,5 +306,4 @@ def newest(categoria):
logger.error("{0}".format(line))
return []
#itemlist = filtertools.get_links(itemlist, item, list_language)
return itemlist

View File

@@ -8,18 +8,24 @@ from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
from channels import autoplay
from platformcode import config, logger
HOST = 'http://seriesdanko.to/'
IDIOMAS = {'es': 'Español', 'la': 'Latino', 'vos': 'VOS', 'vo': 'VO'}
list_idiomas = IDIOMAS.values()
list_servers = ['streamcloud', 'powvideo', 'gamovideo', 'streamplay', 'openload', 'flashx', 'nowvideo', 'thevideo']
CALIDADES = ['SD', 'MicroHD', 'HD/MKV']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, CALIDADES)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="novedades", url=HOST))
itemlist.append(Item(channel=item.channel, title="Más vistas", action="mas_vistas", url=HOST))
itemlist.append(Item(channel=item.channel, title="Listado Alfabético", action="listado_alfabetico", url=HOST))
@@ -27,7 +33,9 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search",
url=urlparse.urljoin(HOST, "all.php")))
itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
#itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -40,7 +48,6 @@ def novedades(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
data = re.sub(r"<!--.*?-->", "", data)
logger.debug(data)
patron = '<a title="([^"]+)" href="([^"]+)".*?>'
patron += "<img.*?src='([^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -72,11 +79,11 @@ def mas_vistas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
data = re.sub(r"<!--.*?-->", "", data)
patron = "<div class='widget HTML' id='HTML3'.+?<div class='widget-content'>(.*?)</div>"
data = scrapertools.get_match(data, patron)
return series_seccion(item, data)
item.data = data
item.first = 0
return series_seccion(item)
def listado_completo(item):
@@ -87,21 +94,37 @@ def listado_completo(item):
data = re.sub(r"<!--.*?-->", "", data)
patron = '<div class="widget HTML" id="HTML10".+?<div class="widget-content">(.*?)</div>'
data = scrapertools.get_match(data, patron)
return series_seccion(item, data)
item.first = 0
item.data = data
return series_seccion(item)
def series_seccion(item, data):
def series_seccion(item):
logger.info()
itemlist = []
next_page = ''
data = item.data
data = data.replace('ahref', 'a href')
patron = "<a href='([^']+)'.*?>(.*?)</a>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if int(item.first)+20 < len(matches):
limit = int(item.first)+20
next_page = limit + 1
else:
limit = len(matches)
for scrapedurl, scrapedtitle in matches[item.first:limit]:
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, show=scrapedtitle,
url=urlparse.urljoin(HOST, scrapedurl),
context=filtertools.context(item, list_idiomas, CALIDADES)))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
#pagination
if next_page !='':
itemlist.append(Item(channel=item.channel, action="series_seccion", title='Siguiente >>>', data=item.data,
first=next_page))
return itemlist
@@ -117,6 +140,7 @@ def listado_alfabetico(item):
return itemlist
def series_por_letra(item):
logger.info("letra = {0}".format(item.title))
data = httptools.downloadpage(item.url).data
@@ -142,6 +166,7 @@ def search(item, texto):
itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", show=title,
context=filtertools.context(item, list_idiomas, CALIDADES)))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
@@ -176,18 +201,27 @@ def episodios(item):
patron = "<a href='([^']+)'>(.*?)</a><idioma>(.*?)</idioma>"
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedtitle, scrapedidioma in matches:
idioma = ""
filter_langs = []
for i in scrapedidioma.split("|"):
idioma += " [" + IDIOMAS.get(i, "OVOS") + "]"
filter_langs.append(IDIOMAS.get(i, "OVOS"))
title = scrapedtitle + idioma
season_episode = scrapertools.get_season_and_episode(scrapedtitle)
title = '%s %s %s' % (season_episode, scrapedtitle, idioma)
season_episode = season_episode.split('x')
infoLabels['season'] = season_episode[0]
infoLabels['episode'] = season_episode[1]
itemlist.append(Item(channel=item.channel, title=title, url=urlparse.urljoin(HOST, scrapedurl),
action="findvideos", show=item.show, thumbnail=thumbnail, plot="", language=filter_langs))
action="findvideos", show=item.show, thumbnail=thumbnail, plot="", language=filter_langs,
infoLabels=infoLabels))
itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
#itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Opción "Añadir esta serie a la videoteca de XBMC"
if config.get_videolibrary_support() and len(itemlist) > 0:
@@ -212,6 +246,14 @@ def findvideos(item):
itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_idiomas)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
@@ -227,6 +269,7 @@ def parse_videos(item, tipo, data):
links = re.findall(pattern, data, re.MULTILINE | re.DOTALL)
for language, date, server, link, quality in links:
if quality == "":
quality = "SD"
title = "%s en %s [%s] [%s] (%s)" % (tipo, server, IDIOMAS.get(language, "OVOS"), quality, date)
@@ -243,8 +286,8 @@ def play(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
patron = '<div id="url2".*?><a href="([^"]+)">.+?</a></div>'
#patron = '<div id="url2".*?><a href="([^"]+)">.+?</a></div>'
patron = '<a target="_blank" href="(.*?)">'
url = scrapertools.find_single_match(data, patron)
itemlist = servertools.find_video_items(data=url)