Merge pull request #8 from Alfa-beto/master

fetch
Alfa-beto authored on 2019-03-22 15:44:03 -03:00; committed by GitHub
125 changed files with 2293 additions and 1556 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.25" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.31" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,13 +19,15 @@
</assets>
<news>[B]These are the changes in this version:[/B]
[COLOR green][B]Fixes[/B][/COLOR]
¤ allcalidad ¤ canalpelis ¤ ciberpeliculashd
¤ pelisplay ¤ doramasmp4 ¤ Newpct1
¤ AnimeBoom ¤ AnimeID ¤ abtoon
¤ mixtoon ¤ Animeflv
¤ maxipelis24 ¤ cuevana3 ¤ pelisplusco
¤ mejortorrent ¤ newpct1
[COLOR green][B]New features[/B][/COLOR]
¤ Mirapeliculas
¤ Mundopelis ¤ thevideobee ¤ tusfiles
¤ vup
¤ Thanks to @mac12m99 and @chivmalev for contributing to this version
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -9,15 +9,16 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import renumbertools,autoplay
from channels import renumbertools, autoplay
CHANNEL_HOST = "https://www.animeid.tv/"
IDIOMAS = {'Latino':'LAT', 'VOSE': 'VOSE'}
IDIOMAS = {'Latino': 'LAT', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['animeid']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
@@ -69,7 +70,7 @@ def search(item, texto):
["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0"])
headers.append(["Referer", CHANNEL_HOST])
headers.append(["X-Requested-With", "XMLHttpRequest"])
data = scrapertools.cache_page(item.url, headers=headers)
data = httptools.downloadpage(item.url, headers=headers).data
data = data.replace("\\", "")
patron = '{"id":"([^"]+)","text":"([^"]+)","date":"[^"]*","image":"([^"]+)","link":"([^"]+)"}'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -84,7 +85,7 @@ def search(item, texto):
context.extend(context2)
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot,
context=context,show=title, viewmode="movie_with_plot"))
context=context, show=title, viewmode="movie_with_plot"))
return itemlist
@@ -110,7 +111,7 @@ def novedades_series(item):
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
context=context,show=title, viewmode="movie_with_plot"))
context=context, show=title, viewmode="movie_with_plot"))
return itemlist
@@ -118,7 +119,7 @@ def novedades_episodios(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="lastcap">(.*?)</section>')
patronvideos = '(?s)<a href="([^"]+)">[^<]+<header>([^<]+).*?src="([^"]+)"[\s\S]+?<p>(.+?)</p>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
for url, title, thumbnail, plot in matches:
@@ -204,13 +205,13 @@ def episodios(item, final=True):
data = httptools.downloadpage(item.url).data
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)')
CHANNEL_HEADERS = [
["Host", "m.animeid.tv"],
["X-Requested-With", "XMLHttpRequest"]
["Host", "m.animeid.tv"],
["X-Requested-With", "XMLHttpRequest"]
]
page = 0
while True:
page += 1
u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" %(data_id, page)
u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" % (data_id, page)
data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
# When there is no more data it returns: "list":[]
if '"list":[]' in data:
@@ -218,21 +219,25 @@ def episodios(item, final=True):
dict_data = jsontools.load(data)
list = dict_data['list'][::-1]
for dict in list:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(dict["numero"]))
title = "%sx%s - %s" % (season, str(episode).zfill(2),dict["date"])
itemlist.append(Item(action = "findvideos",
channel = item.channel,
title = title,
url = CHANNEL_HOST + dict['href'],
thumbnail = item.thumbnail,
show = item.show,
viewmode = "movie_with_plot"
))
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1,
int(dict["numero"]))
title = "%sx%s - %s" % (season, str(episode).zfill(2), dict["date"])
itemlist.append(Item(action="findvideos",
channel=item.channel,
title=title,
url=CHANNEL_HOST + dict['href'],
thumbnail=item.thumbnail,
show=item.show,
viewmode="movie_with_plot"
))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show))
itemlist.append(Item(channel=item.channel, title="[COLOR white]Descargar todos los episodios de la serie[/COLOR]", url=item.url,
action="download_all_episodes", extra="episodios", show=item.show))
itemlist.append(
Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show))
itemlist.append(
Item(channel=item.channel, title="[COLOR white]Descargar todos los episodios de la serie[/COLOR]",
url=item.url,
action="download_all_episodes", extra="episodios", show=item.show))
return itemlist
@@ -270,8 +275,8 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title="Siguiente: " + title_siguiente,
url=CHANNEL_HOST + url_siguiente, thumbnail=item.thumbnail, plot=item.plot, show=item.show,
fanart=item.thumbnail, folder=True))
# Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist
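A change repeated throughout this PR replaces scrapertools.cache_page() calls with httptools.downloadpage(), whose .data attribute holds the response body. A minimal sketch of the migration, assuming the list-of-pairs headers format used in search() above (the URL path is invented for illustration):

from core import httptools

headers = [["Referer", "https://www.animeid.tv/"],
           ["X-Requested-With", "XMLHttpRequest"]]
# Old: data = scrapertools.cache_page(url, headers=headers)
# New: downloadpage() returns a response object; .data is the page body.
data = httptools.downloadpage("https://www.animeid.tv/buscar", headers=headers).data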

View File

@@ -25,14 +25,14 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0))
itemlist.append(Item(channel=item.channel, action="lista", title="Series",
url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0))
url=urlparse.urljoin(host, "/category/pelicula"), type='pl', pag=1))
#itemlist.append(Item(channel=item.channel, action="lista", title="Series",
# url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
#itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -43,18 +43,18 @@ def category(item):
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.cat == 'abc':
data = scrapertools.find_single_match(data, '<span>Orden Alfabético</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<div class="Body Container">(.+?)<main>')
elif item.cat == 'genre':
data = scrapertools.find_single_match(data, '<span>Géneros</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<a>Géneros<\/a><ul class="sub.menu">(.+?)<a>Año<\/a>')
elif item.cat == 'year':
data = scrapertools.find_single_match(data, '<span>Año</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<a>Año<\/a><ul class="sub.menu">(.+?)<a>Idioma<\/a>')
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<span>Calidad</span>.*?</ul>')
patron = "<li.*?>([^<]+)<a href='([^']+)'>"
data = scrapertools.find_single_match(data, '<a>Calidad<\/a><ul class="sub-menu">(.+?)<a>Géneros<\/a>')
patron = '<li.*?><a href="(.*?)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
for scrapedurl,scrapedtitle in matches:
if scrapedtitle != 'Próximas Películas':
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', first=0))
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', pag=0))
return itemlist
@@ -63,6 +63,7 @@ def search_results(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
logger.info(data)
patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
patron +=">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
matches = scrapertools.find_multiple_matches(data, patron)
@@ -90,28 +91,6 @@ def search(item, texto):
if texto != '':
return search_results(item)
def episodios(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
data = data.replace('"ep0','"epp"')
patron = '(?is)<div id="ep(\d+)".*?'
patron += 'src="([^"]+)".*?'
patron += '(href.*?)fa fa-download'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedepi, scrapedthumbnail, scrapedurls in matches:
title="1x%s - %s" % (scrapedepi, item.contentSerieName)
urls = scrapertools.find_multiple_matches(scrapedurls, 'href="([^"]+)')
itemlist.append(item.clone(action='findvideos', title=title, url=item.url, thumbnail=scrapedthumbnail, type=item.type,
urls = urls, infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
url=item.url, action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName))
return itemlist
def lista(item):
logger.info()
next = True
@@ -119,64 +98,37 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
css_data = scrapertools.find_single_match(data, "<style id='page-skin-1' type='text/css'>(.*?)</style>")
data = scrapertools.find_single_match(data, "itemprop='headline'>.*?</h2>.*?</ul>")
patron = '<span class="([^"]+)">.*?<figure class="poster-bg">(.*?)<img src="([^"]+)" />'
patron += '(.*?)</figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'
patron = '<article .*?">'
patron += '<a href="([^"]+)"><.*?><figure.*?>' #scrapedurl
patron += '<img.*?src="([^"]+)".*?>.*?' #scrapedthumbnail
patron += '<h3 class=".*?">([^"]+)<\/h3>' #scrapedtitle
patron += '<span.*?>([^"]+)<\/span>.+?' #scrapedyear
patron += '<a.+?>([^"]+)<\/a>' #scrapedtype
matches = scrapertools.find_multiple_matches(data, patron)
first = int(item.first)
last = first + 19
if last > len(matches):
last = len(matches)
next = False
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
title="%s - %s" % (scrapedtitle,scrapedyear)
for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle, scrapedurl in matches[first:last]:
year = scrapertools.find_single_match(scrapedyear, '<span>(\d{4})</span>')
new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':scrapedyear})
if not year:
class_year = scrapertools.find_single_match(scrapedyear, 'class="([^\"]+)"')
year = scrapertools.find_single_match(css_data, "\." + class_year + ":after {content:'(\d{4})';}")
if not year:
year = scrapertools.find_single_match(data, "headline'>(\d{4})</h2>")
qual = ""
if scrapedquality:
patron_qualities='<i class="([^"]+)"></i>'
qualities = scrapertools.find_multiple_matches(scrapedquality, patron_qualities)
for quality in qualities:
patron_desc = "\." + quality + ":after {content:'([^\']+)';}"
quality_desc = scrapertools.find_single_match(css_data, patron_desc)
qual = qual+ "[" + quality_desc + "] "
title="%s [%s] %s" % (scrapedtitle,year,qual)
new_item = Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':year})
if scrapedtype.strip() == 'sr':
if scrapedtype == 'sr':
new_item.contentSerieName = scrapedtitle
new_item.action = 'episodios'
else:
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
if scrapedtype == item.type or item.type == 'cat':
itemlist.append(new_item)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
#pagination
url_next_page = item.url
first = last
if next:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
pag = item.pag + 1
url_next_page = item.url+"/page/"+str(pag)+"/"
if len(itemlist)>19:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', pag=pag))
return itemlist
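The rewritten lista() above paginates by requesting the site's /page/N/ URLs instead of slicing matches with first/last. A condensed sketch of the new step; add_next_page is a hypothetical wrapper, while pag, the URL shape, and the 19-item threshold come straight from the diff:

def add_next_page(item, itemlist):
    # Only offer page N+1 when the current page looked full (more than 19 results).
    pag = item.pag + 1
    url_next_page = item.url + "/page/" + str(pag) + "/"
    if len(itemlist) > 19:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page,
                                   action='lista', pag=pag))
    return itemlist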

View File

@@ -9,6 +9,7 @@ from platformcode import config, logger
from platformcode import platformtools
from platformcode import launcher
from time import sleep
from platformcode.config import get_setting
__channel__ = "autoplay"
@@ -117,7 +118,7 @@ def start(itemlist, item):
# Gets the autoplay settings for this channel
settings_node = channel_node.get('settings', {})
if settings_node['active']:
if get_setting('autoplay') or settings_node['active']:
url_list_valid = []
autoplay_list = []
autoplay_b = []
@@ -142,7 +143,7 @@ def start(itemlist, item):
# 2: Servers only
# 3: Qualities only
# 4: Do not sort
if settings_node['custom_servers'] and settings_node['custom_quality']:
if (settings_node['custom_servers'] and settings_node['custom_quality']) or get_setting('autoplay'):
priority = settings_node['priority'] # 0: Servers and qualities, or 1: Qualities and servers
elif settings_node['custom_servers']:
priority = 2 # Servers only
@@ -391,14 +392,15 @@ def init(channel, list_servers, list_quality, reset=False):
# Check that there are no duplicate qualities or servers
if 'default' not in list_quality:
list_quality.append('default')
list_servers = list(set(list_servers))
list_quality = list(set(list_quality))
# list_servers = list(set(list_servers))
# list_quality = list(set(list_quality))
# Create the channel node and add it
channel_node = {"servers": list_servers,
"quality": list_quality,
"settings": {
"active": False,
"plan_b": True,
"custom_servers": False,
"custom_quality": False,
"priority": 0}}
@@ -455,7 +457,7 @@ def check_value(channel, itemlist):
for item in itemlist:
if item.server.lower() not in server_list and item.server !='':
server_list.append(item.server)
server_list.append(item.server.lower())
change = True
if item.quality not in quality_list and item.quality !='':
quality_list.append(item.quality)
@@ -672,7 +674,7 @@ def is_active(channel):
# Gets the autoplay settings for this channel
settings_node = channel_node.get('settings', {})
return settings_node.get('active', False)
return settings_node.get('active', False) or get_setting('autoplay')
def reset(item, dict):
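The net effect of the autoplay.py edits is that a new global 'autoplay' preference can switch the feature on even when a channel never enabled it. A reduced sketch of the resulting check, mirroring is_active() above (autoplay_enabled is a hypothetical name):

from platformcode.config import get_setting

def autoplay_enabled(channel_node):
    # The channel's own 'active' flag OR the global 'autoplay' setting wins.
    settings_node = channel_node.get('settings', {})
    return settings_node.get('active', False) or get_setting('autoplay')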

View File

@@ -3,7 +3,7 @@
import re
import urllib
from core import jsontools as json
from core import jsontools as json, httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
@@ -12,6 +12,7 @@ url_api = ""
beeg_salt = ""
Host = "https://beeg.com"
def get_api_url():
global url_api
global beeg_salt
@@ -53,7 +54,7 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Últimos videos", url=url_api + "/index/main/0/pc",
viewmode="movie"))
#itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
# itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
# url=url_api + "/index/main/0/pc", extra="popular"))
itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias completo",
url=url_api + "/index/main/0/pc", extra="nonpopular"))
@@ -65,7 +66,7 @@ def mainlist(item):
def videos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
JSONData = json.load(data)
for Video in JSONData["videos"]:
@@ -90,14 +91,14 @@ def videos(item):
def listcategorias(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
JSONData = json.load(data)
#for Tag in JSONData["tags"][item.extra]:
# for Tag in JSONData["tags"][item.extra]:
for Tag in JSONData["tags"]:
url = url_api + "/index/tag/0/pc?tag=" + Tag["tag"]
title = '%s - %s' % (str(Tag["tag"]), str(Tag["videos"]))
#title = title[:1].upper() + title[1:]
# title = title[:1].upper() + title[1:]
itemlist.append(
Item(channel=item.channel, action="videos", title=title, url=url, folder=True, viewmode="movie"))
@@ -109,7 +110,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = item.url % (texto)
try:
return videos(item)
# Catch the exception so a failing channel does not break the global search
@@ -136,7 +137,8 @@ def play(item):
viedokey = re.compile("key=(.*?)%2Cend=", re.DOTALL).findall(url)[0]
url = url.replace(viedokey, decode(viedokey))
if not url.startswith("https:"): url = "https:" + url
if not url.startswith("https:"):
url = "https:" + url
title = videourl
itemlist.append(["%s %s [directo]" % (title, url[-4:]), url])

View File

@@ -40,7 +40,6 @@ else:
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -105,7 +104,6 @@ def sub_search(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", infoLabels={"year": year},
thumbnail=scrapedthumbnail, text_color=color3, page=0))
@@ -167,7 +165,6 @@ def peliculas(item):
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 30]:
if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
@@ -212,7 +209,7 @@ def generos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>'
@@ -231,14 +228,13 @@ def year_release(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
@@ -365,7 +361,7 @@ def episodios(item):
if i.infoLabels['title']:
# If the episode has its own name, append it to the item title
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, replace the poster
i.thumbnail = i.infoLabels['poster_path']

View File

@@ -41,7 +41,6 @@ else:
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -121,10 +120,10 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '</figure>(.*?)' # type
patron += '<h3 class="Title">([^<]+)</h3>.*?' # title
patron += '<span class="Year">([^<]+)</span>.*?' # year
matches = scrapertools.find_multiple_matches(data, patron)
@@ -173,7 +172,6 @@ def genresYears(item):
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="peliculas"))
return itemlist
@@ -183,13 +181,12 @@ def year_release(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
@@ -203,13 +200,12 @@ def series(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
@@ -274,7 +270,7 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
@@ -307,7 +303,7 @@ def episodios(item):
if i.infoLabels['title']:
# If the episode has its own name, append it to the item title
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, replace the poster
i.thumbnail = i.infoLabels['poster_path']
@@ -340,7 +336,8 @@ def findvideos(item):
lang, quality = match[0]
quality = quality.strip()
headers = {'Referer': item.url}
url_1 = scrapertools.find_single_match(data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
url_1 = scrapertools.find_single_match(data,
'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
new_data = httptools.downloadpage(url_1, headers=headers).data
new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
new_data = scrapertools.decodeHtmlentities(new_data)

View File

@@ -3,7 +3,7 @@
"name": "CineHindi",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["vos"],
"thumbnail": "cinehindi.png",
"banner": "http://i.imgur.com/cau9TVe.png",
"categories": [

View File

@@ -27,8 +27,8 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host, thumbnail = get_thumb("genres", auto = True)))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
url=urlparse.urljoin(host, "proximamente")))
#itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
# url=urlparse.urljoin(host, "proximamente")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s="), thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -38,8 +38,8 @@ def genero(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(host).data
patron = 'level-0.*?value="([^"]+)"'
patron += '>([^<]+)'
patron = '<option class=.*? value=([^<]+)>'
patron += '([^<]+)<\/option>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
if 'Próximas Películas' in scrapedtitle:
@@ -94,28 +94,29 @@ def lista(item):
else:
url = httptools.downloadpage("%s?cat=%s" %(host, item.cat), follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage(url).data
bloque = scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
patron = 'class="item">.*?' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href="([^"]+).*?' # scrapedurl
patron += '<img src="([^"]+).*?' # scrapedthumbnail
patron += 'alt="([^"]+).*?' # scrapedtitle
patron += '<div class="fixyear">(.*?)</span></div><' # scrapedfixyear
bloque = data#scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
patron = '<div id=mt.+?>' # All the movie items (on this site) start with this
patron += '<a href=([^"]+)\/><div class=image>' # scrapedurl
patron += '<img src=([^"]+) alt=.*?' # scrapedthumbnail
patron += '<span class=tt>([^"]+)<\/span>' # scrapedtitle
patron += '<span class=ttx>([^"]+)<div class=degradado>.*?' # scrapedplot
patron += '<span class=year>([^"]+)<\/span><\/div><\/div>' # scrapedfixyear
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedfixyear in matches:
patron = '<span class="year">([^<]+)' # scrapedyear
scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedyear in matches:
#patron = '<span class="year">([^<]+)' # scrapedyear
#scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
scrapedtitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle,'\(\d{4}\)'),'').strip()
title = scrapedtitle
if scrapedyear:
title += ' (%s)' % (scrapedyear)
item.infoLabels['year'] = int(scrapedyear)
patron = '<span class="calidad2">([^<]+).*?' # scrapedquality
scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
if scrapedquality:
title += ' [%s]' % (scrapedquality)
#scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
#if scrapedquality:
# title += ' [%s]' % (scrapedquality)
itemlist.append(
item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, plot=scrapedplot, contentType="movie", context=["buscar_trailer"]))
tmdb.set_infoLabels(itemlist)
# Pagination
patron = 'rel="next" href="([^"]+)'

View File

@@ -0,0 +1,63 @@
{
"id": "cineonline",
"name": "cineonline",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://www.cine-online.eu/wp-content/uploads/2015/04/CINE-logo-bueno.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re, urllib, urlparse
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'https://www.cine-online.eu'
IDIOMAS = {'Español': 'ESP', 'Cast': 'ESP', 'Latino': 'LAT', 'Lat': 'LAT', 'Subtitulado': 'VOSE', 'Sub': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['Streamango', 'Vidoza', 'Openload', 'Streamcherry', 'Netutv']
list_quality = []
__channel__='cineonline'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title ="Películas", action ="mainlist_pelis"))
itemlist.append(item.clone(title="Series" , action="lista", url= host + "/serie/"))
itemlist.append(item.clone(title="Buscar", action="search"))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def mainlist_pelis(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Novedades" , action="lista", url= host))
itemlist.append(item.clone(title="Castellano" , action="lista", url= host + "/tag/castellano/"))
itemlist.append(item.clone(title="Latino" , action="lista", url= host + "/tag/latino/"))
itemlist.append(item.clone(title="Subtituladas" , action="lista", url= host + "/tag/subtitulado/"))
itemlist.append(item.clone(title="Categorias" , action="categorias", url= host))
itemlist.append(item.clone(title="Año" , action="categorias", url= host))
itemlist.append(item.clone( title = 'Buscar', action = 'search', search_type = 'movie' ))
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if "Año" in item.title:
data = scrapertools.get_match(data,'<h3>Año de estreno(.*?)</ul>')
patron = '<li><a href="([^"]+)">(\d+)</(\w)>'
else:
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a> <span>(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, cantidad in matches:
scrapedplot = ""
scrapedthumbnail = ""
title = scrapedtitle + " %s" % cantidad
itemlist.append(item.clone(channel=item.channel, action="lista", title=title , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div id="mt-\d+".*?<a href="([^"]+)".*?'
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="year">(\d+)</span>.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
scrapedtitle = scrapedtitle.replace("Ver", "").replace("online", "")
title = '%s (%s)' % (scrapedtitle, scrapedyear)
url = scrapedurl
new_item = Item(channel=item.channel,
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
infoLabels={'year':scrapedyear})
if '/serie/' in url:
new_item.action = 'temporadas'
new_item.contentSerieName = scrapedtitle
else:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)">Siguiente</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append(item.clone(channel=item.channel , action="lista" , title="Next page >>" ,
text_color="blue", url=next_page_url) )
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<span class="se-t">(\d+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for numtempo in matches:
itemlist.append(item.clone( action='episodesxseason', title='Temporada %s' % numtempo, url = item.url,
contentType='season', contentSeason=numtempo ))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
# return sorted(itemlist, key=lambda it: it.title)
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="numerando">%s x (\d+)</div>.*?' % item.contentSeason
patron += '<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for episode, url, title in matches:
titulo = '%sx%s %s' % (item.contentSeason, episode, title)
itemlist.append(item.clone( action='findvideos', url=url, title=titulo,
contentType='episode', contentEpisodeNumber=episode ))
tmdb.set_infoLabels(itemlist)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'id="plays-(\d+)">\s*([^<]+)</div'
matches = scrapertools.find_multiple_matches(data, patron)
for xnumber, xname in matches:
if "/episodios/" in item.url:
lang = scrapertools.find_single_match(data, '#player2%s">([^<]+)</a>' % xnumber)
else:
lang = scrapertools.find_single_match(data, '#div%s">([^<]+)<' % xnumber)
if "lat" in lang.lower(): lang= "Lat"
if 'cast' in lang.lower(): lang= "Cast"
if 'sub' in lang.lower(): lang= "Sub"
if lang in IDIOMAS:
lang = IDIOMAS[lang]
post= {"nombre":xname}
url= httptools.downloadpage("https://www.cine-online.eu/ecrypt", post=urllib.urlencode(post)).data
url = scrapertools.find_single_match(url,'<(?:IFRAME SRC|iframe src)="([^"]+)"')
if not config.get_setting('unify'):
title = ' (%s)' % (lang)
else:
title = ''
if url != '':
itemlist.append(item.clone(action="play", title='%s'+title, url=url, language=lang ))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required to filter links
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if not "/episodios/" in item.url:
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos':
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist
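findvideos() in the new cineonline channel resolves each embed through the site's /ecrypt endpoint. A trimmed sketch of that step; the server name in the POST body is invented for illustration:

import urllib
from core import httptools, scrapertools

post = {"nombre": "openload"}  # hypothetical server name scraped from the page
data = httptools.downloadpage("https://www.cine-online.eu/ecrypt",
                              post=urllib.urlencode(post)).data
url = scrapertools.find_single_match(data, '<(?:IFRAME SRC|iframe src)="([^"]+)"')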

View File

@@ -5,6 +5,7 @@ import re
from core import scrapertools
from core import servertools
from core import httptools
from core.item import Item
from platformcode import config, logger
@@ -43,7 +44,7 @@ def lista(item):
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
# Extract the entries (folders)
patronvideos = '&lt;img .*?src=&quot;(.*?)&quot;'
@@ -92,7 +93,7 @@ def detail(item):
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = data.replace("%3A", ":")
data = data.replace("%2F", "/")

View File

@@ -5,6 +5,7 @@ import urlparse
from core import scrapertools
from core import servertools
from core import httptools
from core.item import Item
from platformcode import logger
@@ -30,7 +31,7 @@ def DocuSeries(item):
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
# Extract the entries (folders)
patronvideos = '<li><b><a href="([^"]+)" target="_blank">([^<]+)</a></b></li>'
@@ -54,7 +55,7 @@ def DocuTag(item):
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span class='label-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -76,7 +77,7 @@ def DocuARCHIVO(item):
itemlist = []
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
patronvideos = "<a class='post-count-link' href='([^']+)'>([^<]+)</a>[^<]+"
patronvideos += "<span class='post-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -102,7 +103,7 @@ def listvideos(item):
scrapedplot = ""
# Download the page
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
patronvideos = "<h3 class='post-title entry-title'[^<]+"
patronvideos += "<a href='([^']+)'>([^<]+)</a>.*?"
patronvideos += "<div class='post-body entry-content'(.*?)<div class='post-footer'>"
@@ -156,7 +157,7 @@ def findvideos(item):
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, "<div class='post-body entry-content'(.*?)<div class='post-footer'>")
# Look for the video links

View File

@@ -59,9 +59,8 @@ def lista(item):
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
plot=plot, fanart=scrapedthumbnail, contentTitle=contentTitle ))
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -71,11 +70,10 @@ def lista(item):
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videochannel=item.channel
return itemlist

View File

@@ -55,7 +55,7 @@ def categorias(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" itemprop="url">.*?'
patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
@@ -64,12 +64,11 @@ def lista(item):
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = scrapedurl
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
plot=plot, fanart=scrapedthumbnail, contentTitle = contentTitle ))
next_page = scrapertools.find_single_match(data,'<li><a data=\'\d+\' href="([^"]+)" title="Next">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
@@ -80,11 +79,11 @@ def lista(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = 'video_url: \'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -26,7 +26,7 @@ def mainlist(item):
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=thumbnail, plot=plot, contentTitle=contentTitle))
thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle=contentTitle))
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
@@ -36,7 +36,7 @@ def mainlist(item):
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title

View File

@@ -179,7 +179,7 @@ def genres(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'

View File

@@ -533,17 +533,17 @@ def findvideos(item):
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("%s/js/providers.js" % host).data
try:
from lib import alfaresolver
provs = alfaresolver.hdfull_providers(data_js)
if provs == '': return []
except:
return []
decoded = jhexdecode(data_js)
providers_pattern = 'p\[(\d+)\]= {"t":"([^"]+)","d":".*?","e":.function.*?,"l":.function.*?return "([^"]+)".*?};'
providers = scrapertools.find_multiple_matches (decoded, providers_pattern)
provs = {}
for provider, e, l in providers:
provs[provider]=[e,l]
data = agrupa_datos(httptools.downloadpage(item.url).data)
data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
infolabels = {}
year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
infolabels["year"] = year
@@ -552,7 +552,7 @@ def findvideos(item):
if match['provider'] in provs:
try:
embed = provs[match['provider']][0]
url = eval(provs[match['provider']][1].replace('_code_', "match['code']"))
url = provs[match['provider']][1]+match['code']
matches.append([match['lang'], match['quality'], url, embed])
except:
pass
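The hdfull change above stops eval()-ing a stored provider expression and simply concatenates the provider's URL prefix with the link code. A toy illustration; the provider entry and codes below are invented:

# provs maps provider id -> [embed_flag, url_prefix], as built from
# providers_pattern above.
provs = {"21": ["1", "https://example-host.example/embed/"]}
match = {"provider": "21", "code": "abc123"}

if match["provider"] in provs:
    embed = provs[match["provider"]][0]
    url = provs[match["provider"]][1] + match["code"]  # plain concatenation, no eval()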
@@ -691,7 +691,6 @@ def get_status(status, type, id):
## --------------------------------------------------------------------------------
## --------------------------------------------------------------------------------
def jhexdecode(t):
r = re.sub(r'_\d+x\w+x(\d+)', 'var_' + r'\1', t)
r = re.sub(r'_\d+x\w+', 'var_0', r)

View File

@@ -28,6 +28,8 @@ list_language = IDIOMAS.values()
list_quality = ['Cam', 'TSHQ', 'Dvdrip', 'Blurayrip', 'HD Rip 320p', 'hd rip 320p', 'HD Real 720p', 'Full HD 1080p']
list_servers = ['openload', 'gamovideo', 'streamplay', 'streamango', 'vidoza']
host = 'https://www.inkapelis.to/'
def mainlist(item):
logger.info()
@@ -35,28 +37,28 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas", url="http://www.inkapelis.com/",
itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas", url=host,
extra="Novedades", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
#itemlist.append(Item(channel=item.channel, title="Estrenos", action="entradas", url="http://www.inkapelis.com/genero/estrenos/",
# text_color=color1, thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Castellano&s=",
url=host+"?anio=&genero=&calidad=&idioma=Castellano&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('espanolas', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Latino&s=",
url=host+"?anio=&genero=&calidad=&idioma=Latino&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('latino', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Subtitulada&s=",
url=host+"?anio=&genero=&calidad=&idioma=Subtitulada&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel=item.channel, title="Géneros", action="generos", url="http://www.inkapelis.com/", text_color=color1,
itemlist.append(Item(channel=item.channel, title="Géneros", action="generos", url=host, text_color=color1,
thumbnail=get_thumb('genres', auto=True),))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url="http://www.inkapelis.com/?s=", text_color=color1))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host+"?s=", text_color=color1))
itemlist.append(Item(channel=item.channel, action="", title=""))
itemlist.append(
Item(channel=item.channel, action="filtro", title="Filtrar películas", url="http://www.inkapelis.com/?s=", text_color=color1))
Item(channel=item.channel, action="filtro", title="Filtrar películas", url=host+"?s=", text_color=color1))
# Custom filters for movies
for i in range(1, 4):
filtros = config.get_setting("pers_peliculas" + str(i), item.channel)
@@ -65,7 +67,7 @@ def mainlist(item):
new_item = item.clone()
new_item.values = filtros
itemlist.append(
new_item.clone(action="filtro", title=title, url="http://www.inkapelis.com/?s=", text_color=color2))
new_item.clone(action="filtro", title=title, url=host+"?s=", text_color=color2))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
autoplay.show_option(item.channel, itemlist)
@@ -86,21 +88,21 @@ def newest(categoria):
item = Item()
try:
if categoria == "peliculas":
item.url = "http://www.inkapelis.com/"
item.url = host
item.action = "entradas"
item.extra = "Novedades"
if categoria == "terror":
item.url = "https://www.inkapelis.com/genero/terror/"
item.url = host+"genero/terror/"
item.action = "entradas"
if categoria == "castellano":
item.url = "https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Castellano&s="
item.url = host+"?anio=&genero=&calidad=&idioma=Castellano&s="
item.extra = "Buscar"
item.action = "entradas"
if categoria == "latino":
item.url = "https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Latino&s="
item.url = host+"?anio=&genero=&calidad=&idioma=Latino&s="
item.extra = "Buscar"
item.action = "entradas"
itemlist = entradas(item)
@@ -122,7 +124,7 @@ def search(item, texto):
logger.info()
itemlist = []
item.extra = "Buscar"
item.url = "http://www.inkapelis.com/?s=%s" % texto
item.url = host+"?s=%s" % texto
try:
return entradas(item)
@@ -254,7 +256,7 @@ def filtrado(item, values):
item.valores = "Filtro: " + ", ".join(sorted(strings))
item.strings = ""
item.url = "http://www.inkapelis.com/?anio=%s&genero=%s&calidad=%s&idioma=%s&s=%s" % \
item.url = host+"?anio=%s&genero=%s&calidad=%s&idioma=%s&s=%s" % \
(year, genero, calidad, idioma, texto)
item.extra = "Buscar"
@@ -292,7 +294,7 @@ def entradas(item):
else:
# Extract the entries
if item.extra == "Novedades":
data2 = data.split("<h3>Últimas Películas Agregadas y Actualizadas</h3>", 1)[1]
data2 = data.split("<h2>Últimas Películas Agregadas y Actualizadas</h2>", 1)[1]
entradas = scrapertools.find_multiple_matches(data2, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
else:

View File

@@ -1,33 +0,0 @@
{
"id": "mastorrents",
"name": "MasTorrents",
"active": true,
"adult": false,
"language": ["cast","lat"],
"thumbnail": "https://s33.postimg.cc/3y8720l9b/mastorrents.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"tvshow",
"torrent"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,323 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel MasTorrents -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from platformcode import logger
from platformcode import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://www.mastorrents.com/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="movie_list",
thumbnail=get_thumb("channels_movie.png")
))
itemlist.append(item.clone(title="Series",
action="series_list",
thumbnail=get_thumb("channels_tvshow.png")
))
return itemlist
def movie_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
url=host+'peliculas',
extra='movie',
thumbnail=get_thumb('all', auto=True)
))
itemlist.append(item.clone(title="Generos",
action="genres",
url=host,
extra='movie',
thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '?pTit=', thumbnail=get_thumb('search', auto=True),
extra='movie'
))
return itemlist
def series_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'series',
extra='serie',
thumbnail=get_thumb('all', auto=True)
))
itemlist.append(item.clone(title="Generos",
action="genres",
url=host + 'series/',
extra='serie',
thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + 'series/?pTit=',
extra='serie',
thumbnail=get_thumb('search', auto=True)
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def lista (item):
logger.info ()
itemlist = []
infoLabels = dict()
data = get_source(item.url)
patron = "<div class=moviesbox>.*?</div><a href=(.*?)><div class=moviesbox_img style=background-image:url\('("
patron += ".*?)'\)>.*?tooltipbox>(.*?)(?: <i>| <br /><i>)(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, extra_data in matches:
extra_data = extra_data.replace('(','').replace(')','')
url = scrapedurl
thumbnail = scrapedthumbnail
contentTitle = scrapedtitle.decode('latin1').encode('utf8')
title = contentTitle
tvshow = False
if 'x' in extra_data:
tvshow = True
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
infoLabels['filtro']= filtro_list
else:
infoLabels['year']=extra_data
new_item=(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
infoLabels=infoLabels,
extra=item.extra
))
if tvshow:
new_item.contentSerieName = contentTitle
new_item.action = 'seasons'
else:
new_item.contentTitle = contentTitle
new_item.action = 'findvideos'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb =True)
# Pagination
if itemlist !=[]:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data,'<span class=pagination_next><a href=(.*?)>')
import inspect
if next_page !='':
itemlist.append(item.clone(action = "lista",
title = 'Siguiente >>>',
url = next_page
))
return itemlist
def genres(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data,'G&eacute;neros</option>(.+)</select></div>')
patron = '<option value=(.*?)>(.*?)</option>'
matches = re.compile(patron,re.DOTALL).findall(data)
for value, title in matches:
url = item.url + value
title = title.decode('latin1').encode('utf8')
itemlist.append(Item(channel=item.channel, title=title, url=url, action='lista'))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
else:
return []
def seasons(item):
logger.info()
itemlist=[]
infoLabels = item.infoLabels
data=get_source(item.url)
patron ='href=javascript:showSeasson\(.*?\); id=.*?>Temporada (.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for season in matches:
title='Temporada %s' % season
infoLabels['season'] = season
itemlist.append(Item(channel=item.channel,
title= title,
url=item.url,
action='episodesxseasons',
contentSeasonNumber=season,
contentSerieName=item.contentSerieName,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName))
return itemlist
def all_episodes(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = "<div class=corner-episode>%sx(.\d+)<\/div><a href=(.*?)>.*?" % item.contentSeasonNumber
patron += "image:url\('(.*?)'.*?href.*?>(%s)<" % item.contentSerieName
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
contentEpisodeNumber=episode
season = item.contentSeasonNumber
url=scrapedurl
thumbnail=scrapedthumbnail
infoLabels['episode']=episode
title = '%sx%s - %s' % (season, episode, item.contentSerieName)
itemlist.append(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=item.contentSerieName,
contentEpisodeNumber=contentEpisodeNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
def findvideos(item):
logger.info()
itemlist=[]
data = get_source(item.url)
patron = "showDownload\(([^\)]+)\);.*?alt=.*?torrent (.*?) "
matches = re.compile(patron, re.DOTALL).findall(data)
for extra_info, quality in matches:
extra_info= extra_info.replace(",'",'|')
extra_info= extra_info.split('|')
title = '%s [%s]' % ('Torrent', quality.strip())
if item.extra == 'movie':
url = extra_info[2].strip("'")
else:
url = extra_info[3].strip("'")
server = 'torrent'
if not '.torrent' in url:
if 'tvsinpagar' in url:
url = url.replace('http://','http://www.')
try:
from_web = httptools.downloadpage(url, follow_redirects=False)
url = from_web.headers['location']
except:
pass
if '.torrent' in url:
itemlist.append(Item(channel=item.channel,
title=title,
contentTitle= item.title,
url=url,
action='play',
quality=quality,
server=server,
thumbnail = item.infoLabels['thumbnail'],
infoLabels=item.infoLabels
))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def newest(category):
logger.info()
item = Item()
try:
if category in ['peliculas', 'torrent']:
item.url = host + 'estrenos-de-cine'
            item.extra = 'movie'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
if category == 'torrent':
item.url = host+'series'
item.extra = 'serie'
itemlist.extend(lista(item))
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -14,10 +14,10 @@ from channelselector import get_thumb
host = "https://maxipelis24.tv"
IDIOMAS = {'Latino': 'Latino', 'Subtitulado': 'VOSE', 'Español': 'CAST'}
IDIOMAS = {'Latino': 'Latino', 'Sub':'VOSE', 'Subtitulado': 'VOSE', 'Español': 'CAST', 'Castellano':'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'vidoza', 'openload', 'streamango']
list_servers = ['rapidvideo', 'vidoza', 'openload', 'streamango', 'okru']
def mainlist(item):
@@ -28,11 +28,11 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Peliculas",
action="movies", url=host, page=0, thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno",
url=host, cat='year', page=0, thumbnail=get_thumb('year', auto=True)))
url=host, cat='year', thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros",
url=host, cat='genre', page=0, thumbnail=get_thumb('genres', auto=True)))
url=host, cat='genre', thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad",
url=host, cat='quality', page=0, thumbnail=get_thumb("quality", auto=True)))
url=host, cat='quality', thumbnail=get_thumb("quality", auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
url=host + "?s=", page=0, thumbnail=get_thumb("search", auto=True)))
@@ -56,14 +56,14 @@ def category(item):
if item.cat == 'genre':
data = scrapertools.find_single_match(
data, '<h3>Géneros <span class="icon-sort">.*?</ul>')
patron = '<li class="cat-item cat-item.*?<a href="([^"]+)" >([^<]+)<'
patron = '<li class="cat-item cat-item.*?<a href="([^"]+)".*?>([^<]+)<'
elif item.cat == 'year':
data = scrapertools.find_single_match(
data, '<h3>Año de estreno.*?</div>')
patron = 'li><a href="([^"]+)">([^<]+).*?<'
patron = 'li><a href="([^"]+)".*?>([^<]+).*?<'
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<h3>Calidad.*?</div>')
patron = 'li><a href="([^"]+)">([^<]+)<'
patron = 'li><a href="([^"]+)".*?>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel=item.channel, action='movies',
@@ -81,7 +81,7 @@ def movies(item):
patron += '<span class="ttx">([^<]+).*?'
patron += 'class="year">([^<]+).+?class="calidad2">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, img, scrapedtitle, resto, year, quality in matches[item.page:item.page + 30]:
for scrapedurl, img, scrapedtitle, resto, year, quality in matches[item.page:item.page + 20]:
scrapedtitle = re.sub(r' \((\d+)\)', '', scrapedtitle)
plot = scrapertools.htmlclean(resto).strip()
title = ' %s [COLOR red][%s][/COLOR]' % (scrapedtitle, quality)
@@ -97,14 +97,14 @@ def movies(item):
infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if item.page + 30 < len(matches):
itemlist.append(item.clone(page=item.page + 30, title=">> Siguiente"))
if item.page + 20 < len(matches):
itemlist.append(item.clone(page=item.page + 20, title=">> Siguiente"))
else:
next_page = scrapertools.find_single_match(
data, 'class="respo_pag"><div class="pag.*?<a href="([^"]+)" >Siguiente</a><')
data, '<link rel="next" href="([^"]+)" />')
if next_page:
itemlist.append(item.clone(
url=next_page, page=0, title=">> Siguiente"))
itemlist.append(item.clone(url=next_page, page=0,
title=" Siguiente »"))
return itemlist
@@ -113,15 +113,28 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data1= scrapertools.find_single_match(data,'<ul class="idTabs">.*?</ul></div>')
patron = "li>.*?href=.*?>([^\s]+)"
matches1 = re.compile(patron, re.DOTALL).findall(data1)
for lang in matches1:
idioma = lang
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&'
patron = '<div id="div(\d+)".*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
for ot, link in matches:
data1 = scrapertools.find_single_match(data, '<ul class="idTabs">.*?</ul></div>')
patron = 'li>.*?href="#div%s.*?>.*?([^<|\s]+)' % ot
matches1 = re.compile(patron, re.DOTALL).findall(data1)
for lang in matches1:
if "VIP" in lang:
continue
idioma = lang
if 'ok.ru' in link:
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
                    if not link.startswith("https"):
                        link = "https:%s" % link
                    url = link
title = '%s'
new_item = Item(channel=item.channel, title=title, url=url,
action='play', language=IDIOMAS[idioma], infoLabels=item.infoLabels)
itemlist.append(new_item)
if 'maxipelis24.tv/hideload/?' in link:
                id_letter = scrapertools.find_single_match(link, '\?(\w)d')
id_type = '%sd' % id_letter
@@ -135,8 +148,6 @@ def findvideos(item):
follow_redirects=False)
url = video_data.headers['location']
title = '%s'
else:
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -144,9 +155,10 @@ def findvideos(item):
url = link
title = '%s'
new_item = Item(channel=item.channel, title=title, url=url,
action='play', language= IDIOMAS[idioma], infoLabels=item.infoLabels)
action='play', language=IDIOMAS[idioma], infoLabels=item.infoLabels)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s]'%(i.server.capitalize(),i.language))
itemlist = servertools.get_servers_itemlist(
itemlist, lambda i: i.title % '%s [%s]' % (i.server.capitalize(), i.language))
#itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
if config.get_videolibrary_support():

View File

@@ -18,7 +18,7 @@
"id": "domain_name",
"type": "text",
"label": "URL actual de la Web Mejor Torrent",
"default": "http://www.mejortorrent.org/",
"default": "http://www.mejortorrent.tv/",
"enabled": true,
"visible": true
},

View File

@@ -26,7 +26,8 @@ list_servers = ['torrent']
channel = "mejortorrent"
host = 'http://www.mejortorrent.org/'
host = 'http://www.mejortorrent.tv/'
host_sufix = '.tv'
#host = config.get_setting('domain_name', channel)
categoria = channel.capitalize()
@@ -296,8 +297,8 @@ def listado(item):
item_local.title = scrapertools.get_match(scrapedurl, patron_enlace)
item_local.title = item_local.title.replace("-", " ")
item_local.url = urlparse.urljoin(item_local.url, scrapedurl)
item_local.thumbnail = host + urllib.quote(scrapedthumbnail)
item_local.url = verificar_url(urlparse.urljoin(item_local.url, scrapedurl))
item_local.thumbnail = verificar_url(host + urllib.quote(scrapedthumbnail))
item_local.contentThumbnail = item_local.thumbnail
item_local.infoLabels['year'] = '-' # Al no saber el año, le ponemos "-" y TmDB lo calcula automáticamente
@@ -660,7 +661,7 @@ def listado_busqueda(item):
item_local.quality = scrapertools.remove_htmltags(scrapedinfo).decode('iso-8859-1').encode('utf8')
item_local.quality = item_local.quality.replace("(", "").replace(")", "").replace("[", "").replace("]", "").replace("Documental", "").replace("documental", "")
item_local.url = urlparse.urljoin(item.url, scrapedurl)
item_local.url = verificar_url(urlparse.urljoin(item.url, scrapedurl))
#Preparamos la información básica para TMDB
if "/serie-" in scrapedurl or "/doc-" in scrapedurl:
@@ -829,10 +830,10 @@ def findvideos(item):
for scrapedurl in matches:
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
url = urlparse.urljoin(item.url, scrapedurl)
url = verificar_url(urlparse.urljoin(item.url, scrapedurl))
# Localiza el .torrent en el siguiente link
if not item.post and not item.armagedon: # Si no es llamada con Post, hay que bajar un nivel más
if not item.post and not item.armagedon: # Si no es llamada con Post, hay que bajar un nivel más
try:
torrent_data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url).data)
except: #error
@@ -849,15 +850,15 @@ def findvideos(item):
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#logger.debug(torrent_data)
if not item.armagedon:
item_local.url = scrapertools.get_match(torrent_data, ">Pincha.*?<a href='(.*?\/uploads\/torrents\/\w+\/.*?\.torrent)'")
item_local.url = urlparse.urljoin(url, item_local.url)
item_local.url = verificar_url(urlparse.urljoin(url, item_local.url))
elif not item.armagedon:
item_local.url = url # Ya teníamos el link desde el primer nivel (documentales)
item_local.url = url # Ya teníamos el link desde el primer nivel (documentales)
item_local.url = item_local.url.replace(" ", "%20")
if item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls:
@@ -867,10 +868,10 @@ def findvideos(item):
del item.emergency_urls[0][0]
if not item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls:
if len(item.emergency_urls[0]):
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la primera url del .Torrent ALTERNATIVA
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la primera url del .Torrent ALTERNATIVA
if item.videolibray_emergency_urls:
item.emergency_urls[0].append(item_local.url) #Salvamnos la url...
        item.emergency_urls[0].append(verificar_url(item_local.url)) #Salvamos la url...
# Poner la calidad, si es necesario
if not item_local.quality:
@@ -1003,7 +1004,7 @@ def episodios(item):
item_local.title = ''
item_local.context = "['buscar_trailer']"
item_local.url = urlparse.urljoin(host, scrapedurl)
item_local.url = verificar_url(urlparse.urljoin(host, scrapedurl))
scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
if scrapedtitle.endswith('.'):
@@ -1030,7 +1031,7 @@ def episodios(item):
else: #Se prepara el Post para documentales
item_local.contentSeason = 1
item_local.contentEpisodeNumber = 1
item_local.url = host + "/secciones.php?sec=descargas&ap=contar_varios"
item_local.url = verificar_url(host + "/secciones.php?sec=descargas&ap=contar_varios")
item_local.post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo_post})
if year:
@@ -1050,6 +1051,15 @@ def episodios(item):
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
return itemlist
def verificar_url(url):
if '.com' in url or '.net' in url or '.org' in url:
url = url.replace('.com', '.tv').replace('.net', '.tv').replace('.org', '.tv')
url = url.replace('torrents/tmp/torrent.php?table=peliculas/&name=', 'torrents/peliculas/')
url = url.replace('torrents/tmp/torrent.php?table=series/&name=', 'torrents/series/')
url = url.replace('torrents/tmp/torrent.php?table=documentales/&name=', 'torrents/documentales/')
return url
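A quick hedged example of what verificar_url does (the .torrent file name is invented for illustration): the domain swap and the torrents/tmp mapping compose, so a legacy link resolves directly on the new host:

    verificar_url('http://www.mejortorrent.org/torrents/tmp/torrent.php?table=peliculas/&name=Pelicula.torrent')
    # -> 'http://www.mejortorrent.tv/torrents/peliculas/Pelicula.torrent'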
def actualizar_titulos(item):

View File

@@ -0,0 +1,50 @@
{
"id": "mundopelis",
"name": "mundopelis",
"active": true,
"adult": false,
"language": ["vos"],
"thumbnail": "https://mundopelis.xyz/images/logo.png",
"banner": "",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,145 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re, urllib, urlparse
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'https://mundopelis.xyz'
list_language = []
list_servers = ['Rapidvideo', 'Vidoza', 'Openload', 'Youtube']
list_quality = []
__channel__ = 'mundopelis'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Novedades" , action="lista", url= host + "/todos-los-estrenos", first=0))
itemlist.append(item.clone(title="Categorias" , action="categorias", url= host))
itemlist.append(item.clone(title="Buscar", action="search"))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?option=com_spmoviedb&view=searchresults&searchword=%s&type=movies&Itemid=544" % texto
item.first = 0
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="btn btn-xs btn-primary" href="/index.php([^"]+)".*?</i> ([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle
        itemlist.append(item.clone(channel=item.channel, action="lista", title=title, url=url, first=0,
                                   thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist
def lista(item):
logger.info()
itemlist = []
next = False
data = httptools.downloadpage(item.url).data
patron = '<div class="movie-poster">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<a href="/index.php([^"]+)"><h4 class="movie-title">([^<]+)</h4>'
matches = re.compile(patron, re.DOTALL).findall(data)
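    # Serve the scraped matches in windows of 20; 'next' flags that this page of
    # matches is exhausted and the site's own next-page link must be followed.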
first = item.first
    last = first + 20
if last > len(matches):
last = len(matches)
next = True
scrapertools.printMatches(matches)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches[first:last]:
scrapedyear = "-"
title = scrapedtitle.replace(" (2018)", "")
        url = urlparse.urljoin(item.url, scrapedurl)
        itemlist.append(item.clone(channel=item.channel, action='findvideos', title=title, contentTitle=scrapedtitle,
                                   url=url, thumbnail=scrapedthumbnail, infoLabels={'year': scrapedyear}))
tmdb.set_infoLabels(itemlist, True)
# Paginación
if not next:
url_next_page = item.url
first = last
else:
url_next_page = scrapertools.find_single_match(data, '<a title="Siguiente" href="([^"]+)"')
url_next_page = urlparse.urljoin(item.url,url_next_page)
first = 0
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<(?:iframe|IFRAME).*?(?:src|SRC)="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
lang = "VOSE"
if not config.get_setting('unify'):
title = ' (%s)' % (lang)
else:
title = ''
if url != '':
itemlist.append(item.clone(action="play", title='%s'+title, url=url, language=lang ))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos' and "/episodios/" not in item.url:
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist

View File

@@ -7,6 +7,7 @@ import urlparse
import datetime
import ast
import random
import traceback
from channelselector import get_thumb
from core import httptools
@@ -700,8 +701,12 @@ def listado(item):
#Guardamos el resto de variables del vídeo
item_local.url = scrapedurl
if not item_local.url.startswith("http"): #Si le falta el http.: lo ponemos
item_local.url = scrapertools.find_single_match(item_local.channel_host, '(\w+:)//') + item_local.url
item_local.thumbnail = scrapedthumbnail
item_local.contentThumbnail = scrapedthumbnail
if not item_local.thumbnail.startswith("http"): #Si le falta el http.: lo ponemos
item_local.thumbnail = scrapertools.find_single_match(item_local.channel_host, '(\w+:)//') + item_local.thumbnail
item_local.contentThumbnail = item_local.thumbnail
#Guardamos el año que puede venir en la url, por si luego no hay resultados desde TMDB
year = ''
@@ -1008,7 +1013,7 @@ def listado_busqueda(item):
if not data_serie: #Si no ha logrado encontrar nada, salimos
title_subs += ["ERR"]
elif item_local.channel_alt: #Si ha habido fail-over, lo comento
elif item_local.channel_alt: #Si ha habido fail-over, lo comento
url = url.replace(item_local.channel_alt, item_local.category.lower())
title_subs += ["ALT"]
@@ -1029,8 +1034,10 @@ def listado_busqueda(item):
title_subs += ["Episodio %sx%s" % (scrapertools.find_single_match(url, '\/temp.*?-(\d+)-?\/cap.*?-(\d+(?:-al-\d+)?)-?\/'))]
url = item_local.url
except:
pass
logger.error(traceback.format_exc())
#logger.debug(item_local.url)
if item.extra == "novedades" and "/serie" in url:
if not item_local.url or episodio_serie == 0:
item_local.url = url
@@ -1204,8 +1211,12 @@ def listado_busqueda(item):
#Guardamos el resto de variables del vídeo
item_local.url = url
if not item_local.url.startswith("http"): #Si le falta el http.: lo ponemos
item_local.url = scrapertools.find_single_match(item_local.channel_host, '(\w+:)//') + item_local.url
item_local.thumbnail = scrapedthumbnail
item_local.contentThumbnail = scrapedthumbnail
if not item_local.thumbnail.startswith("http"): #Si le falta el http.: lo ponemos
item_local.thumbnail = scrapertools.find_single_match(item_local.channel_host, '(\w+:)//') + item_local.thumbnail
item_local.contentThumbnail = item_local.thumbnail
#Guardamos el año que puede venir en la url, por si luego no hay resultados desde TMDB
try:
@@ -1315,7 +1326,7 @@ def findvideos(item):
#Renombramos el canal al nombre de clone elegido. Actualizados URL
host = scrapertools.find_single_match(item.url, '(http.?\:\/\/(?:www.)?\w+\.\w+\/)')
item.channel_host = host
item.category = host.capitalize()
item.category = scrapertools.find_single_match(item.url, 'http.?\:\/\/(?:www.)?(\w+)\.\w+\/').capitalize()
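    # Keep only the domain label of the URL (e.g. 'planetatorrent') as the category,
    # instead of capitalizing the whole host string as before.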
verify_fo = True #Verificamos si el clone a usar está activo
item, data = generictools.fail_over_newpct1(item, verify_fo)
@@ -1446,8 +1457,9 @@ def findvideos(item):
if scrapertools.find_single_match(data, patron):
patron = patron_alt
url_torr = scrapertools.find_single_match(data, patron)
if not url_torr.startswith("http"): #Si le falta el http.: lo ponemos
url_torr = scrapertools.find_single_match(host, '(\w+:)//') + url_torr
if not url_torr.startswith("http"): #Si le falta el http.: lo ponemos
url_torr = scrapertools.find_single_match(item.channel_host, '(\w+:)//') + url_torr
#Verificamos si se ha cargado una página, y si además tiene la estructura correcta
if not data or not scrapertools.find_single_match(data, patron) or not videolibrarytools.verify_url_torrent(url_torr): # Si no hay datos o url, error
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
@@ -1509,6 +1521,9 @@ def findvideos(item):
patron = 'class="btn-torrent">.*?window.location.href = "(.*?)";' #Patron para .torrent
if not scrapertools.find_single_match(data, patron):
patron = '<a href="([^"]+)"\s?title="[^"]+"\s?class="btn-torrent"' #Patron para .torrent (planetatorrent)
url_torr = scrapertools.find_single_match(data, patron)
if not url_torr.startswith("http"): #Si le falta el http.: lo ponemos
url_torr = scrapertools.find_single_match(item.channel_host, '(\w+:)//') + url_torr
#buscamos el tamaño del .torrent
size = scrapertools.find_single_match(data, '<div class="entry-left".*?><a href=".*?span class=.*?>Size:<\/strong>?\s(\d+?\.?\d*?\s\w[b|B])<\/span>')
@@ -1884,11 +1899,11 @@ def episodios(item):
season_display = item.from_num_season_colapse
# Obtener la información actualizada de la Serie. TMDB es imprescindible para Videoteca
if not item.infoLabels['tmdb_id']:
try:
tmdb.set_infoLabels(item, True) #TMDB de cada Temp
except:
pass
#if not item.infoLabels['tmdb_id']:
try:
tmdb.set_infoLabels(item, True) #TMDB de cada Temp
except:
pass
modo_ultima_temp_alt = modo_ultima_temp
if item.ow_force == "1": #Si hay un traspaso de canal o url, se actualiza todo
@@ -1972,6 +1987,7 @@ def episodios(item):
num_temporadas_flag = True
else:
num_temporadas_flag = False
for page in list_pages: #Recorre la lista de páginas
if not list_pages:
break
@@ -2014,7 +2030,12 @@ def episodios(item):
item_local = item.clone() #Creamos copia local de Item por episodio
item_local.url = url
item_local.contentThumbnail = thumb
if not item_local.url.startswith("http"): #Si le falta el http.: lo ponemos
item_local.url = scrapertools.find_single_match(item_local.channel_host, '(\w+:)//') + item_local.url
item_local.thumbnail = thumb
if not item_local.thumbnail.startswith("http"): #Si le falta el http.: lo ponemos
item_local.thumbnail = scrapertools.find_single_match(item_local.channel_host, '(\w+:)//') + item_local.thumbnail
item_local.contentThumbnail = item_local.thumbnail
estado = True #Buena calidad de datos por defecto
if "<span" in info: # new style
@@ -2075,7 +2096,7 @@ def episodios(item):
itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: EPISODIOS: Ha cambiado la estructura de la Web. Reportar el error con el log'))
break #si no hay más datos, algo no funciona, pintamos lo que tenemos
#Si no se encuentran valores, pero poner lo básico
#Si no se encuentran valores, se pone lo básico
if match['season'] is None or match['season'] == "0" or not match['season']: match['season'] = season
if match['episode'] is None: match['episode'] = "0"
try:
@@ -2085,6 +2106,7 @@ def episodios(item):
if match['season'] > max_temp:
logger.error("ERROR 07: EPISODIOS: Error en número de Temporada o Episodio: " + " / TEMPORADA/EPISODIO: " + str(match['season']) + " / " + str(match['episode']) + " / NUM_TEMPORADA: " + str(max_temp) + " / " + str(season) + " / MATCHES: " + str(matches))
match['season'] = scrapertools.find_single_match(item_local.url, '\/[t|T]emp\w+-*(\d+)\/')
num_temporadas_flag = False
if not match['season']:
match['season'] = season_alt
else:

View File

@@ -1,23 +1,27 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
# ------------------------------------------------------------
import urlparse
import urllib2
import urllib
import re
import os
import sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
host= 'https://pandamovies.pw'
host = 'https://pandamovies.pw'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/list-movies"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-movies"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/list-movies"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/movies"))
itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/movies"))
itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host + "/movies"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -35,44 +39,45 @@ def search(item, texto):
def categorias(item):
itemlist = []
data = scrapertools.cache_page(item.url)
if item.title == "Categorias" :
data = scrapertools.get_match(data,'<a href="#">Genres</a>(.*?)</ul>')
else:
data = scrapertools.get_match(data,'<a href="#">Studios</a>(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><a title=".*?" href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace("https:", "")
scrapedurl = "https:" + scrapedurl
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
itemlist = []
data = httptools.downloadpage(item.url).data
if item.title == "Categorias":
data = scrapertools.get_match(data, '<a href="#">Genres</a>(.*?)</ul>')
else:
data = scrapertools.get_match(data, '<a href="#">Studios</a>(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace("https:", "")
scrapedurl = "https:" + scrapedurl
itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<a class="clip-link" title="([^"]+)" href="([^"]+)".*?'
patron += 'src="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
url = urlparse.urljoin(item.url,scrapedurl)
data = httptools.downloadpage(item.url).data
patron = '<div data-movie-id="\d+".*?'
patron += '<a href="([^"]+)".*?oldtitle="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page =="":
next_page = scrapertools.find_single_match(data,'<a.*?href="([^"]+)" >Next &raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle=title))
# <li class='active'><a class=''>1</a></li><li><a rel='nofollow' class='page larger' href='https://pandamovies.pw/movies/page/2'>
next_page = scrapertools.find_single_match(data, '<li class=\'active\'>.*?href=\'([^\']+)\'>')
if next_page == "":
next_page = scrapertools.find_single_match(data, '<a.*?href="([^"]+)" >Next &raquo;</a>')
if next_page != "":
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist

View File

@@ -42,7 +42,6 @@ else:
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -103,7 +102,7 @@ def sub_search(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
data = scrapertools.find_single_match(data, 'Archivos (.*?)resppages')
patron = 'img alt="([^"]+)".*?'
patron = 'img alt="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'href="([^"]+)".*?'
patron += 'fechaestreno">([^<]+)'
@@ -111,7 +110,6 @@ def sub_search(item):
for scrapedtitle, scrapedthumbnail, scrapedurl, year in matches:
if 'tvshows' not in scrapedurl:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", infoLabels={"year": year},
thumbnail=scrapedthumbnail, text_color=color3))
@@ -137,7 +135,7 @@ def peliculas(item):
# logger.info(data)
# img, title
patron = '(?is)movie-img img-box.*?alt="([^"]+)".*?'
patron = '(?is)movie-img img-box.*?alt="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'href="([^"]+)".*?'
patron += 'fechaestreno">([^<]+)<.*?'
@@ -187,7 +185,7 @@ def genresYears(item):
for scrapedurl, scrapedtitle in matches:
title = '%s' % (scrapedtitle)
title = title.replace("Peliculas de ","").replace(" Online","")
title = title.replace("Peliculas de ", "").replace(" Online", "")
itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas"))
return itemlist
@@ -196,14 +194,13 @@ def year_release(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
@@ -219,13 +216,12 @@ def series(item):
# logger.info(data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
@@ -291,7 +287,7 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
@@ -324,7 +320,7 @@ def episodios(item):
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadirselo al titulo del item
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# Si el capitulo tiene imagen propia remplazar al poster
i.thumbnail = i.infoLabels['poster_path']
@@ -369,7 +365,8 @@ def findvideos(item):
lang = languages[lang]
server = servertools.get_server_from_url(url)
title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (server.title(), item.quality, lang)
title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (
server.title(), item.quality, lang)
# if 'google' not in url and 'directo' not in server:
itemlist.append(item.clone(action='play', url=url, title=title, language=lang, text_color=color3))

View File

@@ -348,7 +348,7 @@ def listado(item):
title = re.sub(r'- $', '', title)
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)
title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title)
#Terminamos de limpiar el título
title = re.sub(r'\??\s?\d*?\&.*', '', title)

View File

@@ -200,7 +200,6 @@ def peliculas(item):
paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
if paginacion:
itemlist.append(Item(channel=item.channel, action="peliculas",
title="» Siguiente »", url=paginacion, plot="Página Siguiente",
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
@@ -219,7 +218,7 @@ def generos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<div class="todos">.*?'
@@ -270,8 +269,9 @@ def findvideos(item):
server = servertools.get_server_from_url(scrapedurl)
quality = scrapertools.find_single_match(
datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
title = "Ver en: [COLOR yellowgreen][{}][/COLOR] [COLOR yellow][{}][/COLOR]".format(servidores.capitalize(),
quality.upper())
title = "Ver en: [COLOR yellowgreen][{}][/COLOR] [COLOR yellow][{}][/COLOR]".format(
servidores.capitalize(),
quality.upper())
itemlist.append(item.clone(action='play', title=title, url=scrapedurl, quality=item.quality,
server=server, language=lang.replace('Español ', ''),

View File

@@ -19,7 +19,7 @@ def mainlist(item):
itemlist.append( Item(channel=item.channel, title=" categorias" , action="categorias", url=host + "/category/movies/"))
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips/"))
itemlist.append( Item(channel=item.channel, title=" categorias" , action="lista", url=host + "/category/clips/"))
itemlist.append( Item(channel=item.channel, title=" categorias" , action="categorias", url=host + "/category/clips/"))
return itemlist
@@ -41,11 +41,11 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
if item.url == host + "/category/movies/":
if "/category/movies/" in item.url:
data = scrapertools.get_match(data,'>Movies</a>(.*?)</ul>')
else:
data = scrapertools.get_match(data,'>Clips</a>(.*?)</ul>')
patron = '<a href="([^"]+)">([^"]+)</a>'
patron = '<a href=([^"]+)>([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
@@ -60,20 +60,21 @@ def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<article id="post-\d+".*?'
patron += '<img class="center cover" src="([^"]+)" alt="([^"]+)".*?'
patron += '<blockquote>.*?<a href=\'([^\']+)\''
patron = '<article id=post-\d+.*?'
patron += '<img class="center cover" src=([^"]+) alt="([^"]+)".*?'
patron += '<blockquote>.*?<a href=(.*?) target=_blank>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail,scrapedtitle,scrapedurl in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
next_page = scrapertools.find_single_match(data,'<a class=nextpostslink rel=next href=(.*?)>')
if next_page!="":
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)

View File

@@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
# ------------------------------------------------------------
import urlparse
import urllib2
import urllib
import re
import os
import sys
from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,14 +14,15 @@ from core import httptools
host = 'http://porneq.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host + "/videos/browse/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="lista", url=host + "/videos/most-viewed/"))
itemlist.append( Item(channel=item.channel, title="Mas Votado" , action="lista", url=host + "/videos/most-liked/"))
itemlist.append( Item(channel=item.channel, title="Big Tits" , action="lista", url=host + "/show/big+tits&sort=w"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host + "/videos/browse/"))
itemlist.append(Item(channel=item.channel, title="Mas Vistos", action="lista", url=host + "/videos/most-viewed/"))
itemlist.append(Item(channel=item.channel, title="Mas Votado", action="lista", url=host + "/videos/most-liked/"))
itemlist.append(Item(channel=item.channel, title="Big Tits", action="lista", url=host + "/show/big+tits&sort=w"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -37,20 +42,20 @@ def search(item, texto):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<span class="timer">(.*?)</span></div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedtitle,scrapedurl,scrapedthumbnail,scrapedtime in matches:
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedtime in matches:
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<nav id="page_nav"><a href="(.*?)"')
if next_page !="":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
next_page = scrapertools.find_single_match(data, '<nav id="page_nav"><a href="(.*?)"')
if next_page != "":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
@@ -58,8 +63,8 @@ def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)"')
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
scrapedurl = scrapertools.find_single_match(data, '<source src="([^"]+)"')
itemlist.append(
Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -81,6 +81,7 @@ def lista(item):
if not scrapedthumbnail.startswith("https"):
scrapedthumbnail = "https:%s" % scrapedthumbnail
scrapedtitle = "%s - [COLOR red]%s[/COLOR] %s" % (duration, quality, scrapedtitle)
scrapedthumbnail += "|Referer=https://www.porntrex.com/"
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
contentThumbnail=scrapedthumbnail, fanart=scrapedthumbnail))
# Extrae la marca de siguiente página
@@ -256,6 +257,7 @@ def menu_info(item):
if i == 0:
continue
img = urlparse.urljoin(host, img)
img += "|Referer=https://www.porntrex.com/"
title = "Imagen %s" % (str(i))
itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))

View File

@@ -336,8 +336,8 @@ def listado(item):
item_local.season_colapse = True #Muestra las series agrupadas por temporadas
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online', '', title, flags=re.IGNORECASE).strip()
item_local.quality = re.sub(r'proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality, flags=re.IGNORECASE).strip()
title = re.sub(r'(?i)TV|Online', '', title).strip()
item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip()
#Analizamos el año. Si no está claro ponemos '-'
try:
@@ -472,7 +472,7 @@ def findvideos(item):
item_local.quality = ''
title = title.replace('.', ' ')
item_local.quality = item_local.quality.replace('.', ' ')
item_local.quality = re.sub(r'proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality, flags=re.IGNORECASE).strip()
item_local.quality = re.sub(r'(?i)proper|unrated|directors|cut|german|repack|internal|real|korean|extended|masted|docu|oar|super|duper|amzn|uncensored|hulu', '', item_local.quality).strip()
#Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent
size = scrapedsize
@@ -533,7 +533,7 @@ def play(item): #Permite preparar la descarga de
from core import ziptools
#buscamos la url del .torrent
patron = '<tr><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*width="(?:[^"]+)?">\s*Torrent:<\/td><td class="(?:[^"]+)?">\s*<img src="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*border="(?:[^"]+)?"\s*\/>\s*<a onmouseover="(?:[^"]+)?"\s*onmouseout="(?:[^"]+)?" href="([^"]+)">.*?<\/a>'
patron = '<tr><td align="(?:[^"]+)?"\s*class="(?:[^"]+)?"\s*width="(?:[^"]+)?">\s*Torrent:<\/td><td class="(?:[^"]+)?">\s*<img src="(?:[^"]+)?"\s*alt="(?:[^"]+)?"\s*border="(?:[^"]+)?"\s*\/>\s*<a onmouseover="(?:[^"]+)?"\s*onmouseout="(?:[^"]+)?" href="([^"]+)".*?<\/a>'
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
data = unicode(data, "utf-8", errors="replace").encode("utf-8")
@@ -543,6 +543,7 @@ def play(item): #Permite preparar la descarga de
if status:
return itemlist #IP bloqueada
if not scrapertools.find_single_match(data, patron):
logger.error('ERROR 02: PLAY: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log: PATRON: ' + patron + ' / DATA: ' + data)
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: PLAY: No hay enlaces o ha cambiado la estructura de la Web. Verificar en la Web esto último y reportar el error con el log'))
return itemlist
item.url = urlparse.urljoin(host, scrapertools.find_single_match(data, patron))

View File

@@ -226,10 +226,40 @@ def findvideos(item):
for id in buttons:
new_url = golink(int(id), _sa, sl)
data_new = httptools.downloadpage(new_url).data
_x0x = scrapertools.find_single_match(data_new, 'var x0x = ([^;]+);')
logger.info(data_new)
valor = scrapertools.find_single_match(data_new, '\+ x92\((.*?)\)\+ ')
valores = valor.split("atob")
valor2 = valores[1].replace('(','').replace(')','')
valor1 = valores[0].split('+')
datos = []
logger.info("f4d5as6f")
logger.info(valor1)
stringTodo = ''
for val in valor1:
if '()' in val:
funcion = val.split('(')[0]
scrapedvalue = scrapertools.find_single_match(data_new, funcion+'.+?return (.+?);')
datos.append(scrapedvalue)
elif '.charAt' in val:
funcion = val.split('.charAt(')
stringTodo = funcion[0]
position = funcion[1].split(')')[0]
posiciones = []
logger.info(datos)
if datos:
for dato in datos:
logger.info(dato)
try:
posiciones.append(int(dato))
except Exception as e:
                        scrapedvalue = scrapertools.find_single_match(data_new, 'var %s = (.+?);' % dato)
                        logger.info("scrapedvalue: %s" % scrapedvalue)
posiciones.append(int(scrapedvalue))
logger.info("positiones"+posiciones)
try:
x0x = eval(_x0x)
url = base64.b64decode(gktools.transforma_gsv(x0x[4], base64.b64decode(x0x[1])))
url = x92(data1, data2)
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
@@ -250,11 +280,65 @@ def findvideos(item):
def golink (num, sa, sl):
import urllib
b = [3, 10, 5, 22, 31]
d = ''
for i in range(len(b)):
d += sl[2][b[i]+num:b[i]+num+1]
#d = ''
#for i in range(len(b)):
# d += sl[2][b[i]+num:b[i]+num+1]
SVR = "https://viteca.stream" if sa == 'true' else "http://serieslan.com"
TT = "/" + urllib.quote_plus(sl[3].replace("/", "><")) if num == 0 else ""
url_end = link(num,sl)
#return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + d + TT
return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + url_end + TT
return SVR + "/el/" + sl[0] + "/" + sl[1] + "/" + str(num) + "/" + sl[2] + d + TT
def link(ida, sl):
    # Rebuild the short token the site's JS appends after sl[2]: one character of
    # sl[2] taken at each offset b[i] + ida.
    b = [3, 10, 5, 22, 31]
    d = ""
    for i in range(len(b)):
        d = d + substr(sl[2], b[i] + ida, 1)
    return d

def substr(st, a, b):
    # JS-style substr: the b characters of st starting at index a
    return st[a:a + b]
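For reference, a minimal sketch of what link() yields; the sl values below are invented for illustration only:

    # sl[2] is the per-video token string scraped from the page (hypothetical value).
    sl = ['123', '456', 'abcdefghijklmnopqrstuvwxyzABCDEF', 'titulo']
    print link(0, sl)  # -> 'dkfwF': sl[2][3], sl[2][10], sl[2][5], sl[2][22], sl[2][31]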
def x92(data1, data2):
    # RC4 stream cipher: data1 is the key, data2 the ciphertext; returns the plain string
    data3 = range(256)
    data4 = 0
    data6 = ""
    # Key-scheduling (KSA)
    for i in range(256):
        data4 = (data4 + data3[i] + ord(data1[i % len(data1)])) % 256
        data5 = data3[i]
        data3[i] = data3[data4]
        data3[data4] = data5
    # Keystream generation (PRGA), XOR-ed against the ciphertext
    i = 0
    data4 = 0
    for j in range(len(data2)):
        i = (i + 1) % 256
        data4 = (data4 + data3[i]) % 256
        data5 = data3[i]
        data3[i] = data3[data4]
        data3[data4] = data5
        data6 += chr(ord(data2[j]) ^ data3[(data3[i] + data3[data4]) % 256])
    return data6
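Since RC4 is its own inverse, a quick sanity check of the fixed x92 (key and text invented for illustration):

    # Encrypting twice with the same key must return the original string.
    assert x92('clave', x92('clave', 'hola mundo')) == 'hola mundo'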
def _ieshlgagkP(umZFJ):
return umZFJ
def _RyHChsfwdd(ZBKux):
return ZBKux
def _eladjkKtjf(czuwk):
return czuwk
def _slSekoKrHb():
return ''
def _VySdeBApGO():
return 'Z'
def _nEgqhkiRub():
return 28
def _lTjZxWGNnE():
return 57

View File

@@ -14,11 +14,11 @@ host = 'http://sexgalaxy.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host + "/new-releases/"))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/full-movies/"))
itemlist.append( Item(channel=item.channel, title="Canales" , action="canales", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host + "/new-releases/"))
itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/full-movies/"))
itemlist.append(Item(channel=item.channel, title="Canales", action="canales", url=host))
itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -35,20 +35,20 @@ def search(item, texto):
return []
def canales (item):
def canales(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(host)
data = scrapertools.get_match(data,'Top Networks</a>(.*?)</ul>')
patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
data = httptools.downloadpage(host).data
data = scrapertools.get_match(data, 'Top Networks</a>(.*?)</ul>')
patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist
@@ -56,16 +56,16 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'More Categories</a>(.*?)</ul>')
patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
data = scrapertools.get_match(data, 'More Categories</a>(.*?)</ul>')
patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist
@@ -73,24 +73,24 @@ def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
calidad = scrapertools.find_single_match(scrapedtitle,'\(.*?/(\w+)\)')
calidad = scrapertools.find_single_match(scrapedtitle, '\(.*?/(\w+)\)')
if calidad:
scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)"')
if next_page!="":
itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) )
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot))
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
if next_page != "":
itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page))
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
@@ -98,4 +98,3 @@ def play(item):
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -294,7 +294,7 @@ def listado(item):
title = re.sub(r'\d+[M|m|G|g][B|b]', '', title)
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren|\(iso\)|\(dvd.*?\)|(?:\d+\s*)?\d{3,4}p.*?$|extended|(?:\d+\s*)?bdrip.*?$|\(.*?\).*?$|iso$|unrated|\[.*?$|\d{4}$', '', title, flags=re.IGNORECASE)
title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren|\(iso\)|\(dvd.*?\)|(?:\d+\s*)?\d{3,4}p.*?$|extended|(?:\d+\s*)?bdrip.*?$|\(.*?\).*?$|iso$|unrated|\[.*?$|\d{4}$', '', title)
#Obtenemos temporada y episodio si se trata de Episodios
if item_local.contentType == "episode":

View File

@@ -274,7 +274,7 @@ def listado(item):
#Limpiamos el título de la basura innecesaria
title = re.sub(r'- $', '', title)
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)
title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title)
#Terminamos de limpiar el título
title = re.sub(r'\??\s?\d*?\&.*', '', title)

View File

@@ -15,7 +15,7 @@
"id": "tmdb",
"type": "list",
"label": "@70418",
"default": 3,
"default": 4,
"enabled": true,
"visible": true,
"lvalues": [
@@ -52,7 +52,7 @@
"type": "list",
"label": "@70427",
"color": "0xFFE0F04B",
"default": 3,
"default": 4,
"enabled": true,
"visible": true,
"lvalues": [

View File

@@ -38,10 +38,10 @@ def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"'
patron = '<article id="post-\d+".*?<a href="([^"]+)".*?data-src="([^"]+)".*?alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("Permalink to Watch ", "").replace("Porn Online", "").replace("Permalink to ", "")
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,

View File

@@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
# ------------------------------------------------------------
import urlparse
import urllib2
import urllib
import re
import os
import sys
from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,14 +14,17 @@ from core import httptools
host = 'http://www.webpeliculasporno.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimas" , action="lista", url= host))
itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="lista", url= host + "/?display=tube&filtre=views"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url= host + "/?display=tube&filtre=rate"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host))
itemlist.append(
Item(channel=item.channel, title="Mas vistas", action="lista", url=host + "/?display=tube&filtre=views"))
itemlist.append(
Item(channel=item.channel, title="Mejor valoradas", action="lista", url=host + "/?display=tube&filtre=rate"))
itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -36,36 +43,35 @@ def search(item, texto):
def categorias(item):
itemlist = []
data = scrapertools.cache_page(item.url)
patron = '<li class="cat-item [^>]+><a href="([^"]+)" >([^<]+)'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item [^>]+><a href="([^"]+)" >([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<li class="border-radius-5 box-shadow">.*?'
patron += 'src="([^"]+)".*?'
patron += '<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
url = urlparse.urljoin(item.url,scrapedurl)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<li><a class="next page-numbers" href="([^"]+)">Next')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
fanart=thumbnail, plot=plot, contentTitle=contentTitle))
next_page = scrapertools.find_single_match(data, '<li><a class="next page-numbers" href="([^"]+)">Next')
if next_page != "":
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
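
Several channels in this commit make the same swap seen above: the legacy scrapertools.cache_page() / cachePage() helpers give way to httptools.downloadpage(url).data. A sketch of the new pattern, assuming Alfa's core package is importable (the URL is a placeholder):

    from core import httptools

    # Old style, deprecated in this codebase:
    #   data = scrapertools.cache_page(item.url)
    # downloadpage() returns a response object: .data holds the body,
    # while .code and .sucess (sic) allow basic error handling.
    response = httptools.downloadpage("http://example.com/page")
    data = response.data if response.sucess else ""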

View File

@@ -5,28 +5,34 @@ import sys
import urlparse
from platformcode import logger
from core import scrapertools
from core import scrapertools, httptools
from core.item import Item
HOST = "http://es.xhamster.com/"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, action="videos" , title="Útimos videos" , url=HOST, viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="categorias" , title="Categorías", url=HOST))
itemlist.append( Item(channel=item.channel, action="votados" , title="Lo mejor"))
itemlist.append( Item(channel=item.channel, action="vistos" , title="Los mas vistos"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Recomendados", url=urlparse.urljoin(HOST,"/videos/recommended")))
itemlist.append( Item(channel=item.channel, action="search" , title="Buscar", url=urlparse.urljoin(HOST,"/search?q=%s")))
itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=HOST, viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="categorias", title="Categorías", url=HOST))
itemlist.append(Item(channel=item.channel, action="votados", title="Lo mejor"))
itemlist.append(Item(channel=item.channel, action="vistos", title="Los mas vistos"))
itemlist.append(Item(channel=item.channel, action="videos", title="Recomendados",
url=urlparse.urljoin(HOST, "/videos/recommended")))
itemlist.append(
Item(channel=item.channel, action="search", title="Buscar", url=urlparse.urljoin(HOST, "/search?q=%s")))
return itemlist
# REALMENTE PASA LA DIRECCION DE BUSQUEDA
def search(item,texto):
def search(item, texto):
logger.info()
tecleado = texto.replace( " ", "+" )
tecleado = texto.replace(" ", "+")
item.url = item.url % tecleado
item.extra = "buscar"
try:
@@ -37,71 +43,95 @@ def search(item,texto):
for line in sys.exc_info():
logger.error("%s" % line)
return []
# SECCION ENCARGADA DE BUSCAR
def videos(item):
logger.info()
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
itemlist = []
data = scrapertools.get_match(data,'<article.+?>(.*?)</article>')
#Patron
data = scrapertools.get_match(data, '<article.+?>(.*?)</article>')
# Patron
patron = '(?s)<div class="thumb-list__item.*?href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)">.*?'
patron += '<div class="thumb-image-container__duration">(.+?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duration in matches:
#logger.debug("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
for scrapedurl, scrapedthumbnail, scrapedtitle, duration in matches:
# logger.debug("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
fullTitle = scrapedtitle.strip() + " [" + duration + "]"
itemlist.append( Item(channel=item.channel, action="play" , title=fullTitle , url=scrapedurl, thumbnail=scrapedthumbnail, folder=True))
itemlist.append(
Item(channel=item.channel, action="play", title=fullTitle, url=scrapedurl, thumbnail=scrapedthumbnail,
folder=True))
#Paginador
# Paginador
patron = '(?s)<div class="pager-container".*?<li class="next">.*?href="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches) >0:
itemlist.append( Item(channel=item.channel, action="videos", title="Página Siguiente" , url=matches[0] , thumbnail="" , folder=True, viewmode="movie") )
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) > 0:
itemlist.append(
Item(channel=item.channel, action="videos", title="Página Siguiente", url=matches[0], thumbnail="",
folder=True, viewmode="movie"))
return itemlist
# SECCION ENCARGADA DE VOLCAR EL LISTADO DE CATEGORIAS CON EL LINK CORRESPONDIENTE A CADA PAGINA
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.cache_page(item.url)
data = scrapertools.get_match(data,'(?s)<div class="all-categories">(.*?)</aside>')
data = scrapertools.get_match(data, '(?s)<div class="all-categories">(.*?)</aside>')
patron = '(?s)<li>.*?<a href="([^"]+)".*?>([^<]+).*?</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
fullTitle = scrapedtitle.strip()
itemlist.append( Item(channel=item.channel, action="videos" , title=fullTitle , url=scrapedurl))
itemlist.append(Item(channel=item.channel, action="videos", title=fullTitle, url=scrapedurl))
return itemlist
def votados(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, action="videos" , title="Día", url=urlparse.urljoin(HOST,"/best/daily"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Semana" , url=urlparse.urljoin(HOST,"/best/weekly"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Mes" , url=urlparse.urljoin(HOST,"/best/monthly"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="De siempre" , url=urlparse.urljoin(HOST,"/best/"), viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="videos", title="Día", url=urlparse.urljoin(HOST, "/best/daily"),
viewmode="movie"))
itemlist.append(
Item(channel=item.channel, action="videos", title="Semana", url=urlparse.urljoin(HOST, "/best/weekly"),
viewmode="movie"))
itemlist.append(
Item(channel=item.channel, action="videos", title="Mes", url=urlparse.urljoin(HOST, "/best/monthly"),
viewmode="movie"))
itemlist.append(
Item(channel=item.channel, action="videos", title="De siempre", url=urlparse.urljoin(HOST, "/best/"),
viewmode="movie"))
return itemlist
def vistos(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, action="videos" , title="Día", url=urlparse.urljoin(HOST,"/most-viewed/daily"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Semana" , url=urlparse.urljoin(HOST,"/most-viewed/weekly"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="Mes" , url=urlparse.urljoin(HOST,"/most-viewed/monthly"), viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="videos" , title="De siempre" , url=urlparse.urljoin(HOST,"/most-viewed/"), viewmode="movie"))
itemlist.append(
Item(channel=item.channel, action="videos", title="a", url=urlparse.urljoin(HOST, "/most-viewed/daily"),
viewmode="movie"))
itemlist.append(
Item(channel=item.channel, action="videos", title="Semana", url=urlparse.urljoin(HOST, "/most-viewed/weekly"),
viewmode="movie"))
itemlist.append(
Item(channel=item.channel, action="videos", title="Mes", url=urlparse.urljoin(HOST, "/most-viewed/monthly"),
viewmode="movie"))
itemlist.append(
Item(channel=item.channel, action="videos", title="De siempre", url=urlparse.urljoin(HOST, "/most-viewed/"),
viewmode="movie"))
return itemlist
@@ -111,15 +141,15 @@ def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
logger.debug(data)
patron = '"([0-9]+p)":"([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
matches = re.compile(patron, re.DOTALL).findall(data)
for res, url in matches:
url = url.replace("\\", "")
logger.debug("url="+url)
itemlist.append(["%s %s [directo]" % (res, scrapertools.get_filename_from_url(url)[-4:]), url])
logger.debug("url=" + url)
itemlist.append(["%s %s [directo]" % (res, scrapertools.get_filename_from_url(url)[-4:]), url])
return itemlist
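
The play() above returns every "<resolution>p": "<url>" pair so the player can offer all qualities. A standalone sketch that orders those pairs best-first; the JSON fragment is invented for illustration:

    import re

    data = '"240p":"http:\\/\\/cdn\\/v240.mp4","720p":"http:\\/\\/cdn\\/v720.mp4","480p":"http:\\/\\/cdn\\/v480.mp4"'
    matches = re.compile(r'"([0-9]+p)":"([^"]+)"', re.DOTALL).findall(data)
    # Strip the escaped slashes and sort by numeric resolution, highest first.
    pairs = [(int(res.rstrip('p')), url.replace("\\", "")) for res, url in matches]
    for res, url in sorted(pairs, reverse=True):
        print "%dp -> %s" % (res, url)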

View File

@@ -12,7 +12,7 @@ from platformcode import config, logger
__channel__ = "xms"
host = 'https://xxxmoviestream.com/'
host = 'https://xtheatre.org'
host1 = 'https://www.cam4.com/'
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -90,8 +90,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|#038;", "", data)
patron_todos = '<div id="content">(.*?)<div id="footer"'
data = scrapertools.find_single_match(data, patron_todos)
patron = 'src="([^"]+)" class="attachment-thumb_site.*?' # img
patron += '<a href="([^"]+)" title="([^"]+)".*?' # url, title
patron += '<div class="right"><p>([^<]+)</p>' # plot
@@ -119,20 +117,19 @@ def peliculas(item):
def webcam(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>|#038;", "", data)
patron = '<div class="profileBox">.*?<a href="/([^"]+)".*?' # url
patron += 'data-hls-preview-url="([^"]+)">.*?' # video_url
patron += 'data-username="([^"]+)".*?' # username
patron += 'title="([^"]+)".*?' # title
patron += 'data-profile="([^"]+)" />' # img
patron += 'data-profile="([^"]+)"' # img
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, video_url, username, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapedtitle.replace(' Chat gratis con webcam.', '')
itemlist.append(item.clone(channel=__channel__, action="play", title=scrapedtitle,
itemlist.append(item.clone(channel=__channel__, action="play", title=username,
url=video_url, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
# Extrae el paginador

View File

@@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
# ------------------------------------------------------------
import urlparse
import urllib2
import urllib
import re
import os
import sys
from platformcode import config, logger
from core import scrapertools
@@ -11,17 +15,18 @@ from core import httptools
host = 'https://www.xozilla.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/"))
itemlist.append(Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/latest-updates/"))
itemlist.append(Item(channel=item.channel, title="Popular", action="lista", url=host + "/most-popular/"))
itemlist.append(Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="PornStar" , action="categorias", url=host + "/models/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
itemlist.append(Item(channel=item.channel, title="PornStar", action="categorias", url=host + "/models/"))
itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/"))
itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -46,64 +51,62 @@ def categorias(item):
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
patron += '<img class="thumb" src="([^"]+)".*?'
patron += '(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail, cantidad in matches:
scrapedplot = ""
cantidad = scrapertools.find_single_match(cantidad,'(\d+) videos</div>')
cantidad = scrapertools.find_single_match(cantidad, '(\d+) videos</div>')
if cantidad:
scrapedtitle += " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page!="#videos":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
if next_page=="#videos":
next_page = scrapertools.find_single_match(data,'from:(\d+)">Next</a>')
next_page = urlparse.urljoin(item.url,next_page) + "/"
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page != "#videos":
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page))
if next_page == "#videos":
next_page = scrapertools.find_single_match(data, 'from:(\d+)">Next</a>')
next_page = urlparse.urljoin(item.url, next_page) + "/"
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="item.*?'
patron += 'data-original="([^"]+)".*?'
patron += 'alt="([^"]+)".*?'
patron += '<div class="duration">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
url = scrapedurl
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
fanart=thumbnail, plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page!="#videos":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
if next_page=="#videos":
next_page = scrapertools.find_single_match(data,'from:(\d+)">Next</a>')
next_page = urlparse.urljoin(item.url,next_page) + "/"
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
fanart=thumbnail, plot=plot, contentTitle=contentTitle))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page != "#videos":
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
if next_page == "#videos":
next_page = scrapertools.find_single_match(data, 'from:(\d+)">Next</a>')
next_page = urlparse.urljoin(item.url, next_page) + "/"
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
media_url = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)/\'')
if media_url == "":
media_url = scrapertools.find_single_match(data, 'video_url: \'([^\']+)/\'')
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
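
play() above prefers video_alt_url (usually the higher-quality encoding on this kind of player) and falls back to video_url. The same fallback as a standalone sketch, with a page fragment invented for illustration:

    import re

    def find_single_match(data, patron):
        # Minimal stand-in for scrapertools.find_single_match().
        match = re.search(patron, data, re.DOTALL)
        return match.group(1) if match else ""

    data = "video_url: 'https://cdn.example.com/360/', video_alt_url: 'https://cdn.example.com/720/'"
    media_url = find_single_match(data, r"video_alt_url: '([^']+)/'")
    if media_url == "":
        media_url = find_single_match(data, r"video_url: '([^']+)/'")
    print media_url  # -> https://cdn.example.com/720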

View File

@@ -79,7 +79,7 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'--more--></p>(.*?)/a></p>')
data = scrapertools.get_match(data,'--more-->(.*?)/a>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)".*?class="external">(.*?)<'
matches = re.compile(patron,re.DOTALL).findall(data)

View File

@@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
# ------------------------------------------------------------
import urlparse
import urllib2
import urllib
import re
import os
import sys
from platformcode import config, logger
from core import scrapertools
from core.item import Item
@@ -10,14 +14,16 @@ from core import httptools
host = 'https://www.youjizz.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/newest-clips/1.html"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/1.html"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated-week/1.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
itemlist.append(Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/newest-clips/1.html"))
itemlist.append(Item(channel=item.channel, title="Popular", action="lista", url=host + "/most-popular/1.html"))
itemlist.append(
Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top-rated-week/1.html"))
itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -38,68 +44,68 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h4>Trending Categories</h4>(.*?)</ul>')
data = scrapertools.get_match(data, '<h4>Trending(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
itemlist.append( Item(channel=item.channel, action="lista", title="big tits", url= host + "/search/big-tits-1.html?") )
patron = '<li><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
patron = '<li><a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapedtitle
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
scrapedurl = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="video-item">.*?'
patron = '<div class="video-item">.*?'
patron += 'class="frame image" href="([^"]+)".*?'
patron += 'data-original="([^"]+)" />.*?'
patron += '<div class="video-title">.*?'
patron += '>(.*?)</a>.*?'
patron += '<span class="time">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
url = urlparse.urljoin(item.url, scrapedurl)
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
quality= ""
if '-720-' in scrapedthumbnail : quality = "720"
if '-1080-' in scrapedthumbnail : quality = "1080"
quality = ""
if '-720-' in scrapedthumbnail:
quality = "720"
if '-1080-' in scrapedthumbnail:
quality = "1080"
if quality:
title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "p[/COLOR] " + scrapedtitle
title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "p[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = "http:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, quality= quality, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<li><a class="pagination-next" href="([^"]+)">Next &raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>" , text_color="blue", url=next_page) )
itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, quality=quality, contentTitle=contentTitle))
next_page = scrapertools.find_single_match(data, '<li><a class="pagination-next" href="([^"]+)">Next &raquo;</a>')
if next_page != "":
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = scrapertools.get_match(data,'var encodings(.*?)var')
if '360' in data:
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, 'var encodings(.*?)var')
if '360' in data:
patron = '"360".*?"filename"\:"(.*?)"'
if '720' in data:
if '720' in data:
patron = '"720".*?"filename"\:"(.*?)"'
if '1080' in data:
if '1080' in data:
patron = '"1080".*?"filename"\:"(.*?)"'
media_url = scrapertools.find_single_match(data, patron)
media_url = "https:" + media_url.replace("\\", "")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
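
In the play() above, each successive if overwrites patron, so the highest quality present in the encodings block wins (1080 over 720 over 360). A standalone sketch that states the best-first intent explicitly; the encodings fragment is invented:

    import re

    data = '"360", "filename":"\\/\\/cdn\\/v360.mp4" ... "720", "filename":"\\/\\/cdn\\/v720.mp4"'
    media_url = ""
    # Try qualities from best to worst and stop at the first one present.
    for quality in ("1080", "720", "360"):
        match = re.search('"%s".*?"filename"\\:"(.*?)"' % quality, data, re.DOTALL)
        if match:
            media_url = "https:" + match.group(1).replace("\\", "")
            break
    print media_url  # -> https://cdn/v720.mp4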

View File

@@ -19,7 +19,7 @@ def youtube_api_call(method, parameters):
url = "https://www.googleapis.com/youtube/v3/" + method + "?" + encoded_parameters + "&key=" + YOUTUBE_V3_API_KEY;
logger.info("url=" + url)
data = scrapertools.cache_page(url)
data = httptools.downloadpage(url).data
logger.info("data=" + data)
json_object = jsontools.load(data)
@@ -37,7 +37,7 @@ def youtube_get_user_playlists(user_id, pageToken=""):
{"part": "snippet,contentDetails", "channelId": channel_id, "maxResults": 50,
"pageToken": pageToken})
return json_object;
return json_object
def youtube_get_playlist_items(playlist_id, pageToken=""):
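
youtube_api_call() above concatenates the method name, the url-encoded parameters and the API key into a Data API v3 URL, now fetched through httptools. The URL construction as a standalone sketch (the key and playlist id are placeholders):

    import urllib

    YOUTUBE_V3_API_KEY = "YOUR_API_KEY"  # placeholder, not a real key

    def build_youtube_url(method, parameters):
        # Mirrors the URL built inside youtube_api_call() above.
        encoded = urllib.urlencode(parameters)
        return ("https://www.googleapis.com/youtube/v3/" + method +
                "?" + encoded + "&key=" + YOUTUBE_V3_API_KEY)

    print build_youtube_url("playlistItems", {"part": "snippet", "maxResults": 50, "playlistId": "PLxxxxxxxx"})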

View File

@@ -353,7 +353,7 @@ def listado(item):
item_local.quality += " 3D"
else:
item_local.quality = "3D"
title = re.sub('3D', '', title, flags=re.IGNORECASE)
title = re.sub('(?i)3D', '', title)
title = title.replace('[]', '')
if item_local.quality:
item_local.quality += ' %s' % scrapertools.find_single_match(title, '\[(.*?)\]')
@@ -418,7 +418,7 @@ def listado(item):
title = re.sub(r'- $', '', title)
#Limpiamos el título de la basura innecesaria
title = re.sub(r'TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title, flags=re.IGNORECASE)
title = re.sub(r'(?i)TV|Online|Spanish|Torrent|en Espa\xc3\xb1ol|Español|Latino|Subtitulado|Blurayrip|Bluray rip|\[.*?\]|R2 Pal|\xe3\x80\x90 Descargar Torrent \xe3\x80\x91|Completa|Temporada|Descargar|Torren', '', title)
title = title.replace("Dual", "").replace("dual", "").replace("Subtitulada", "").replace("subtitulada", "").replace("Subt", "").replace("subt", "").replace("(Proper)", "").replace("(proper)", "").replace("Proper", "").replace("proper", "").replace("#", "").replace("(Latino)", "").replace("Latino", "").replace("LATINO", "").replace("Spanish", "").replace("Trailer", "").replace("Audio", "")
title = title.replace("HDTV-Screener", "").replace("DVDSCR", "").replace("TS ALTA", "").replace("- HDRip", "").replace("(HDRip)", "").replace("- Hdrip", "").replace("(microHD)", "").replace("(DVDRip)", "").replace("HDRip", "").replace("(BR-LINE)", "").replace("(HDTS-SCREENER)", "").replace("(BDRip)", "").replace("(BR-Screener)", "").replace("(DVDScreener)", "").replace("TS-Screener", "").replace(" TS", "").replace(" Ts", "").replace(" 480p", "").replace(" 480P", "").replace(" 720p", "").replace(" 720P", "").replace(" 1080p", "").replace(" 1080P", "").replace("DVDRip", "").replace(" Dvd", "").replace(" DVD", "").replace(" V.O", "").replace(" Unrated", "").replace(" UNRATED", "").replace(" unrated", "").replace("screener", "").replace("TS-SCREENER", "").replace("TSScreener", "").replace("HQ", "").replace("AC3 5.1", "").replace("Telesync", "").replace("Line Dubbed", "").replace("line Dubbed", "").replace("LineDuB", "").replace("Line", "").replace("XviD", "").replace("xvid", "").replace("XVID", "").replace("Mic Dubbed", "").replace("HD", "").replace("V2", "").replace("CAM", "").replace("VHS.SCR", "").replace("Dvd5", "").replace("DVD5", "").replace("Iso", "").replace("ISO", "").replace("Reparado", "").replace("reparado", "").replace("DVD9", "").replace("Dvd9", "")

View File

@@ -99,7 +99,7 @@ load_cookies()
def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=True, cookies=True, replace_headers=False,
add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0, random_headers=False, ignore_response_code=False):
add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0, random_headers=False, ignore_response_code=False, alfa_s=False):
"""
Abre una url y retorna los datos obtenidos
@@ -164,22 +164,23 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if timeout is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None: timeout = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
if timeout == 0: timeout = None
logger.info("----------------------------------------------")
logger.info("downloadpage Alfa: %s" %__version)
logger.info("----------------------------------------------")
logger.info("Timeout: %s" % timeout)
logger.info("URL: " + url)
logger.info("Dominio: " + urlparse.urlparse(url)[1])
if post:
logger.info("Peticion: POST")
else:
logger.info("Peticion: GET")
logger.info("Usar Cookies: %s" % cookies)
logger.info("Descargar Pagina: %s" % (not only_headers))
logger.info("Fichero de Cookies: " + ficherocookies)
logger.info("Headers:")
for header in request_headers:
logger.info("- %s: %s" % (header, request_headers[header]))
if not alfa_s:
logger.info("----------------------------------------------")
logger.info("downloadpage Alfa: %s" %__version)
logger.info("----------------------------------------------")
logger.info("Timeout: %s" % timeout)
logger.info("URL: " + url)
logger.info("Dominio: " + urlparse.urlparse(url)[1])
if post:
logger.info("Peticion: POST")
else:
logger.info("Peticion: GET")
logger.info("Usar Cookies: %s" % cookies)
logger.info("Descargar Pagina: %s" % (not only_headers))
logger.info("Fichero de Cookies: " + ficherocookies)
logger.info("Headers:")
for header in request_headers:
logger.info("- %s: %s" % (header, request_headers[header]))
# Handlers
handlers = [urllib2.HTTPHandler(debuglevel=False)]
@@ -192,7 +193,8 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
opener = urllib2.build_opener(*handlers)
logger.info("Realizando Peticion")
if not alfa_s:
logger.info("Realizando Peticion")
# Contador
inicio = time.time()
@@ -243,15 +245,17 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
response["time"] = time.time() - inicio
response["url"] = handle.geturl()
logger.info("Terminado en %.2f segundos" % (response["time"]))
logger.info("Response sucess: %s" % (response["sucess"]))
logger.info("Response code: %s" % (response["code"]))
logger.info("Response error: %s" % (response["error"]))
logger.info("Response data length: %s" % (len(response["data"])))
logger.info("Response headers:")
if not alfa_s:
logger.info("Terminado en %.2f segundos" % (response["time"]))
logger.info("Response sucess: %s" % (response["sucess"]))
logger.info("Response code: %s" % (response["code"]))
logger.info("Response error: %s" % (response["error"]))
logger.info("Response data length: %s" % (len(response["data"])))
logger.info("Response headers:")
server_cloudflare = ""
for header in response["headers"]:
logger.info("- %s: %s" % (header, response["headers"][header]))
if not alfa_s:
logger.info("- %s: %s" % (header, response["headers"][header]))
if "cloudflare" in response["headers"][header]:
server_cloudflare = "cloudflare"
@@ -266,22 +270,27 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if cookies:
save_cookies()
logger.info("Encoding: %s" % (response["headers"].get('content-encoding')))
if not alfa_s:
logger.info("Encoding: %s" % (response["headers"].get('content-encoding')))
if response["headers"].get('content-encoding') == 'gzip':
logger.info("Descomprimiendo...")
if not alfa_s:
logger.info("Descomprimiendo...")
data_alt = response["data"]
try:
response["data"] = gzip.GzipFile(fileobj=StringIO(response["data"])).read()
logger.info("Descomprimido")
if not alfa_s:
logger.info("Descomprimido")
except:
logger.info("No se ha podido descomprimir con gzip. Intentando con zlib")
if not alfa_s:
logger.info("No se ha podido descomprimir con gzip. Intentando con zlib")
response["data"] = data_alt
try:
import zlib
response["data"] = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(response["data"])
except:
logger.info("No se ha podido descomprimir con zlib")
if not alfa_s:
logger.info("No se ha podido descomprimir con zlib")
response["data"] = data_alt
# Anti Cloudflare
@@ -289,11 +298,14 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
cf = Cloudflare(response)
if cf.is_cloudflare:
count_retries += 1
logger.info("cloudflare detectado, esperando %s segundos..." % cf.wait_time)
if not alfa_s:
logger.info("cloudflare detectado, esperando %s segundos..." % cf.wait_time)
auth_url = cf.get_url()
logger.info("Autorizando... intento %d url: %s" % (count_retries, auth_url))
if not alfa_s:
logger.info("Autorizando... intento %d url: %s" % (count_retries, auth_url))
if downloadpage(auth_url, headers=request_headers, replace_headers=True, count_retries=count_retries).sucess:
logger.info("Autorización correcta, descargando página")
if not alfa_s:
logger.info("Autorización correcta, descargando página")
resp = downloadpage(url=response["url"], post=post, headers=headers, timeout=timeout,
follow_redirects=follow_redirects,
cookies=cookies, replace_headers=replace_headers, add_referer=add_referer)
@@ -305,7 +317,8 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
response["time"] = resp.time
response["url"] = resp.url
else:
logger.info("No se ha podido autorizar")
if not alfa_s:
logger.info("No se ha podido autorizar")
return type('HTTPResponse', (), response)
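
The alfa_s flag threaded through downloadpage() above silences the request/response logging for internal calls, at the cost of repeating "if not alfa_s:" before every logger.info(). A hypothetical alternative (not part of this patch) that pays that cost once:

    def make_logger(logger, silent):
        # Drop-in replacement for logger.info that honours the alfa_s
        # flag in one place instead of guarding each call individually.
        def log(message):
            if not silent:
                logger.info(message)
        return log

    # Hypothetical usage inside downloadpage(url, ..., alfa_s=False):
    #   log = make_logger(logger, alfa_s)
    #   log("URL: " + url)
    #   log("Timeout: %s" % timeout)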

View File

@@ -151,7 +151,7 @@ def cuadro_completar(item):
'default': mediatype_default,
'enabled': True,
'visible': True,
'lvalues': [config.get_localized_string(60244), config.get_localized_string(60245)]
'lvalues': [config.get_localized_string(60244), config.get_localized_string(70136)]
}]
for i, c in enumerate(controls):

View File

@@ -144,10 +144,9 @@ def findvideos(data, skip=False):
for serverid in servers_list:
if not is_server_enabled(serverid):
continue
if config.get_setting("black_list", server=serverid):
if config.get_setting("filter_servers") == True and config.get_setting("black_list", server=serverid):
is_filter_servers = True
continue
devuelve.extend(findvideosbyserver(data, serverid))
if skip and len(devuelve) >= skip:
devuelve = devuelve[:skip]
@@ -166,7 +165,6 @@ def findvideosbyserver(data, serverid):
server_parameters = get_server_parameters(serverid)
devuelve = []
if "find_videos" in server_parameters:
# Recorre los patrones
for pattern in server_parameters["find_videos"].get("patterns", []):
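
After the change above, a blacklisted server is skipped only while the global "filter_servers" setting is enabled; with filtering off, blacklisted servers are listed again. The decision as a standalone sketch, with config stubbed by a plain dict:

    def should_skip(serverid, settings):
        # settings stands in for config.get_setting(); the per-server
        # blacklist only applies while the global toggle is on.
        if not settings.get("filter_servers"):
            return False
        return bool(settings.get("black_list_%s" % serverid))

    settings = {"filter_servers": True, "black_list_openload": True}
    print should_skip("openload", settings)   # -> True
    settings["filter_servers"] = False
    print should_skip("openload", settings)   # -> False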

View File

@@ -455,7 +455,7 @@ def find_and_set_infoLabels(item):
title = item.contentTitle
else:
tipo_busqueda = "tv"
tipo_contenido = config.get_localized_string(70529)
tipo_contenido = config.get_localized_string(60245)
title = item.contentSerieName
# Si el titulo incluye el (año) se lo quitamos
@@ -478,7 +478,7 @@ def find_and_set_infoLabels(item):
if len(results) > 1:
from platformcode import platformtools
tmdb_result = platformtools.show_video_info(results, item=item,
caption="[%s]: Selecciona la %s correcta" % (title, tipo_contenido))
caption=config.get_localized_string(60247) %(title, tipo_contenido))
elif len(results) > 0:
tmdb_result = results[0]

View File

@@ -441,7 +441,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if e.emergency_urls: #Si ya tenemos urls...
emergency_urls_succ = True #... es un éxito y vamos a marcar el .nfo
if not e.infoLabels: #en series multicanal, prevalece el infolabels...
if not e.infoLabels["tmdb_id"] or (serie.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != serie.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels...
e.infoLabels = serie.infoLabels #... del canal actual y no el del original
e.contentSeason, e.contentEpisodeNumber = season_episode.split("x")
new_episodelist.append(e)
@@ -516,7 +516,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
if not item_nfo:
head_nfo, item_nfo = read_nfo(nfo_path)
if not e.infoLabels: #en series multicanal, prevalece el infolabels...
if not e.infoLabels["tmdb_id"] or (item_nfo.infoLabels["tmdb_id"] and e.infoLabels["tmdb_id"] != item_nfo.infoLabels["tmdb_id"]): #en series multicanal, prevalece el infolabels...
e.infoLabels = item_nfo.infoLabels #... del canal actual y no el del original
if filetools.write(json_path, e.tojson()):
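
The new guard above replaces an episode's infoLabels with the show's not only when the episode has none, but also when the two tmdb_id values disagree, which happens with multi-channel series. The same logic as a standalone sketch on plain dicts:

    def inherit_infolabels(episode_labels, serie_labels):
        # Prefer the current channel's metadata: adopt the show's
        # infoLabels when the episode has no tmdb_id or points at a
        # different show (multi-channel series).
        e_id = episode_labels.get("tmdb_id")
        s_id = serie_labels.get("tmdb_id")
        if not e_id or (s_id and e_id != s_id):
            return dict(serie_labels)
        return episode_labels

    print inherit_infolabels({"tmdb_id": None}, {"tmdb_id": 603, "title": "Matrix"})
    print inherit_infolabels({"tmdb_id": 604}, {"tmdb_id": 603, "title": "Matrix"})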

File diff suppressed because one or more lines are too long

View File

@@ -280,7 +280,9 @@ def post_tmdb_listado(item, itemlist):
item.category_new = ''
for item_local in itemlist: #Recorremos el Itemlist generado por el canal
title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip()
#item_local.title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
title = item_local.title
#logger.debug(item_local)
item_local.last_page = 0
@@ -375,11 +377,13 @@ def post_tmdb_listado(item, itemlist):
item_local.contentSerieName = item_local.from_title
if item_local.contentType == 'season':
item_local.title = item_local.from_title
title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip()
title = item_local.title
#Limpiamos calidad de títulos originales que se hayan podido colar
if item_local.infoLabels['originaltitle'].lower() in item_local.quality.lower():
item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality)
#item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
# Preparamos el título para series, con los núm. de temporadas, si las hay
if item_local.contentType in ['season', 'tvshow', 'episode']:
@@ -775,7 +779,7 @@ def post_tmdb_episodios(item, itemlist):
del item_local.totalItems
item_local.unify = 'xyz'
del item_local.unify
item_local.title = re.sub(r'online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title, flags=re.IGNORECASE).strip()
item_local.title = re.sub(r'(?i)online|descarga|downloads|trailer|videoteca|gb|autoplay', '', item_local.title).strip()
#logger.debug(item_local)
@@ -851,7 +855,8 @@ def post_tmdb_episodios(item, itemlist):
#Limpiamos calidad de títulos originales que se hayan podido colar
if item_local.infoLabels['originaltitle'].lower() in item_local.quality.lower():
item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality)
#item_local.quality = re.sub(item_local.infoLabels['originaltitle'], '', item_local.quality, flags=re.IGNORECASE)
#Si no está el título del episodio, pero sí está en "title", lo rescatamos
if not item_local.infoLabels['episodio_titulo'] and item_local.infoLabels['title'].lower() != item_local.infoLabels['tvshowtitle'].lower():
@@ -1489,8 +1494,9 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
fail_over = settings['default'] #Carga lista de clones
break
fail_over_list = ast.literal_eval(fail_over)
#logger.debug(str(fail_over_list))
if item.from_channel and item.from_channel != 'videolibrary': #Desde search puede venir con el nombre de canal equivocado
if item.from_channel and item.from_channel != 'videolibrary': #Desde search puede venir con el nombre de canal equivocado
item.channel = item.from_channel
#Recorremos el Array identificando el canal que falla
for active, channel, channel_host, contentType, action_excluded in fail_over_list:
@@ -1503,10 +1509,11 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
channel_failed = channel #salvamos el nombre del canal o categoría
channel_host_failed = channel_host #salvamos el nombre del host
channel_url_failed = item.url #salvamos la url
#logger.debug(channel_failed + ' / ' + channel_host_failed)
if patron == True and active == '1': #solo nos han pedido verificar el clone
return (item, data) #nos vamos, con el mismo clone, si está activo
if (item.action == 'episodios' or item.action == 'findvideos') and item.contentType not in contentType: #soporta el fail_over de este contenido?
if (item.action == 'episodios' or item.action == "update_tvshow" or item.action == "get_seasons" or item.action == 'findvideos') and item.contentType not in contentType: #soporta el fail_over de este contenido?
logger.error("ERROR 99: " + item.action.upper() + ": Acción no soportada para Fail-Over en canal: " + item.url)
return (item, data) #no soporta el fail_over de este contenido, no podemos hacer nada
break
@@ -1521,7 +1528,7 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
data_alt = ''
if channel == channel_failed or active == '0' or item.action in action_excluded or item.extra2 in action_excluded: #es válido el nuevo canal?
continue
if (item.action == 'episodios' or item.action == 'findvideos') and item.contentType not in contentType: #soporta el contenido?
if (item.action == 'episodios' or item.action == "update_tvshow" or item.action == "get_seasons" or item.action == 'findvideos') and item.contentType not in contentType: #soporta el contenido?
continue
#Hacemos el cambio de nombre de canal y url, conservando las anteriores como ALT
@@ -1531,12 +1538,16 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
item.category = channel.capitalize()
item.url_alt = channel_url_failed
item.url = channel_url_failed
item.url = item.url.replace(channel_host_failed, channel_host)
channel_host_bis = re.sub(r'(?i)http.*://', '', channel_host)
channel_host_failed_bis = re.sub(r'(?i)http.*://', '', channel_host_failed)
item.url = item.url.replace(channel_host_failed_bis, channel_host_bis)
url_alt += [item.url] #salvamos la url para el bucle
item.channel_host = channel_host
#logger.debug(str(url_alt))
#quitamos el código de series, porque puede variar entre webs
if item.action == "episodios" or item.action == "get_seasons":
if item.action == "episodios" or item.action == "get_seasons" or item.action == "update_tvshow":
item.url = re.sub(r'\/\d+\/?$', '', item.url) #parece que con el título solo ecuentra la serie, normalmente...
url_alt = [item.url] #salvamos la url para el bucle, pero de momento ignoramos la inicial con código de serie
@@ -1597,9 +1608,11 @@ def fail_over_newpct1(item, patron, patron2=None, timeout=None):
#Función especial para encontrar en otro clone un .torrent válido
if verify_torrent == 'torrent:check:status':
from core import videolibrarytools
if not data_alt.startswith("http"): #Si le falta el http.: lo ponemos
data_alt = scrapertools.find_single_match(item.channel_host, '(\w+:)//') + data_alt
if videolibrarytools.verify_url_torrent(data_alt): #verificamos si el .torrent existe
item.url = url #guardamos la url que funciona
break #nos vamos, con la nueva url del .torrent verificada
break #nos vamos, con la nueva url del .torrent verificada
data = ''
continue #no vale el .torrent, continuamos
item.url = url #guardamos la url que funciona, sin verificar
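
The fail-over fix above strips the scheme before swapping hosts, so a URL saved from an http:// clone still maps onto an https:// clone and vice versa. The transformation as a standalone sketch, with hosts invented for illustration:

    import re

    def swap_host(url, failed_host, new_host):
        # Compare hosts without their scheme so an http/https mismatch
        # between clones does not defeat the replacement.
        failed = re.sub(r'(?i)http.*://', '', failed_host)
        new = re.sub(r'(?i)http.*://', '', new_host)
        return url.replace(failed, new)

    print swap_host("http://clone-a.example.com/serie/1234",
                    "https://clone-a.example.com", "http://clone-b.example.org")
    # -> http://clone-b.example.org/serie/1234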

View File

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):
class UnshortenIt(object):
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|activetect\.net|baymaleti\.net|thouth\.net'
_adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|activetect\.net|baymaleti\.net|thouth\.net|uclaut.net'
_linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
_adfocus_regex = r'adfoc\.us'
_lnxlu_regex = r'lnx\.lu'
@@ -76,7 +76,7 @@ class UnshortenIt(object):
if re.search(self._cryptmango_regex, uri, re.IGNORECASE):
return self._unshorten_cryptmango(uri)
return uri, 200
return uri, 0
def unwrap_30x(self, uri, timeout=10):
def unwrap_30x(uri, timeout=10):
@@ -442,12 +442,15 @@ class UnshortenIt(object):
r = httptools.downloadpage(uri, timeout=self._timeout, cookies=False)
html = r.data
uri = re.findall(r'<a class="push_button blue" href=([^>]+)>', html)[0]
if 'embed' in uri:
uri = re.findall(r'<a class="play-btn" href=([^">]*)>', html)[0]
else:
uri = re.findall(r'<a class="push_button blue" href=([^>]+)>', html)[0]
return uri, r.code
except Exception as e:
return uri, str(e)
return uri, 0
def _unshorten_cryptmango(self, uri):
try:

View File

@@ -1,476 +1,478 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# XBMC Launcher (xbmc / kodi)
# ------------------------------------------------------------
import os
import sys
import urllib2
import time
from core import channeltools
from core import scrapertools
from core import servertools
from core import videolibrarytools
from core import trakt_tools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode.logger import WebErrorException
def start():
""" Primera funcion que se ejecuta al entrar en el plugin.
Dentro de esta funcion deberian ir todas las llamadas a las
funciones que deseamos que se ejecuten nada mas abrir el plugin.
"""
logger.info()
#config.set_setting('show_once', True)
# Test if all the required directories are created
config.verify_directories_created()
def run(item=None):
logger.info()
if not item:
# Extract item from sys.argv
if sys.argv[2]:
item = Item().fromurl(sys.argv[2])
# If no item, this is mainlist
else:
if config.get_setting("start_page"):
if not config.get_setting("custom_start"):
category = config.get_setting("category").lower()
item = Item(channel="news", action="novedades", extra=category, mode = 'silent')
else:
from channels import side_menu
item= Item()
item = side_menu.check_user_home(item)
item.start = True;
else:
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
if not config.get_setting('show_once'):
from platformcode import xbmc_videolibrary
xbmc_videolibrary.ask_set_content(1)
config.set_setting('show_once', True)
logger.info(item.tostring())
try:
# If item has no action, stops here
if item.action == "":
logger.info("Item sin accion")
return
# Action for main menu in channelselector
elif item.action == "getmainlist":
import channelselector
itemlist = channelselector.getmainlist()
platformtools.render_items(itemlist, item)
# Action for channel types on channelselector: movies, series, etc.
elif item.action == "getchanneltypes":
import channelselector
itemlist = channelselector.getchanneltypes()
platformtools.render_items(itemlist, item)
# Action for channel listing on channelselector
elif item.action == "filterchannels":
import channelselector
itemlist = channelselector.filterchannels(item.channel_type)
platformtools.render_items(itemlist, item)
# Special action for playing a video from the library
elif item.action == "play_from_library":
play_from_library(item)
return
elif item.action == "keymap":
from platformcode import keymaptools
if item.open:
return keymaptools.open_shortcut_menu()
else:
return keymaptools.set_key()
elif item.action == "script":
from core import tmdb
if tmdb.drop_bd():
platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(60011), time=2000, sound=False)
# Action in certain channel specified in "action" and "channel" parameters
else:
# Entry point for a channel is the "mainlist" action, so here we check parental control
if item.action == "mainlist":
# Parental control
# If it is an adult channel, and user has configured pin, asks for it
if channeltools.is_adult(item.channel) and config.get_setting("adult_request_password"):
tecleado = platformtools.dialog_input("", config.get_localized_string(60334), True)
if tecleado is None or tecleado != config.get_setting("adult_password"):
return
# # Actualiza el canal individual
# if (item.action == "mainlist" and item.channel != "channelselector" and
# config.get_setting("check_for_channel_updates") == True):
# from core import updater
# updater.update_channel(item.channel)
# Checks if channel exists
channel_file = os.path.join(config.get_runtime_path(),
'channels', item.channel + ".py")
logger.info("channel_file=%s" % channel_file)
channel = None
if os.path.exists(channel_file):
try:
channel = __import__('channels.%s' % item.channel, None,
None, ["channels.%s" % item.channel])
except ImportError:
exec "import channels." + item.channel + " as channel"
logger.info("Running channel %s | %s" % (channel.__name__, channel.__file__))
# Special play action
if item.action == "play":
#define la info para trakt
try:
trakt_tools.set_trakt_info(item)
except:
pass
logger.info("item.action=%s" % item.action.upper())
# logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))
# First checks if channel has a "play" function
if hasattr(channel, 'play'):
logger.info("Executing channel 'play' method")
itemlist = channel.play(item)
b_favourite = item.isFavourite
# Play should return a list of playable URLS
if len(itemlist) > 0 and isinstance(itemlist[0], Item):
item = itemlist[0]
if b_favourite:
item.isFavourite = True
platformtools.play_video(item)
# Permitir varias calidades desde play en el canal
elif len(itemlist) > 0 and isinstance(itemlist[0], list):
item.video_urls = itemlist
platformtools.play_video(item)
# If not, shows user an error message
else:
platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(60339))
# If player don't have a "play" function, not uses the standard play from platformtools
else:
logger.info("Executing core 'play' method")
platformtools.play_video(item)
# Special action for findvideos, where the plugin looks for known urls
elif item.action == "findvideos":
# First checks if channel has a "findvideos" function
if hasattr(channel, 'findvideos'):
itemlist = getattr(channel, item.action)(item)
itemlist = servertools.filter_servers(itemlist)
# If not, uses the generic findvideos function
else:
logger.info("No channel 'findvideos' method, "
"executing core method")
itemlist = servertools.find_video_items(item)
if config.get_setting("max_links", "videolibrary") != 0:
itemlist = limit_itemlist(itemlist)
from platformcode import subtitletools
subtitletools.saveSubtitleName(item)
platformtools.render_items(itemlist, item)
# Special action for adding a movie to the library
elif item.action == "add_pelicula_to_library":
videolibrarytools.add_movie(item)
# Special action for adding a serie to the library
elif item.action == "add_serie_to_library":
videolibrarytools.add_tvshow(item, channel)
# Special action for downloading all episodes from a serie
elif item.action == "download_all_episodes":
from channels import downloads
item.action = item.extra
del item.extra
downloads.save_download(item)
# Special action for searching, first asks for the words then call the "search" function
elif item.action == "search":
logger.info("item.action=%s" % item.action.upper())
last_search = ""
last_search_active = config.get_setting("last_search", "search")
if last_search_active:
try:
current_saved_searches_list = list(config.get_setting("saved_searches_list", "search"))
last_search = current_saved_searches_list[0]
except:
pass
tecleado = platformtools.dialog_input(last_search)
if tecleado is not None:
if last_search_active and not tecleado.startswith("http"):
from channels import search
search.save_search(tecleado)
itemlist = channel.search(item, tecleado)
else:
return
platformtools.render_items(itemlist, item)
# For all other actions
else:
logger.info("Executing channel '%s' method" % item.action)
itemlist = getattr(channel, item.action)(item)
if config.get_setting('trakt_sync'):
token_auth = config.get_setting("token_trakt", "trakt")
if not token_auth:
trakt_tools.auth_trakt()
else:
import xbmc
if not xbmc.getCondVisibility('System.HasAddon(script.trakt)') and config.get_setting(
'install_trakt'):
trakt_tools.ask_install_script()
itemlist = trakt_tools.trakt_check(itemlist)
else:
config.set_setting('install_trakt', True)
platformtools.render_items(itemlist, item)
except urllib2.URLError, e:
import traceback
logger.error(traceback.format_exc())
# Grab inner and third party errors
if hasattr(e, 'reason'):
logger.error("Razon del error, codigo: %s | Razon: %s" % (str(e.reason[0]), str(e.reason[1])))
texto = config.get_localized_string(30050) # "No se puede conectar con el sitio web"
platformtools.dialog_ok("alfa", texto)
# Grab server response errors
elif hasattr(e, 'code'):
logger.error("Codigo de error HTTP : %d" % e.code)
# "El sitio web no funciona correctamente (error http %d)"
platformtools.dialog_ok("alfa", config.get_localized_string(30051) % e.code)
except WebErrorException, e:
import traceback
logger.error(traceback.format_exc())
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
"\\\\") + '([^.]+)\.py"'
canal = scrapertools.find_single_match(traceback.format_exc(), patron)
platformtools.dialog_ok(
config.get_localized_string(59985) + canal,
config.get_localized_string(60013) %(e))
except:
import traceback
logger.error(traceback.format_exc())
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
"\\\\") + '([^.]+)\.py"'
canal = scrapertools.find_single_match(traceback.format_exc(), patron)
try:
import xbmc
if config.get_platform(True)['num_version'] < 14:
log_name = "xbmc.log"
else:
log_name = "kodi.log"
log_message = config.get_localized_string(50004) + xbmc.translatePath("special://logpath") + log_name
except:
log_message = ""
if canal:
platformtools.dialog_ok(
config.get_localized_string(60087) %canal,
config.get_localized_string(60014),
log_message)
else:
platformtools.dialog_ok(
config.get_localized_string(60038),
config.get_localized_string(60015),
log_message)
def reorder_itemlist(itemlist):
logger.info()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))
new_list = []
mod_list = []
not_mod_list = []
modified = 0
not_modified = 0
to_change = [[config.get_localized_string(60335), '[V]'],
[config.get_localized_string(60336), '[D]']]
for item in itemlist:
old_title = unicode(item.title, "utf8").lower().encode("utf8")
for before, after in to_change:
if before in item.title:
item.title = item.title.replace(before, after)
break
new_title = unicode(item.title, "utf8").lower().encode("utf8")
if old_title != new_title:
mod_list.append(item)
modified += 1
else:
not_mod_list.append(item)
not_modified += 1
# logger.debug("OLD: %s | NEW: %s" % (old_title, new_title))
new_list.extend(mod_list)
new_list.extend(not_mod_list)
logger.info("Titulos modificados:%i | No modificados:%i" % (modified, not_modified))
if len(new_list) == 0:
new_list = itemlist
# logger.debug("Outlet itemlist size: %i" % len(new_list))
return new_list
def limit_itemlist(itemlist):
logger.info()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))
try:
opt = config.get_setting("max_links", "videolibrary")
if opt == 0:
new_list = itemlist
else:
i_max = 30 * opt
new_list = itemlist[:i_max]
# logger.debug("Outlet itemlist size: %i" % len(new_list))
return new_list
except:
return itemlist
def play_from_library(item):
"""
Los .strm al reproducirlos desde kodi, este espera que sea un archivo "reproducible" asi que no puede contener
más items, como mucho se puede colocar un dialogo de seleccion.
Esto lo solucionamos "engañando a kodi" y haciendole creer que se ha reproducido algo, asi despues mediante
"Container.Update()" cargamos el strm como si un item desde dentro del addon se tratara, quitando todas
las limitaciones y permitiendo reproducir mediante la funcion general sin tener que crear nuevos métodos para
la videoteca.
@type item: item
@param item: elemento con información
"""
logger.info()
#logger.debug("item: \n" + item.tostring('\n'))
import xbmcgui
import xbmcplugin
import xbmc
# Intentamos reproducir una imagen (esto no hace nada y ademas no da error)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True,
xbmcgui.ListItem(
path=os.path.join(config.get_runtime_path(), "resources", "subtitle.mp4")))
# Por si acaso la imagen hiciera (en futuras versiones) le damos a stop para detener la reproduccion
xbmc.Player().stop()
# modificamos el action (actualmente la videoteca necesita "findvideos" ya que es donde se buscan las fuentes
item.action = "findvideos"
window_type = config.get_setting("window_type", "videolibrary")
    # and relaunch Kodi
if xbmc.getCondVisibility('Window.IsMedia') and not window_type == 1:
        # Conventional window
xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ")")
else:
        # Pop-up window
from channels import videolibrary
p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(70004))
p_dialog.update(0, '')
itemlist = videolibrary.findvideos(item)
while platformtools.is_playing():
            # Wait while another video is still playing
from time import sleep
sleep(5)
p_dialog.update(50, '')
        '''# Filter the links according to the blacklist
if config.get_setting('filter_servers', "servers"):
itemlist = servertools.filter_servers(itemlist)'''
        # Limit the number of links to show
if config.get_setting("max_links", "videolibrary") != 0:
itemlist = limit_itemlist(itemlist)
# Se "limpia" ligeramente la lista de enlaces
if config.get_setting("replace_VD", "videolibrary") == 1:
itemlist = reorder_itemlist(itemlist)
import time
p_dialog.update(100, '')
time.sleep(0.5)
p_dialog.close()
if len(itemlist) > 0:
while not xbmc.Monitor().abortRequested():
                # The user chooses the mirror
opciones = []
for item in itemlist:
opciones.append(item.title)
                # Open the selection dialog
if (item.contentSerieName != "" and
item.contentSeason != "" and
item.contentEpisodeNumber != ""):
cabecera = ("%s - %sx%s -- %s" %
(item.contentSerieName,
item.contentSeason,
item.contentEpisodeNumber,
config.get_localized_string(30163)))
else:
cabecera = config.get_localized_string(30163)
seleccion = platformtools.dialog_select(cabecera, opciones)
if seleccion == -1:
return
else:
item = videolibrary.play(itemlist[seleccion])[0]
platformtools.play_video(item)
from channels import autoplay
if (platformtools.is_playing() and item.action) or item.server == 'torrent' or autoplay.is_active(item.contentChannel):
break
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# XBMC Launcher (xbmc / kodi)
# ------------------------------------------------------------
import os
import sys
import urllib2
import time
from core import channeltools
from core import scrapertools
from core import servertools
from core import videolibrarytools
from core import trakt_tools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode.logger import WebErrorException
def start():
""" Primera funcion que se ejecuta al entrar en el plugin.
Dentro de esta funcion deberian ir todas las llamadas a las
funciones que deseamos que se ejecuten nada mas abrir el plugin.
"""
logger.info()
#config.set_setting('show_once', True)
# Test if all the required directories are created
config.verify_directories_created()
def run(item=None):
logger.info()
if not item:
# Extract item from sys.argv
if sys.argv[2]:
item = Item().fromurl(sys.argv[2])
# If no item, this is mainlist
else:
if config.get_setting("start_page"):
if not config.get_setting("custom_start"):
category = config.get_setting("category").lower()
item = Item(channel="news", action="novedades", extra=category, mode = 'silent')
else:
from channels import side_menu
                item = Item()
                item = side_menu.check_user_home(item)
                item.start = True
else:
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
if not config.get_setting('show_once'):
from platformcode import xbmc_videolibrary
xbmc_videolibrary.ask_set_content(1)
config.set_setting('show_once', True)
logger.info(item.tostring())
try:
# If item has no action, stops here
if item.action == "":
logger.info("Item sin accion")
return
# Action for main menu in channelselector
elif item.action == "getmainlist":
import channelselector
itemlist = channelselector.getmainlist()
platformtools.render_items(itemlist, item)
# Action for channel types on channelselector: movies, series, etc.
elif item.action == "getchanneltypes":
import channelselector
itemlist = channelselector.getchanneltypes()
platformtools.render_items(itemlist, item)
# Action for channel listing on channelselector
elif item.action == "filterchannels":
import channelselector
itemlist = channelselector.filterchannels(item.channel_type)
platformtools.render_items(itemlist, item)
# Special action for playing a video from the library
elif item.action == "play_from_library":
play_from_library(item)
return
elif item.action == "keymap":
from platformcode import keymaptools
if item.open:
return keymaptools.open_shortcut_menu()
else:
return keymaptools.set_key()
elif item.action == "script":
from core import tmdb
if tmdb.drop_bd():
platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(60011), time=2000, sound=False)
# Action in certain channel specified in "action" and "channel" parameters
else:
# Entry point for a channel is the "mainlist" action, so here we check parental control
if item.action == "mainlist":
# Parental control
# If it is an adult channel, and user has configured pin, asks for it
if channeltools.is_adult(item.channel) and config.get_setting("adult_request_password"):
tecleado = platformtools.dialog_input("", config.get_localized_string(60334), True)
if tecleado is None or tecleado != config.get_setting("adult_password"):
return
            # # Update the individual channel
# if (item.action == "mainlist" and item.channel != "channelselector" and
# config.get_setting("check_for_channel_updates") == True):
# from core import updater
# updater.update_channel(item.channel)
# Checks if channel exists
channel_file = os.path.join(config.get_runtime_path(),
'channels', item.channel + ".py")
logger.info("channel_file=%s" % channel_file)
channel = None
if os.path.exists(channel_file):
try:
channel = __import__('channels.%s' % item.channel, None,
None, ["channels.%s" % item.channel])
except ImportError:
exec "import channels." + item.channel + " as channel"
logger.info("Running channel %s | %s" % (channel.__name__, channel.__file__))
# Special play action
if item.action == "play":
                # Set the info for trakt
try:
trakt_tools.set_trakt_info(item)
except:
pass
logger.info("item.action=%s" % item.action.upper())
# logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))
# First checks if channel has a "play" function
if hasattr(channel, 'play'):
logger.info("Executing channel 'play' method")
itemlist = channel.play(item)
b_favourite = item.isFavourite
# Play should return a list of playable URLS
if len(itemlist) > 0 and isinstance(itemlist[0], Item):
item = itemlist[0]
if b_favourite:
item.isFavourite = True
platformtools.play_video(item)
                    # Allow several qualities from the channel's play method
elif len(itemlist) > 0 and isinstance(itemlist[0], list):
item.video_urls = itemlist
platformtools.play_video(item)
# If not, shows user an error message
else:
platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(60339))
                # If the channel has no "play" function, use the standard play from platformtools
else:
logger.info("Executing core 'play' method")
platformtools.play_video(item)
# Special action for findvideos, where the plugin looks for known urls
elif item.action == "findvideos":
# First checks if channel has a "findvideos" function
if hasattr(channel, 'findvideos'):
itemlist = getattr(channel, item.action)(item)
itemlist = servertools.filter_servers(itemlist)
# If not, uses the generic findvideos function
else:
logger.info("No channel 'findvideos' method, "
"executing core method")
itemlist = servertools.find_video_items(item)
if config.get_setting("max_links", "videolibrary") != 0:
itemlist = limit_itemlist(itemlist)
from platformcode import subtitletools
subtitletools.saveSubtitleName(item)
platformtools.render_items(itemlist, item)
# Special action for adding a movie to the library
elif item.action == "add_pelicula_to_library":
videolibrarytools.add_movie(item)
        # Special action for adding a series to the library
elif item.action == "add_serie_to_library":
videolibrarytools.add_tvshow(item, channel)
        # Special action for downloading all episodes of a series
elif item.action == "download_all_episodes":
from channels import downloads
item.action = item.extra
del item.extra
downloads.save_download(item)
# Special action for searching, first asks for the words then call the "search" function
elif item.action == "search":
logger.info("item.action=%s" % item.action.upper())
last_search = ""
last_search_active = config.get_setting("last_search", "search")
if last_search_active:
try:
current_saved_searches_list = list(config.get_setting("saved_searches_list", "search"))
last_search = current_saved_searches_list[0]
except:
pass
tecleado = platformtools.dialog_input(last_search)
if tecleado is not None:
if last_search_active and not tecleado.startswith("http"):
from channels import search
search.save_search(tecleado)
itemlist = channel.search(item, tecleado)
else:
return
platformtools.render_items(itemlist, item)
# For all other actions
else:
logger.info("Executing channel '%s' method" % item.action)
itemlist = getattr(channel, item.action)(item)
if config.get_setting('trakt_sync'):
token_auth = config.get_setting("token_trakt", "trakt")
if not token_auth:
trakt_tools.auth_trakt()
else:
import xbmc
if not xbmc.getCondVisibility('System.HasAddon(script.trakt)') and config.get_setting(
'install_trakt'):
trakt_tools.ask_install_script()
itemlist = trakt_tools.trakt_check(itemlist)
else:
config.set_setting('install_trakt', True)
platformtools.render_items(itemlist, item)
except urllib2.URLError, e:
import traceback
logger.error(traceback.format_exc())
# Grab inner and third party errors
if hasattr(e, 'reason'):
logger.error("Razon del error, codigo: %s | Razon: %s" % (str(e.reason[0]), str(e.reason[1])))
texto = config.get_localized_string(30050) # "No se puede conectar con el sitio web"
platformtools.dialog_ok("alfa", texto)
# Grab server response errors
elif hasattr(e, 'code'):
logger.error("Codigo de error HTTP : %d" % e.code)
# "El sitio web no funciona correctamente (error http %d)"
platformtools.dialog_ok("alfa", config.get_localized_string(30051) % e.code)
except WebErrorException, e:
import traceback
logger.error(traceback.format_exc())
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
"\\\\") + '([^.]+)\.py"'
canal = scrapertools.find_single_match(traceback.format_exc(), patron)
        platformtools.dialog_ok(
            config.get_localized_string(59985) + canal,
            config.get_localized_string(60013) % e)
except:
import traceback
logger.error(traceback.format_exc())
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
"\\\\") + '([^.]+)\.py"'
canal = scrapertools.find_single_match(traceback.format_exc(), patron)
try:
import xbmc
if config.get_platform(True)['num_version'] < 14:
log_name = "xbmc.log"
else:
log_name = "kodi.log"
log_message = config.get_localized_string(50004) + xbmc.translatePath("special://logpath") + log_name
except:
log_message = ""
if canal:
platformtools.dialog_ok(
                config.get_localized_string(60087) % canal,
config.get_localized_string(60014),
log_message)
else:
platformtools.dialog_ok(
config.get_localized_string(60038),
config.get_localized_string(60015),
log_message)
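
The pattern matching above is easier to see with concrete values. A minimal standalone sketch, using a made-up runtime path and traceback line, and plain re in place of scrapertools.find_single_match:

import os
import re

runtime_path = "/storage/.kodi/addons/plugin.video.alfa"  # hypothetical config.get_runtime_path()

# Same pattern as above: double the backslashes so Windows paths stay regex-safe
patron = 'File "' + os.path.join(runtime_path, "channels", "").replace("\\", "\\\\") + '([^.]+)\.py"'

# Invented traceback line, as traceback.format_exc() could report it
tb_text = 'File "/storage/.kodi/addons/plugin.video.alfa/channels/animeid.py", line 42, in mainlist'

match = re.search(patron, tb_text)
canal = match.group(1) if match else ""
print(canal)  # -> animeid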
def reorder_itemlist(itemlist):
logger.info()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))
new_list = []
mod_list = []
not_mod_list = []
modified = 0
not_modified = 0
to_change = [[config.get_localized_string(60335), '[V]'],
[config.get_localized_string(60336), '[D]']]
for item in itemlist:
old_title = unicode(item.title, "utf8").lower().encode("utf8")
for before, after in to_change:
if before in item.title:
item.title = item.title.replace(before, after)
break
new_title = unicode(item.title, "utf8").lower().encode("utf8")
if old_title != new_title:
mod_list.append(item)
modified += 1
else:
not_mod_list.append(item)
not_modified += 1
# logger.debug("OLD: %s | NEW: %s" % (old_title, new_title))
new_list.extend(mod_list)
new_list.extend(not_mod_list)
logger.info("Titulos modificados:%i | No modificados:%i" % (modified, not_modified))
if len(new_list) == 0:
new_list = itemlist
# logger.debug("Outlet itemlist size: %i" % len(new_list))
return new_list
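
A minimal illustration of the reordering policy, with plain strings instead of Item objects and invented tag texts (the real ones come from config.get_localized_string(60335) and (60336)): retagged titles move to the front, everything else keeps its relative order.

# Hypothetical tag texts standing in for the localized strings
to_change = [["Watch on", "[V]"], ["Download from", "[D]"]]

titles = ["Watch on mirror A", "Trailer", "Download from mirror B"]
mod_list, not_mod_list = [], []
for title in titles:
    for before, after in to_change:
        if before in title:
            mod_list.append(title.replace(before, after))
            break
    else:
        not_mod_list.append(title)

print(mod_list + not_mod_list)  # -> ['[V] mirror A', '[D] mirror B', 'Trailer']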
def limit_itemlist(itemlist):
logger.info()
# logger.debug("Inlet itemlist size: %i" % len(itemlist))
try:
opt = config.get_setting("max_links", "videolibrary")
if opt == 0:
new_list = itemlist
else:
i_max = 30 * opt
new_list = itemlist[:i_max]
# logger.debug("Outlet itemlist size: %i" % len(new_list))
return new_list
except:
return itemlist
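
So the "max_links" setting acts as a multiplier of 30, with 0 meaning unlimited. A tiny sketch of the same arithmetic:

def limit(links, opt):
    # opt plays the role of the "max_links" setting value
    return links if opt == 0 else links[:30 * opt]

links = list(range(100))
print(len(limit(links, 0)))  # -> 100 (no limit)
print(len(limit(links, 2)))  # -> 60 (at most 2 * 30 links)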
def play_from_library(item):
"""
    When Kodi plays a .strm file it expects something "playable", so the file cannot contain
    further items; at most a selection dialog can be shown.
    We work around this by "tricking Kodi" into believing something has been played; afterwards,
    via "Container.Update()", we load the .strm as if it were an item opened from inside the
    addon, removing all the limitations and allowing playback through the generic function
    without having to create new methods for the video library.
    @type item: item
    @param item: item with information
    """
logger.info()
#logger.debug("item: \n" + item.tostring('\n'))
import xbmcgui
import xbmcplugin
import xbmc
from time import sleep
    # Try to play an image (this does nothing and does not raise an error either)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True,
xbmcgui.ListItem(
path=os.path.join(config.get_runtime_path(), "resources", "subtitle.mp4")))
    # Just in case the image ever did something (in future versions), press stop to halt playback
    sleep(0.5)  # without this pause Kodi locks up
xbmc.Player().stop()
    # Change the action (the video library currently needs "findvideos", since that is where the sources are looked up)
item.action = "findvideos"
window_type = config.get_setting("window_type", "videolibrary")
    # and relaunch Kodi
if xbmc.getCondVisibility('Window.IsMedia') and not window_type == 1:
        # Conventional window
xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" + item.tourl() + ")")
else:
        # Pop-up window
from channels import videolibrary
p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(70004))
p_dialog.update(0, '')
itemlist = videolibrary.findvideos(item)
while platformtools.is_playing():
            # Wait while another video is still playing
sleep(5)
p_dialog.update(50, '')
        '''# Filter the links according to the blacklist
if config.get_setting('filter_servers', "servers"):
itemlist = servertools.filter_servers(itemlist)'''
        # Limit the number of links to show
if config.get_setting("max_links", "videolibrary") != 0:
itemlist = limit_itemlist(itemlist)
# Se "limpia" ligeramente la lista de enlaces
if config.get_setting("replace_VD", "videolibrary") == 1:
itemlist = reorder_itemlist(itemlist)
        p_dialog.update(100, '')
        sleep(0.5)
p_dialog.close()
if len(itemlist) > 0:
while not xbmc.Monitor().abortRequested():
                # The user chooses the mirror
opciones = []
for item in itemlist:
opciones.append(item.title)
                # Open the selection dialog
if (item.contentSerieName != "" and
item.contentSeason != "" and
item.contentEpisodeNumber != ""):
cabecera = ("%s - %sx%s -- %s" %
(item.contentSerieName,
item.contentSeason,
item.contentEpisodeNumber,
config.get_localized_string(30163)))
else:
cabecera = config.get_localized_string(30163)
seleccion = platformtools.dialog_select(cabecera, opciones)
if seleccion == -1:
return
else:
item = videolibrary.play(itemlist[seleccion])[0]
platformtools.play_video(item)
from channels import autoplay
if (platformtools.is_playing() and item.action) or item.server == 'torrent' or autoplay.is_active(item.contentChannel):
break
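
The heart of the .strm workaround fits in three Kodi calls. A stripped-down sketch, assuming it runs inside a plugin invocation (so sys.argv[1] holds the plugin handle) and that dummy_path points at any local file:

import sys
import xbmc
import xbmcgui
import xbmcplugin

def resolve_and_redirect(dummy_path, plugin_url):
    # 1. Satisfy Kodi: pretend the .strm resolved to something playable
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=dummy_path))
    # 2. Stop the dummy playback straight away
    xbmc.Player().stop()
    # 3. Re-enter the addon as if the item had been opened from a listing
    xbmc.executebuiltin("Container.Update(" + plugin_url + ")")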

View File

@@ -19,7 +19,7 @@ import xbmcplugin
from channelselector import get_thumb
from platformcode import unify
from core import channeltools
from core import trakt_tools
from core import trakt_tools, scrapertoolsV2
from core.item import Item
from platformcode import logger
@@ -696,7 +696,6 @@ def play_video(item, strm=False, force_direct=False, autoplay=False):
return
    # Get the video information.
mediaurl = alfaresolver.av(mediaurl)
if not item.contentThumbnail:
thumb = item.thumbnail
else:
@@ -740,15 +739,53 @@ def get_seleccion(default_action, opciones, seleccion, video_urls):
seleccion = dialog_select(config.get_localized_string(30163), opciones)
    # Watch in low quality
elif default_action == 1:
seleccion = 0
resolutions = []
for url in video_urls:
res = calcResolution(url[0])
if res:
resolutions.append(res)
if resolutions:
seleccion = resolutions.index(min(resolutions))
else:
seleccion = 0
    # Watch in high quality
elif default_action == 2:
seleccion = len(video_urls) - 1
resolutions = []
for url in video_urls:
res = calcResolution(url[0])
if res:
resolutions.append(res)
if resolutions:
seleccion = resolutions.index(max(resolutions))
else:
seleccion = len(video_urls) - 1
else:
seleccion = 0
return seleccion
def calcResolution(option):
match = scrapertoolsV2.find_single_match(option, '([0-9]{2,4})x([0-9]{2,4})')
resolution = False
if match:
resolution = int(match[0])*int(match[1])
else:
if '240p' in option:
resolution = 320 * 240
elif '360p' in option:
resolution = 480 * 360
elif ('480p' in option) or ('480i' in option):
resolution = 720 * 480
        elif ('576p' in option) or ('576i' in option):
resolution = 720 * 576
elif ('720p' in option) or ('HD' in option):
resolution = 1280 * 720
elif ('1080p' in option) or ('1080i' in option) or ('Full HD' in option):
resolution = 1920 * 1080
return resolution
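
With this helper, "low quality" and "high quality" become the options with the smallest and largest pixel counts instead of simply the first and last list entries. A few illustrative inputs (the labels are invented, and this assumes calcResolution is available as defined above):

print(calcResolution("mp4 1280x720"))   # -> 921600 (explicit WxH wins)
print(calcResolution("flv 360p"))       # -> 172800 (recognised quality label)
print(calcResolution("direct stream"))  # -> False (nothing recognised, caller falls back)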
def show_channel_settings(**kwargs):
"""
    Shows a custom settings dialog for each channel and saves the data when it is closed.

View File

@@ -1046,7 +1046,7 @@ msgid "Movie"
msgstr ""
msgctxt "#60245"
msgid "Series"
msgid "tv show"
msgstr ""
msgctxt "#60246"
@@ -1249,10 +1249,6 @@ msgctxt "#60297"
msgid "Find %s possible matches"
msgstr ""
msgctxt "#60298"
msgid "[%s]: Select the correct TV series"
msgstr ""
msgctxt "#60299"
msgid "Not found in the language '%s'"
msgstr ""
@@ -4267,10 +4263,6 @@ msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr ""
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr ""
msgctxt "#70394"
msgid "Action"
msgstr ""
@@ -4811,10 +4803,6 @@ msgctxt "#70528"
msgid "Default folder"
msgstr ""
msgctxt "#70529"
msgid "Repeated link"
msgstr ""
msgctxt "#70530"
msgid "You already have this link in the folder"
msgstr ""
@@ -4943,3 +4931,6 @@ msgctxt "#70561"
msgid "Search Similar
msgstr ""
msgctxt "#70562"
msgid "autoplay"
msgstr "Enable autoplay in all channels"

View File

@@ -1034,8 +1034,8 @@ msgid "Movie"
msgstr "Film"
msgctxt "#60245"
msgid "Series"
msgstr "Serie"
msgid "tv show"
msgstr "serie"
msgctxt "#60246"
msgid "Full information"
@@ -1237,10 +1237,6 @@ msgctxt "#60297"
msgid "Find %s possible matches"
msgstr "Trovate %s possibili corrispondenze"
msgctxt "#60298"
msgid "[%s]: Select the correct TV series"
msgstr "[%s]: Seleziona la serie corretta"
msgctxt "#60299"
msgid "Not found in the language '%s'"
msgstr "Non trovato nella lingua '%s'"
@@ -4247,10 +4243,6 @@ msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr "Dai un punteggio con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Seleziona il %s corretto"
msgctxt "#70394"
msgid "Action"
msgstr "Azione"
@@ -4799,10 +4791,6 @@ msgctxt "#70528"
msgid "Default folder"
msgstr "Cartella di Default"
msgctxt "#70529"
msgid "Repeated link"
msgstr "Link ripetuto"
msgctxt "#70530"
msgid "You already have this link in the folder"
msgstr "C'è già un link nella cartella"
@@ -4932,3 +4920,6 @@ msgid "Search Similar
msgstr "Cerca Simili"
msgctxt "#70562"
msgid "autoplay"
msgstr "Abilita autoplay in tutti i canali"

View File

@@ -1046,8 +1046,8 @@ msgid "Movie"
msgstr "Película"
msgctxt "#60245"
msgid "Series"
msgstr "Serie"
msgid "tv show"
msgstr "serie"
msgctxt "#60246"
msgid "Full information"
@@ -1249,10 +1249,6 @@ msgctxt "#60297"
msgid "Find %s possible matches"
msgstr "Encontrados %s posibles coincidencias"
msgctxt "#60298"
msgid "[%s]: Select the correct TV series"
msgstr "[%s]: Selecciona la serie correcta"
msgctxt "#60299"
msgid "Not found in the language '%s'"
msgstr "No se ha encontrado en idioma '%s'"
@@ -4263,10 +4259,6 @@ msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr "Puntuar con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Selecciona la %s correcta"
msgctxt "#70394"
msgid "Action"
msgstr "Accion"
@@ -4811,10 +4803,6 @@ msgctxt "#70528"
msgid "Default folder"
msgstr "Carpeta por defecto"
msgctxt "#70529"
msgid "Repeated link"
msgstr "Enlace repetido"
msgctxt "#70530"
msgid "You already have this link in the folder"
msgstr "Ya tienes este enlace en la carpeta"

View File

@@ -1046,8 +1046,8 @@ msgid "Movie"
msgstr "Película"
msgctxt "#60245"
msgid "Series"
msgstr "Serie"
msgid "tv show"
msgstr "serie"
msgctxt "#60246"
msgid "Full information"
@@ -1249,10 +1249,6 @@ msgctxt "#60297"
msgid "Find %s possible matches"
msgstr "Encontrados %s posibles coincidencias"
msgctxt "#60298"
msgid "[%s]: Select the correct TV series"
msgstr "[%s]: Selecciona la serie correcta"
msgctxt "#60299"
msgid "Not found in the language '%s'"
msgstr "No se ha encontrado en idioma '%s'"
@@ -4263,10 +4259,6 @@ msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr "Puntuar con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Selecciona la %s correcta"
msgctxt "#70394"
msgid "Action"
msgstr "Accion"
@@ -4811,10 +4803,6 @@ msgctxt "#70528"
msgid "Default folder"
msgstr "Carpeta por defecto"
msgctxt "#70529"
msgid "Repeated link"
msgstr "Enlace repetido"
msgctxt "#70530"
msgid "You already have this link in the folder"
msgstr "Ya tienes este enlace en la carpeta"

View File

@@ -1046,8 +1046,8 @@ msgid "Movie"
msgstr "Película"
msgctxt "#60245"
msgid "Series"
msgstr "Serie"
msgid "tv show"
msgstr "serie"
msgctxt "#60246"
msgid "Full information"
@@ -1249,10 +1249,6 @@ msgctxt "#60297"
msgid "Find %s possible matches"
msgstr "Encontrados %s posibles coincidencias"
msgctxt "#60298"
msgid "[%s]: Select the correct TV series"
msgstr "[%s]: Selecciona la serie correcta"
msgctxt "#60299"
msgid "Not found in the language '%s'"
msgstr "No se ha encontrado en idioma '%s'"
@@ -4263,10 +4259,6 @@ msgctxt "#70392"
msgid "Rate with a [COLOR %s]%s[/ COLOR]"
msgstr "Puntuar con un [COLOR %s]%s[/COLOR]"
msgctxt "#70393"
msgid "[%s]: Select the correct %s "
msgstr "[%s]: Selecciona la %s correcta"
msgctxt "#70394"
msgid "Action"
msgstr "Accion"
@@ -4811,10 +4803,6 @@ msgctxt "#70528"
msgid "Default folder"
msgstr "Carpeta por defecto"
msgctxt "#70529"
msgid "Repeated link"
msgstr "Enlace repetido"
msgctxt "#70530"
msgid "You already have this link in the folder"
msgstr "Ya tienes este enlace en la carpeta"
@@ -4943,7 +4931,9 @@ msgctxt "#70561"
msgid "Search Similar"
msgstr "Buscar Similares"
msgctxt "#70562"
msgid "autoplay"
msgstr "Habilitar reproducción automática en todos los canales"

Binary files not shown: 35 images changed (previous sizes 5.4 KiB to 134 KiB).
View File

@@ -3,6 +3,7 @@
<category label="70168">
<setting id="player_mode" type="enum" values="Direct|SetResolvedUrl|Built-In|Download and Play" label="30044" default="0"/>
<setting id="default_action" type="enum" lvalues="30006|30007|30008" label="30005" default="0"/>
<setting id="autoplay" type="bool" label="70562" default="false" visible="true"/>
<setting id="thumbnail_type" type="enum" lvalues="30011|30012|30200" label="30010" default="2"/>
<setting id="channel_language" type="labelenum" values="all|cast|lat" label="30019" default="all"/>
<setting id="trakt_sync" type="bool" label="70109" default="false"/>

Some files were not shown because too many files have changed in this diff.