Merge remote-tracking branch 'upstream/master'

Kingbox committed 2018-11-07 14:21:43 +01:00
19 changed files with 584 additions and 205 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.10" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.11" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,17 +19,16 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
-¤ anitoons ¤ goovie ¤ playporn
-¤ gnula ¤ pelisr ¤ peliculonhd
-¤ thevid ¤ vidcloud ¤ xhamster
-¤ descargacineclasico
+¤ anitoons ¤ rexpelis ¤ bitertv
+¤ fembed ¤ animeflv ¤ canalpelis
+¤ dilo ¤ fanpelis ¤ pelisplus
+¤ pelisr ¤ retroseries ¤ datoporn
+¤ newpct1 ¤ subtorrents ¤ sleazemovies
[COLOR green][B]Novedades[/B][/COLOR]
-¤ repelis.live ¤ zonaworld ¤ subtorrents
-¤ fex ¤ xdrive ¤ cat3plus
-¤ sleazemovies
+¤ tupelicula
-¤ Agradecimientos a @diegotcba y @sculkurt por colaborar en ésta versión
+¤ Agradecimientos a @sculkurt por colaborar en ésta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -181,7 +181,7 @@ def episodios(item):
itemlist.append(item.clone(title=title, url=url, action='findvideos', show=info[1]))
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca",
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios"))
return itemlist

View File

@@ -9,5 +9,15 @@
"categories": [
"tvshow",
"anime"
-]
+],
+"settings": [
+{
+"id": "include_in_global_search",
+"type": "bool",
+"label": "Incluir en busqueda global",
+"default": true,
+"enabled": true,
+"visible": true
+}
+]
}
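
Reviewer note (not part of the diff): the `include_in_global_search` setting added above is what Alfa consults before including a channel in its global search. A minimal sketch of reading such a flag, assuming the `config.get_setting(name, channel)` accessor used elsewhere in the codebase; the surrounding filter function is a hypothetical illustration:

# --- sketch, not part of the commit ---
from platformcode import config

def channels_for_global_search(channel_ids):
    # Keep only channels whose JSON declares (or the user enabled)
    # include_in_global_search; the JSON "default" is the fallback value.
    return [c for c in channel_ids
            if config.get_setting('include_in_global_search', c)]
# --- end sketch ---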

View File

@@ -22,7 +22,7 @@ list_servers = ['openload',
list_quality = ['default']
host = "http://www.anitoonstv.com"
host = "https://www.anitoonstv.com"
def mainlist(item):
@@ -38,17 +38,45 @@ def mainlist(item):
thumbnail=thumb_series, range=[0,19] ))
itemlist.append(Item(channel=item.channel, action="lista", title="Especiales", url=host+"/catalogo.php?g=&t=especiales&o=0",
thumbnail=thumb_series, range=[0,19]))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
thumbnail=thumb_series, range=[0,19]))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
+def search(item, texto):
+logger.info()
+texto = texto.replace(" ", "+")
+item.url = host +"/php/buscar.php"
+item.texto = texto
+if texto != '':
+return sub_search(item)
+else:
+return []
+def sub_search(item):
+logger.info()
+itemlist = []
+post = "b=" + item.texto
+headers = {"X-Requested-With":"XMLHttpRequest"}
+data = httptools.downloadpage(item.url, post=post, headers=headers).data
+patron = "href='([^']+).*?"
+patron += ">([^<]+)"
+matches = scrapertools.find_multiple_matches(data, patron)
+for scrapedurl, scrapedtitle in matches:
+itemlist.append(item.clone(action = "episodios",
+title = scrapedtitle,
+url = scrapedurl
+))
+return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
#logger.info("Pagina para regex "+data)
@@ -98,7 +126,7 @@ def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div class="pagina">(.*?)</ul>'
patron = '<div class="pagina">(.*?)cajaSocial'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<li><a href='(.+?)'>Cap(?:i|í)tulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)
@@ -173,6 +201,8 @@ def findvideos(item):
if "goo" in url:
url = googl(url)
server='netutv'
if "hqq" in url:
server='netutv'
if "ok" in url:
url = "https:"+url
server='okru'

View File

@@ -30,8 +30,8 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Todas", action="list_all",
url=host+'/category/terror', thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, fanart=fanart, title="Asiaticas", action="list_all",
url=host+'/category/asiatico', thumbnail=get_thumb('asiaticas', auto=True)))

View File

@@ -348,31 +348,38 @@ def episodios(item):
return itemlist
def findvideos(item):
logger.info()
from lib import generictools
+import urllib
+import base64
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'
patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
matches = re.compile(patron, re.DOTALL).findall(data)
+for id, option, lang in matches:
+lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
+if lang == 'en':
+lang = 'VOSE'
+post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'movie'}
+post = urllib.urlencode(post)
+test_url = '%swp-admin/admin-ajax.php' % host
+new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
+hidden_url = scrapertools.find_single_match(new_data, "src='([^']+)'")
+new_data = httptools.downloadpage(hidden_url, follow_redirects=False)
+try:
+b64_url = scrapertools.find_single_match(new_data.headers['location'], "y=(.*)")
+url = base64.b64decode(b64_url)
+except:
+url = hidden_url
+if url != '':
+itemlist.append(
+Item(channel=item.channel, url=url, title='%s', action='play', language=lang,
+infoLabels=item.infoLabels))
-for option, url in matches:
-datas = httptools.downloadpage(urlparse.urljoin(host, url),
-headers={'Referer': item.url}).data
-patron = '<iframe[^>]+src="([^"]+)"'
-url = scrapertools.find_single_match(datas, patron)
-lang = scrapertools.find_single_match(
-data, '<li><a class="options" href="#option-%s"><b class="icon-play_arrow"><\/b> (.*?)<span class="dt_flag">' % option)
-lang = lang.replace('Español ', '').replace('B.S.O. ', '')
-server = servertools.get_server_from_url(url)
-title = "%s [COLOR yellow](%s) (%s)[/COLOR]" % (item.contentTitle, server.title(), lang)
-itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
-server=server, language = lang, text_color=color3))
+itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
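
Reviewer note (not part of the diff): the rewritten findvideos above follows the DooPlay theme flow: POST the option ids to wp-admin/admin-ajax.php, pull the iframe src out of the response, then request that URL without following redirects and read the Location header, where the real URL travels base64-encoded in a y= parameter. A self-contained sketch under those assumptions; `requests` stands in for Alfa's httptools and the host is hypothetical:

# --- sketch, not part of the commit ---
import re
import base64
import urllib
import requests

HOST = 'https://example-doo-site.tld/'  # hypothetical DooPlay site

def resolve_option(page_url, post_id, nume, content_type='movie'):
    # DooPlay serves its players through the WordPress AJAX endpoint
    post = urllib.urlencode({'action': 'doo_player_ajax', 'post': post_id,
                             'nume': nume, 'type': content_type})
    data = requests.post(HOST + 'wp-admin/admin-ajax.php', data=post,
                         headers={'Referer': page_url}).text
    hidden_url = re.search("src='([^']+)'", data).group(1)
    # The iframe URL answers with a redirect; its target hides the real
    # video URL base64-encoded in the y= query parameter
    resp = requests.get(hidden_url, allow_redirects=False)
    m = re.search('y=(.*)', resp.headers.get('location', ''))
    return base64.b64decode(m.group(1)) if m else hidden_url
# --- end sketch ---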

View File

@@ -1,57 +1,73 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
-from lib import jsunpack
from platformcode import logger
-def test_video_exists(page_url):
-logger.info("(page_url='%s')" % page_url)
+def mainlist(item):
+logger.info()
+itemlist = []
-data = httptools.downloadpage(page_url).data
-if 'File Not Found' in data or '404 Not Found' in data:
-return False, "[Datoporn] El archivo no existe o ha sido borrado"
-return True, ""
itemlist.append(item.clone(action="categorias", title="Categorías", url="http://dato.porn/categories_all", contentType="movie", viewmode="movie"))
itemlist.append(item.clone(title="Buscar...", action="search", contentType="movie", viewmode="movie"))
return itemlist
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-logger.info("url=" + page_url)
+def search(item, texto):
+logger.info()
+item.url = "http://dato.porn/?k=%s&op=search" % texto.replace(" ", "+")
+return lista(item)
-data = httptools.downloadpage(page_url).data
-logger.debug(data)
-media_urls = scrapertools.find_multiple_matches(data, 'src: "([^"]+)",.*?label: "([^"]+)"')
-#media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
-# if not media_urls:
-# match = scrapertools.find_single_match(data, "p,a,c,k(.*?)</script>")
-# try:
-# data = jsunpack.unpack(match)
-# except:
-# pass
-# media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
-# Extrae la URL
-calidades = []
-video_urls = []
-for media_url in sorted(media_urls, key=lambda x: int(x[1][-3:])):
-calidades.append(int(media_url[1][-3:]))
-try:
-title = ".%s %sp [datoporn]" % (media_url[0].rsplit('.', 1)[1], media_url[1][-3:])
-except:
-title = ".%s %sp [datoporn]" % (media_url[-4:], media_url[1][-3:])
-video_urls.append([title, media_url[0]])
+def lista(item):
+logger.info()
+itemlist = []
-sorted(calidades)
-m3u8 = scrapertools.find_single_match(data, 'file\:"([^"]+\.m3u8)"')
-if not m3u8:
-m3u8 = str(scrapertools.find_multiple_matches(data, 'player.updateSrc\({src:.?"([^"]+\.m3u8)"')).replace("['", "").replace("']", "")
-calidades = ['720p']
-if m3u8:
-video_urls.insert(0, [".m3u8 %s [datoporn]" % calidades[-1], m3u8])
+# Descarga la pagina
+data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
-for video_url in video_urls:
-logger.info("%s - %s" % (video_url[0], video_url[1]))
+# Extrae las entradas
+patron = '<div class="videobox">\s*<a href="([^"]+)".*?url\(\'([^\']+)\'.*?<span>(.*?)<\/span><\/div><\/a>.*?class="title">(.*?)<\/a><span class="views">.*?<\/a><\/span><\/div> '
+matches = scrapertools.find_multiple_matches(data, patron)
+for scrapedurl, scrapedthumbnail, duration, scrapedtitle in matches:
+if "/embed-" not in scrapedurl:
+#scrapedurl = scrapedurl.replace("dato.porn/", "dato.porn/embed-") + ".html"
+scrapedurl = scrapedurl.replace("datoporn.co/", "datoporn.co/embed-") + ".html"
+if duration:
+scrapedtitle = "%s - %s" % (duration, scrapedtitle)
+scrapedtitle += ' gb'
+scrapedtitle = scrapedtitle.replace(":", "'")
-return video_urls
+#logger.debug(scrapedurl + ' / ' + scrapedthumbnail + ' / ' + duration + ' / ' + scrapedtitle)
+itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg")))
+# Extrae la marca de siguiente página
+#next_page = scrapertools.find_single_match(data, '<a href=["|\']([^["|\']+)["|\']>Next')
+next_page = scrapertools.find_single_match(data, '<a class=["|\']page-link["|\'] href=["|\']([^["|\']+)["|\']>Next')
+if next_page and itemlist:
+itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
+return itemlist
+def categorias(item):
+logger.info()
+itemlist = []
+# Descarga la pagina
+data = httptools.downloadpage(item.url).data
+# Extrae las entradas (carpetas)
+patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\((.*?)\).*?<span>(.*?)</span>.*?<b>(.*?)</b>'
+matches = scrapertools.find_multiple_matches(data, patron)
+for scrapedurl, scrapedthumbnail, numero, scrapedtitle in matches:
+if numero:
+scrapedtitle = "%s (%s)" % (scrapedtitle, numero)
+itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail))
+return itemlist

View File

@@ -204,14 +204,13 @@ def episodesxseason(item):
data = jsontools.load(httptools.downloadpage(seasons_url, post=post, headers=headers).data)
infoLabels = item.infoLabels
for dict in data:
episode = dict['number']
epi_name = dict['name']
title = '%sx%s - %s' % (season, episode, epi_name)
url = '%s%s/' % (host, dict['permalink'])
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, title=title, action='findvideos', url=url,
-contentEpisodeNumber=season, id=item.id, infoLabels=infoLabels))
+contentEpisodeNumber=episode, id=item.id, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

View File

@@ -247,10 +247,17 @@ def episodesxseason(item):
def findvideos(item):
logger.info()
+import urllib
itemlist = []
data = get_source(item.url)
patron = '<div class="movieplay"><iframe src="([^"]+)"'
player = scrapertools.find_single_match(data, "({'action': 'movie_player','foobar_id':\d+,})")
post = eval(player)
post = urllib.urlencode(post)
data = httptools.downloadpage(host+'wp-admin/admin-ajax.php', post=post, headers={'Referer':item.url}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<iframe src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)

View File

@@ -19,7 +19,7 @@ from lib import generictools
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
-list_quality = []
+list_quality = ['360p', '480p', '720p', '1080']
list_servers = [
'directo',
@@ -109,7 +109,8 @@ def list_all(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
-title = scrapedtitle
+year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
+title = scrapertools.find_single_match(scrapedtitle, '([^\(]+)\(?').strip()
thumbnail = scrapedthumbnail
filter_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w300", "")
filter_list = {"poster_path": filter_thumb}
@@ -120,14 +121,14 @@ def list_all(item):
title=title,
url=url,
thumbnail=thumbnail,
-infoLabels={'filtro':filter_list})
+infoLabels={'filtro':filter_list, 'year':year})
if item.type == 'peliculas' or 'serie' not in url:
new_item.action = 'findvideos'
-new_item.contentTitle = scrapedtitle
+new_item.contentTitle = title
else:
new_item.action = 'seasons'
-new_item.contentSerieName = scrapedtitle
+new_item.contentSerieName = title
itemlist.append(new_item)
@@ -147,7 +148,7 @@ def seasons(item):
itemlist=[]
data=get_source(item.url)
patron='data-toggle="tab">TEMPORADA (\d+)</a>'
patron='data-toggle="tab">TEMPORADA.?(\d+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -182,8 +183,7 @@ def episodesxseasons(item):
season = item.infoLabels['season']
data=get_source(item.url)
season_data = scrapertools.find_single_match(data, 'id="pills-vertical-%s">(.*?)</div>' % season)
patron='href="([^"]+)".*?block">Capitulo (\d+) - ([^<]+)<'
patron='href="([^"]+)".*?block">Capitulo(\d+) -.?([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(season_data)
infoLabels = item.infoLabels
@@ -218,36 +218,53 @@ def section(item):
def findvideos(item):
logger.info()
+import urllib
itemlist = []
data = get_source(item.url)
servers_page = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = get_source(servers_page)
patron = '<a href="([^"]+)"'
patron = 'video\[\d+\] = "([^"]+)";'
matches = re.compile(patron, re.DOTALL).findall(data)
-for enc_url in matches:
-url_data = get_source(enc_url, referer=item.url)
-hidden_url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
-if 'server' in hidden_url:
-hidden_data = get_source(hidden_url)
-url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
-else:
-url = hidden_url
-if 'pelishd.tv' in url:
-vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False).data
-dejuiced = generictools.dejuice(vip_data)
-url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
+for video_url in matches:
language = 'latino'
if not config.get_setting('unify'):
title = ' [%s]' % language.capitalize()
else:
title = ''
-itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
-infoLabels=item.infoLabels))
+if 'pelisplus.net' in video_url:
+referer = video_url
+post = {'r':item.url}
+post = urllib.urlencode(post)
+video_url = video_url.replace('/v/', '/api/sources/')
+url_data = httptools.downloadpage(video_url, post=post, headers={'Referer':referer}).data
+patron = '"file":"([^"]+)","label":"([^"]+)"'
+matches = re.compile(patron, re.DOTALL).findall(url_data)
+for url, quality in matches:
+url = url.replace('\/', '/')
+itemlist.append(
+Item(channel=item.channel, title='%s' + title, url=url, action='play', language=IDIOMAS[language],
+quality=quality, infoLabels=item.infoLabels))
+else:
+url_data = get_source(video_url)
+url = scrapertools.find_single_match(url_data, '<iframe src="([^"]+)"')
+if 'server' in url:
+hidden_data = get_source(hidden_url)
+url = scrapertools.find_single_match(hidden_data, '<iframe src="([^"]+)"')
+else:
+url = url
+if 'pelishd.net' in url:
+vip_data = httptools.downloadpage(url, headers={'Referer':item.url}, follow_redirects=False).data
+dejuiced = generictools.dejuice(vip_data)
+url = scrapertools.find_single_match(dejuiced, '"file":"([^"]+)"')
+if url != '':
+itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', language=IDIOMAS[language],
+infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
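
Reviewer note (not part of the diff): for pelisplus.net links the new branch above rewrites the embed path /v/<id> to the site's /api/sources/<id> endpoint and POSTs r=<page url>, getting back "file"/"label" pairs. A hedged, self-contained sketch of that exchange; `requests` stands in for Alfa's httptools and the URL shape is taken from the code above:

# --- sketch, not part of the commit ---
import re
import urllib
import requests

def fetch_sources(embed_url, page_url):
    # /v/<id> -> /api/sources/<id>, as done in findvideos above
    api_url = embed_url.replace('/v/', '/api/sources/')
    post = urllib.urlencode({'r': page_url})
    data = requests.post(api_url, data=post,
                         headers={'Referer': embed_url}).text
    # Each source arrives as "file":"...","label":"..."; unescape the slashes
    pairs = re.findall(r'"file":"([^"]+)","label":"([^"]+)"', data)
    return [(url.replace('\\/', '/'), label) for url, label in pairs]
# --- end sketch ---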

View File

@@ -266,10 +266,13 @@ def findvideos(item):
pass
else:
url = scrapedurl
url = url +"|referer=%s" % item.url
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
try:
url = url +"|referer=%s" % item.url
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
except:
pass
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())

View File

@@ -35,10 +35,10 @@ def mainlist(item):
section='genres'))
itemlist.append(item.clone(title="Por Año", action="section", url=host, thumbnail=get_thumb('year', auto=True),
-section='year'))
+section='releases'))
itemlist.append(item.clone(title="Alfabetico", action="section", url=host, thumbnail=get_thumb('alphabet', auto=True),
section='abc'))
#itemlist.append(item.clone(title="Alfabetico", action="section", url=host, thumbnail=get_thumb('alphabet', auto=True),
# section='glossary'))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
@@ -49,7 +49,7 @@ def mainlist(item):
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
@@ -57,11 +57,12 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
-patron = '<article id=post-.*?<a href=(.*?)><img src=(.*?) alt=(.*?)><.*?<span>(.*?)<'
+patron = '<article id="post-\d+.*?<img src="([^"]+)" alt="([^"]+)">.*?'
+patron += '<a href="([^"]+)">.*?</h3> <span></span> <span>(\d{4})<'
matches = re.compile(patron, re.DOTALL).findall(data)
-for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
+for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
url = scrapedurl
contentSerieName = scrapedtitle
@@ -76,8 +77,7 @@ def list_all(item):
tmdb.set_infoLabels_itemlist(itemlist, True)
# Paginación
-url_next_page = scrapertools.find_single_match(data,'rel=next.*?href=(.*?) ')
+url_next_page = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
@@ -88,8 +88,8 @@ def section(item):
itemlist = []
data = get_source(item.url)
-data = scrapertools.find_single_match(data, '<ul class=%s(.*?)</ul>' % item.section)
-patron = '<a href=(.*?)>(.*?)</a>'
+data = scrapertools.find_single_match(data, '<ul class="%s(.*?)</ul>' % item.section)
+patron = '<a href="([^"]+)".?>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -103,7 +103,7 @@ def seasons(item):
itemlist = []
data = get_source(item.url)
-patron = '<span class=title>Temporada(\d+) <'
+patron = '<span class="title">Temporada (\d+) <'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle in matches:
@@ -138,8 +138,8 @@ def episodesxseason(item):
data = get_source(item.url)
infoLabels = item.infoLabels
season = infoLabels['season']
-patron = '<img src=([^>]+)></a></div><div class=numerando>%s - (\d+)</div>' % season
-patron += '<div class=episodiotitle><a href=(.*?)>(.*?)</a><'
+patron = '<img src="([^>]+)"></a></div><div class="numerando">%s+ - (\d+)</div>' % season
+patron += '<div class="episodiotitle"><a href="([^"]+)">(.*?)</a><'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedepi, scrapedurl, scrapedtitle in matches:
@@ -156,24 +156,29 @@ def episodesxseason(item):
def findvideos(item):
logger.info()
from lib import generictools
+import urllib
itemlist = []
data = get_source(item.url)
-patron = 'id=([^ ]+) class=play-box-iframe .*?src=(.*?) frameborder=0.*?'
+patron = 'data-post="(\d+)" data-nume="(\d+)".*?img src=\'([^\']+)\''
matches = re.compile(patron, re.DOTALL).findall(data)
+for id, option, lang in matches:
+lang = scrapertools.find_single_match(lang, '.*?/flags/(.*?).png')
+if lang == 'ar':
+lang = 'lat'
+post = {'action': 'doo_player_ajax', 'post': id, 'nume': option, 'type':'tv'}
+post = urllib.urlencode(post)
-for option, scrapedurl in matches:
-#language = scrapertools.find_single_match(data, '#%s.*?dt_flag><img src=.*?flags/(.*?).png' % option)
-#title = '%s [%s]'
-language = ''
-title = '%s'
-SerieName = item.contentSerieName
-itemlist.append(Item(channel=item.channel, title=title, contentSerieName=SerieName, url=scrapedurl,
-action='play', language=language, infoLabels=item.infoLabels))
+test_url = '%swp-admin/admin-ajax.php' % host
+new_data = httptools.downloadpage(test_url, post=post, headers={'Referer':item.url}).data
+url = scrapertools.find_single_match(new_data, "src='([^']+)'")
+if url != '':
+itemlist.append(
+Item(channel=item.channel, url=url, title='%s', action='play', language=lang,
+infoLabels=item.infoLabels))
-itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
+#itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server.capitalize(), i.language))
+itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
def search_results(item):
@@ -182,7 +187,8 @@ def search_results(item):
itemlist = []
data = get_source(item.url)
-patron = '<article.*?<a href=(.*?)><img src=(.*?) alt=(.*?)><.*?year>(.*?)<.*?<p>(.*?)</p>'
+data = scrapertools.find_single_match(data, '<h1>Resultados encontrados:(.*?)genres')
+patron = '<article.*?<a href="([^"]+)"><img src="([^"]+)".*?alt="([^"]+)".*?class="year">(\d{4}).*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot in matches:

View File

@@ -59,29 +59,6 @@ def configuracion(item):
return ret
-def estrenos(item):
-logger.info()
-itemlist = []
-data = httptools.downloadpage(item.url).data
-patron = 'item-pelicula.*?href="([^"]+).*?'
-patron += 'src="([^"]+).*?'
-patron += '<p>([^<]+).*?'
-patron += '<span>([^<]+)'
-matches = scrapertools.find_multiple_matches(data, patron)
-for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
-scrapedtitle = scrapedtitle.replace("Película ","")
-itemlist.append(Item(channel = item.channel,
-action = "findvideos",
-contentTitle = scrapedtitle,
-infoLabels = {'year':scrapedyear},
-thumbnail = scrapedthumbnail,
-title = scrapedtitle + " (%s)" %scrapedyear,
-url = scrapedurl
-))
-tmdb.set_infoLabels(itemlist)
-return itemlist
def search(item, texto):
logger.info()
item.url = host + "/suggest?que=" + texto
@@ -104,9 +81,7 @@ def sub_search(item):
data = httptools.downloadpage(item.url).data
token = scrapertools.find_single_match(data, 'csrf-token" content="([^"]+)')
data = httptools.downloadpage(item.url + "&_token=" + token, headers=headers).data
#logger.info("Intel33 %s" %data)
data_js = jsontools.load(data)["data"]["m"]
#logger.info("Intel44 %s" %data_js)
for js in data_js:
itemlist.append(Item(channel = item.channel,
action = "findvideos",
@@ -128,6 +103,51 @@ def sub_search(item):
return itemlist
+def peliculas_gen(item):
+logger.info()
+itemlist = []
+data = httptools.downloadpage(item.url).data
+patron = 'item-pelicula.*?href="([^"]+)".*?'
+patron += 'src="([^"]+)".*?'
+patron += 'text-center">([^<]+).*?'
+patron += '<p>([^<]+)'
+matches = scrapertools.find_multiple_matches(data, patron)
+for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
+itemlist.append(Item(channel = item.channel,
+action = "findvideos",
+contentTitle = scrapedtitle,
+infoLabels = {'year':scrapedyear},
+thumbnail = scrapedthumbnail,
+title = scrapedtitle + " (%s)" %scrapedyear,
+url = scrapedurl
+))
+tmdb.set_infoLabels(itemlist)
+return itemlist
+def estrenos(item):
+logger.info()
+itemlist = []
+data = httptools.downloadpage(item.url).data
+patron = 'item-pelicula.*?href="([^"]+).*?'
+patron += 'src="([^"]+).*?'
+patron += 'text-center">([^<]+).*?'
+patron += '<p>([^<]+)'
+matches = scrapertools.find_multiple_matches(data, patron)
+for scrapedurl, scrapedthumbnail, scrapedyear, scrapedtitle in matches:
+scrapedtitle = scrapedtitle.replace("Película ","")
+itemlist.append(Item(channel = item.channel,
+action = "findvideos",
+contentTitle = scrapedtitle,
+infoLabels = {'year':scrapedyear},
+thumbnail = scrapedthumbnail,
+title = scrapedtitle + " (%s)" %scrapedyear,
+url = scrapedurl
+))
+tmdb.set_infoLabels(itemlist)
+return itemlist
def peliculas(item):
logger.info()
itemlist = []
@@ -139,9 +159,7 @@ def peliculas(item):
post = "page=%s&type=%s&_token=%s" %(item.page, item.type, token)
if item.slug:
post += "&slug=%s" %item.slug
#logger.info("Intel11 %s" %post)
data = httptools.downloadpage(host + "/pagination", post=post, headers=headers).data
#logger.info("Intel11 %s" %data)
patron = '(?s)href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'text-center">([^<]+).*?'
@@ -215,28 +233,6 @@ def generos(item):
return itemlist
-def peliculas_gen(item):
-logger.info()
-itemlist = []
-data = httptools.downloadpage(item.url).data
-patron = 'item-pelicula.*?href="([^"]+)".*?'
-patron += 'src="([^"]+)".*?'
-patron += '<p>([^<]+).*?'
-patron += '<span>([^<]+)'
-matches = scrapertools.find_multiple_matches(data, patron)
-for scrapedurl, scrapedthumbnail, scrapedtitle , scrapedyear in matches:
-itemlist.append(Item(channel = item.channel,
-action = "findvideos",
-contentTitle = scrapedtitle,
-infoLabels = {'year':scrapedyear},
-thumbnail = scrapedthumbnail,
-title = scrapedtitle + " (%s)" %scrapedyear,
-url = scrapedurl
-))
-tmdb.set_infoLabels(itemlist)
-return itemlist
def annos(item):
logger.info()
itemlist = []
@@ -260,18 +256,18 @@ def annos(item):
def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
-patron = '(?i)<iframe.*?src="([^"]+).*?'
-patron += ''
+patron = "video\[(\d)+\] = '([^']+)"
matches = scrapertools.find_multiple_matches(data, patron)
-for scrapedurl in matches:
-titulo = "Ver en: %s"
+for scrapedoption, scrapedurl in matches:
+tit = scrapertools.find_single_match(data, 'option%s">([^<]+)' %scrapedoption)
+if "VIP" in tit: tit = "fembed"
+titulo = "Ver en %s" %tit.capitalize()
itemlist.append(
item.clone(channel = item.channel,
action = "play",
title = titulo,
-url = scrapedurl
+url = host + "/embed/%s/" %scrapedurl
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
tmdb.set_infoLabels(itemlist, __modo_grafico__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
@@ -296,5 +292,12 @@ def findvideos(item):
def play(item):
logger.info()
itemlist = []
+data = httptools.downloadpage(item.url).data
+url = scrapertools.find_single_match(data, '<iframe src="([^"]+)')
+headers = {"Referer":item.url}
+item.url = httptools.downloadpage(url, follow_redirects=False, only_headers=True, headers=headers).headers.get("location", "")
+itemlist.append(item.clone())
+itemlist = servertools.get_servers_itemlist(itemlist)
-item.thumbnail = item.contentThumbnail
-return [item]
+return itemlist
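
Reviewer note (not part of the diff): the new play() above resolves the final hoster without downloading a body: it requests the iframe URL with redirects disabled and headers only, then reads the Location header. A small sketch under the same assumptions; `requests.head` approximates httptools' only_headers=True and stands in for Alfa's downloader:

# --- sketch, not part of the commit ---
import requests

def resolve_location(iframe_url, referer):
    # No body is fetched; the hoster URL travels in the Location header
    resp = requests.head(iframe_url, headers={'Referer': referer},
                         allow_redirects=False)
    return resp.headers.get('location', '')
# --- end sketch ---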

View File

@@ -30,7 +30,7 @@ def genero(item):
itemlist = list()
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="cat-item.*?<a href="([^"]+)">([^<]+)</a>'
patron = '<li class="cat-item.*?<a href="([^"]+)".*?>([^<]+)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:

View File

@@ -0,0 +1,77 @@
{
"id": "tupelicula",
"name": "TuPelicula",
"active": true,
"adult": false,
"language": ["lat", "cast", "*"],
"thumbnail": "https://i.postimg.cc/W4TbdCDP/tupelicula.png",
"banner": "",
"categories": [
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,205 @@
# -*- coding: utf-8 -*-
# -*- Channel TuPelicula -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'http://www.tupelicula.tv/'
IDIOMAS = {'la_la': 'LAT', 'es_es':'CAST', 'en_es':'VOSE', 'en_en':'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['xdrive', 'bitertv', 'okru']
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all", url=host+'filter?language=1',
thumbnail=get_thumb('cast', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'filter?language=2',
thumbnail=get_thumb('lat', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'filter?language=4',
thumbnail=get_thumb('vose', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url=host + 'search?q=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def list_all(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<div id="movie-list"(.*?)</ul>')
patron = '<a href="([^"]+)".*?data-original="([^"]+)" alt="([^"]+)".*?'
patron += '<div class="_audio">(.*?)"label_year">(\d{4}) &bull;([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, year, genre in matches:
url = scrapedurl
scrapedtitle = scrapertools.find_single_match(scrapedtitle, '([^\(]+)')
lang = get_language(lang_data)
thumbnail = 'https:'+scrapedthumbnail
if genre.lower() not in ['adultos', 'erotico'] or config.get_setting('adult_mode') > 0:
itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=url, action='findvideos',
thumbnail=thumbnail, contentTitle=scrapedtitle, language = lang,
infoLabels={'year':year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if itemlist != []:
next_page = scrapertools.find_single_match(full_data, '<li><a href="([^"]+)"><i class="fa fa-angle-right">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page))
return itemlist
def section(item):
logger.info()
itemlist = []
data=get_source(host)
if item.title == 'Generos':
data = scrapertools.find_single_match(data, '>Películas por género</div>(.*?)</ul>')
patron = '<a href="([^"]+)"><span class="icon"></span>.?([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
if title.lower() not in ['adultos', 'erotico'] or config.get_setting('adult_mode') > 0:
itemlist.append(Item(channel=item.channel, title=title, url=url, action='list_all'))
return itemlist
def get_language(lang_data):
logger.info()
language = []
lang_list = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png"?')
for lang in lang_list:
lang = IDIOMAS[lang]
if lang not in language:
language.append(lang)
return language
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
player = scrapertools.find_single_match(data, '<iframe id="playerframe" data-src="([^"]+)"')
data = get_source(player)
patron = 'data-id="(\d+)">.*?img src="([^"]+)".*?>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scraped_id, lang_data, quality in matches:
hidden_url = get_source('%splayer/rep/%s' % (host, scraped_id), player)
url = scrapertools.find_single_match(hidden_url, 'iframe src=.?"([^"]+)"').replace('\\','')
lang = get_language(lang_data)
itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play', language=lang,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'latino':
item.url = host + 'filter?language=2'
elif categoria == 'castellano':
item.url = host + 'filter?language=1'
elif categoria == 'infantiles':
item.url = host + 'genre/25/infantil'
elif categoria == 'terror':
item.url = host + 'genre/15/terror'
item.pages=3
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -9,7 +9,7 @@ headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Archive no Encontrado" in data:
if "Archive no Encontrado" in data or "File has been removed" in data:
return False, "[bitertv] El fichero ha sido borrado"
return True, ""

View File

@@ -21,15 +21,15 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("url=" + page_url)
data = httptools.downloadpage(page_url).data
-media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
-if not media_urls:
-match = scrapertools.find_single_match(data, "p,a,c,k(.*?)</script>")
-try:
-data = jsunpack.unpack(match)
-except:
-pass
-media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
+media_urls = scrapertools.find_multiple_matches(data, 'src: "([^"]+)",.*?label: "([^"]+)"')
+#media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
+# if not media_urls:
+# match = scrapertools.find_single_match(data, "p,a,c,k(.*?)</script>")
+# try:
+# data = jsunpack.unpack(match)
+# except:
+# pass
+# media_urls = scrapertools.find_multiple_matches(data, 'file\:"([^"]+\.mp4)",label:"([^"]+)"')
# Extrae la URL
calidades = []

View File

@@ -4,8 +4,8 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://www.fembed.com/v/[A-z0-9]+)",
"url": "\\1"
"pattern": "((?:fembed|divload).com/v/[A-z0-9]+)",
"url": "https://www.\\1"
}
]
},
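
Reviewer note (not part of the diff): entries in a server's "patterns" list act as regex rewrites: "pattern" is matched against a candidate URL and the backreferences in "url" rebuild the canonical embed URL, which is how the widened pattern now also catches divload.com links. A small illustration of that mechanism; the matching engine itself lives in Alfa's servertools, so this standalone check is an assumption:

# --- sketch, not part of the commit ---
import re

rule = {"pattern": "((?:fembed|divload).com/v/[A-z0-9]+)",
        "url": "https://www.\\1"}

def canonical_url(text):
    m = re.search(rule["pattern"], text)
    # \1 in "url" is filled with the captured group
    return re.sub(rule["pattern"], rule["url"], m.group(0)) if m else None

print(canonical_url("https://fembed.com/v/abc123xyz"))
# -> https://www.fembed.com/v/abc123xyz
# --- end sketch ---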