@@ -98,20 +98,15 @@ def peliculas(item):
    data = httptools.downloadpage(item.url).data
    patron = '(?s)short_overlay.*?<a href="([^"]+)'
    patron += '.*?img.*?src="([^"]+)'
    patron += '.*?title="(.*?)"'
    patron += '.*?(Idioma.*?)post-ratings'

    patron += '.*?title="([^"]+).*?'
    patron += 'data-postid="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
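    # Tuples from find_multiple_matches follow the capture-group order of the
    # patron built above; the new pattern trades the "Idioma" block for the
    # post id consumed by the AJAX detail request below.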
    for url, thumbnail, titulo, varios in matches:
        idioma = scrapertools.find_single_match(varios, '(?s)Idioma.*?kinopoisk">([^<]+)')
        number_idioma = scrapertools.find_single_match(idioma, '[0-9]')
        mtitulo = titulo
        if number_idioma != "":
            idioma = ""
        else:
            mtitulo += " (" + idioma + ")"
        year = scrapertools.find_single_match(varios, 'Año.*?kinopoisk">([^<]+)')
        year = scrapertools.find_single_match(year, '[0-9]{4}')
    for url, thumbnail, titulo, datapostid in matches:
        post = 'action=get_movie_details&postID=%s' % datapostid
        data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", post=post).data
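        # WordPress convention: admin-ajax.php dispatches on the POSTed
        # "action" parameter and returns the rendered movie-detail fragment.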
        idioma = "Latino"
        mtitulo = titulo + " (" + idioma + ")"
        year = scrapertools.find_single_match(data1, "Año:.*?(\d{4})")
        if year:
            mtitulo += " (" + year + ")"
            item.infoLabels['year'] = int(year)
@@ -121,7 +116,6 @@ def peliculas(item):
                             fulltitle = titulo,
                             thumbnail = thumbnail,
                             url = url,
                             contentTitle = titulo,
                             contentType="movie",
                             language = idioma
                             ))
@@ -142,10 +136,13 @@ def findvideos(item):
    contentTitle = scrapertools.find_single_match(data, 'orig_title.*?>([^<]+)<').strip()
    if contentTitle != "":
        item.contentTitle = contentTitle
    patron = '(?s)fmi(.*?)thead'
    bloque = scrapertools.find_single_match(data, patron)
    match = scrapertools.find_multiple_matches(bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
    for url in match:
    bloque = scrapertools.find_single_match(data, '(?s)<div class="bottomPlayer">(.*?)<script>')
    match = scrapertools.find_multiple_matches(bloque, '(?is)data-Url="([^"]+).*?data-postId="([^"]+)')
    for dataurl, datapostid in match:
        page_url = host + "wp-admin/admin-ajax.php"
        post = "action=get_more_top_news&postID=%s&dataurl=%s" % (datapostid, dataurl)
        data = httptools.downloadpage(page_url, post=post).data
        url = scrapertools.find_single_match(data, '(?i)src="([^"]+)')
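        # The AJAX response embeds a single player; the first src attribute is
        # the hosted video URL.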
        titulo = "Ver en: %s"
        text_color = "white"
        if "goo.gl" in url:

@@ -3,6 +3,8 @@
import re
import urlparse

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -12,27 +14,25 @@ from platformcode import config, logger

host = "http://www.asialiveaction.com"

IDIOMAS = {'Japones': 'Japones'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['gvideo', 'openload', 'streamango']
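# autoplay ranks playable links using list_servers and list_quality, and
# filtertools filters them by list_language (see mainlist and findvideos).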

def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()

    itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
                         url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0))

    itemlist.append(Item(channel=item.channel, action="lista", title="Series",
                         url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0))

    itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))

    itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))

    itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))

    itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))

    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + "/search?q="))
    autoplay.show_option(item.channel, itemlist)
    return itemlist


@@ -41,7 +41,6 @@ def category(item):
    itemlist = list()
    data = httptools.downloadpage(host).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
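    # Collapse newlines, tabs, double spaces and &nbsp; so the single-line
    # regexes below can match across the original markup.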

    if item.cat == 'abc':
        data = scrapertools.find_single_match(data, '<span>Orden Alfabético</span>.*?</ul>')
    elif item.cat == 'genre':
@@ -50,31 +49,23 @@ def category(item):
        data = scrapertools.find_single_match(data, '<span>Año</span>.*?</ul>')
    elif item.cat == 'quality':
        data = scrapertools.find_single_match(data, '<span>Calidad</span>.*?</ul>')

    patron = "<li>([^<]+)<a href='([^']+)'>"

    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl in matches:
        if scrapedtitle != 'Próximas Películas':
            itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host + scrapedurl, type='cat', first=0))

    return itemlist


def search_results(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
    patron += ">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedtype, scrapedthumbnail, scrapedyear, scrapedtitle, scrapedurl in matches:

        title = "%s [%s]" % (scrapedtitle, scrapedyear)
        new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail)
        if scrapedtype.strip() == 'Serie':
@@ -85,12 +76,10 @@ def search_results(item):
            new_item.contentTitle = scrapedtitle
            new_item.action = 'findvideos'
            new_item.type = 'pl'

        itemlist.append(new_item)

    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
@@ -98,57 +87,49 @@ def search(item, texto):
    if texto != '':
        return search_results(item)


def episodios(item):
    logger.info()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<div id="ep(\d+)" class="eps"> <section class="section-post online"><div class="player">.*?'
    patron += 'src="([^"]+)"/><a href="([^"]+)" target='

    matches = re.compile(patron, re.DOTALL).findall(data)

    data = data.replace('"ep0', '"epp"')
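    # Presumably renames the "ep0" block so the generic pattern below skips it
    # (an assumption; "ep0" looks like a placeholder entry).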
    patron = '(?is)<div id="ep(\d+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += 'href="([^"]+)" target="_blank"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedepi, scrapedthumbnail, scrapedurl in matches:
        url = scrapedurl
        title = "1x%s - %s" % (scrapedepi, item.contentSerieName)
        itemlist.append(item.clone(action='findvideos', title=title, url=url, thumbnail=scrapedthumbnail, type=item.type,
                                   infoLabels=item.infoLabels))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
                             url=item.url, action="add_serie_to_library", extra="episodios",
                             contentSerieName=item.contentSerieName))
    return itemlist


def lista(item):
    logger.info()
    next = True
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data = scrapertools.find_single_match(data, "itemprop='headline'>.*?</h2>.*?</ul>")

    patron = '<span class="([^"]+)">.*?<figure class="poster-bg"><header><span>(\d{4})</span></header><img src="([^"]+)" />'
    patron += '<footer>(.*?)</footer></figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'

    matches = scrapertools.find_multiple_matches(data, patron)

    first = int(item.first)
    last = first + 19
    if last > len(matches):
        last = len(matches)
        next = False
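    # Local pagination: only the matches[first:last] window is rendered on this
    # call; "first" is carried forward on the "Siguiente >>" item below.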

    for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle, scrapedurl in matches[first:last]:
        patron_quality = "<span>(.+?)</span>"
        quality = scrapertools.find_multiple_matches(scrapedquality, patron_quality)
        qual = ""

        for calidad in quality:
            qual = qual + "[" + calidad + "] "

        title = "%s [%s] %s" % (scrapedtitle, scrapedyear, qual)
        new_item = Item(channel=item.channel, title=title, url=host + scrapedurl, thumbnail=scrapedthumbnail,
                        type=scrapedtype, infoLabels={'year': scrapedyear})
@@ -158,34 +139,26 @@ def lista(item):
        else:
            new_item.contentTitle = scrapedtitle
            new_item.action = 'findvideos'

        if scrapedtype == item.type or item.type == 'cat':
            itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # pagination

    url_next_page = item.url
    first = last

    if next:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))

    return itemlist


def findvideos(item):
    logger.info()

    itemlist = []
    dl_links = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    ### fetch the gvideo links
    patron = 'class="Button Sm fa fa-download mg"></a><a target="_blank" rel="nofollow" href="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    matches = scrapertools.find_multiple_matches(data, patron)
    for dl_url in matches:
        g_data = httptools.downloadpage(dl_url).data
        video_id = scrapertools.find_single_match(g_data, 'jfk-button jfk-button-action" href="([^"]+)">')
@@ -194,22 +167,26 @@ def findvideos(item):
        g_data = httptools.downloadpage(g_url, follow_redirects=False, only_headers=True).headers
        url = g_data['location']
        dl_links.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))

    if item.type == 'pl':
        new_url = scrapertools.find_single_match(data, '<div class="player">.*?<a href="([^"]+)" target')

        data = httptools.downloadpage(new_url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

        patron = '<li class="btn.*?" data-video="([^"]+)">'
        matches = re.compile(patron, re.DOTALL).findall(data)
        matches = scrapertools.find_multiple_matches(data, patron)
        for video_id in matches:
            url_data = httptools.downloadpage('https://tinyurl.com/%s' % video_id, follow_redirects=False)
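            # With follow_redirects=False the shortener answers with a 30x
            # status; the real video host is read from the Location header.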
            url = url_data.headers['location']
            itemlist.append(Item(channel=item.channel, title='%s', url=url, action='play', infoLabels=item.infoLabels))

    patron = '<iframe src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        itemlist.append(item.clone(title='%s', url=url, action='play'))
    itemlist.extend(dl_links)

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    return itemlist
    # Required by AutoPlay

    autoplay.start(itemlist, item)
    return itemlist

@@ -1,46 +0,0 @@
{
    "id": "cinefoxtv",
    "name": "CineFoxTV",
    "active": true,
    "adult": false,
    "language": ["lat"],
    "thumbnail": "https://s28.postimg.cc/lytn2q1tp/cinefoxtv.png",
    "banner": "cinefoxtv.png",
    "categories": [
        "movie"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,209 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = 'http://verhdpelis.com/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]

global duplicado
global itemlist
global temp_list
canal = 'cinefoxtv'

tgenero = {"Comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
           "Suspenso": "https://s13.postimg.cc/wmw6vl1cn/suspenso.png",
           "Drama": "https://s16.postimg.cc/94sia332d/drama.png",
           "Acción": "https://s3.postimg.cc/y6o9puflv/accion.png",
           "Aventuras": "https://s10.postimg.cc/6su40czih/aventura.png",
           "Animacion": "https://s13.postimg.cc/5on877l87/animacion.png",
           "Ciencia Ficcion": "https://s9.postimg.cc/diu70s7j3/cienciaficcion.png",
           "Terror": "https://s7.postimg.cc/yi0gij3gb/terror.png",
           "Documentales": "https://s16.postimg.cc/7xjj4bmol/documental.png",
           "Musical": "https://s29.postimg.cc/bbxmdh9c7/musical.png",
           "Western": "https://s23.postimg.cc/lzyfbjzhn/western.png",
           "Belico": "https://s23.postimg.cc/71itp9hcr/belica.png",
           "Crimen": "https://s4.postimg.cc/6z27zhirx/crimen.png",
           "Biográfica": "https://s15.postimg.cc/5lrpbx323/biografia.png",
           "Deporte": "https://s13.postimg.cc/xuxf5h06v/deporte.png",
           "Fantástico": "https://s10.postimg.cc/pbkbs6j55/fantastico.png",
           "Estrenos": "https://s21.postimg.cc/fy69wzm93/estrenos.png",
           "Película 18+": "https://s15.postimg.cc/exz7kysjf/erotica.png",
           "Thriller": "https://s22.postimg.cc/5y9g0jsu9/thriller.png",
           "Familiar": "https://s7.postimg.cc/6s7vdhqrf/familiar.png",
           "Romanticas": "https://s21.postimg.cc/xfsj7ua0n/romantica.png",
           "Intriga": "https://s27.postimg.cc/v9og43u2b/intriga.png",
           "Infantil": "https://s23.postimg.cc/g5rmazozv/infantil.png"}


def mainlist(item):
    logger.info()

    itemlist = []

    itemlist.append(item.clone(title="Todas", action="lista", thumbnail=get_thumb('all', auto=True),
                               fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png', extra='peliculas/',
                               url=host + 'page/1.html'))

    itemlist.append(
        itemlist[-1].clone(title="Generos", action="generos", thumbnail=get_thumb('genres', auto=True),
                           fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png', url=host))

    itemlist.append(
        itemlist[-1].clone(title="Mas Vistas", action="lista", thumbnail=get_thumb('more watched', auto=True),
                           fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
                           url=host + 'top-peliculas-online/1.html'))

    itemlist.append(itemlist[-1].clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True),
                                       fanart='https://s30.postimg.cc/pei7txpa9/buscar.png', url=host + 'search/'))

    return itemlist


def lista(item):
    logger.info()
    itemlist = []
    duplicado = []
    max_items = 24
    next_page_url = ''

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    data = scrapertools.decodeHtmlentities(data)
    patron = '"box_image_b.*?"><a href="([^"]+)" title=".*?><img src="([^"]+)" alt="(.*?)(\d{4}).*?"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if item.next_page != 'b':
        if len(matches) > max_items:
            next_page_url = item.url
            matches = matches[:max_items]
            next_page = 'b'
    else:
        matches = matches[max_items:]
        next_page = 'a'
        patron_next_page = '<a class="page dark gradient" href="([^"]+)">PROXIMO'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        if len(matches_next_page) > 0:
            next_page_url = urlparse.urljoin(item.url, matches_next_page[0])
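        # Two-pass pagination: the first pass serves 24 matches and re-queues
        # the same URL with next_page='b'; this second pass serves the rest and
        # follows the site's own "PROXIMO" link.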

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:

        url = scrapedurl
        thumbnail = scrapedthumbnail
        contentTitle = re.sub(r"\(.*?\)|\/.*?|\(|\)|.*?\/|!", "", scrapedtitle)
        title = scrapertools.decodeHtmlentities(contentTitle) + '(' + scrapedyear + ')'
        fanart = ''
        plot = ''

        if url not in duplicado:
            itemlist.append(
                Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail, plot=plot,
                     fanart=fanart, contentTitle=contentTitle, infoLabels={'year': scrapedyear}))
            duplicado.append(url)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if next_page_url != '':
        itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page_url,
                             thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png', extra=item.extra,
                             next_page=next_page))
    return itemlist


def generos(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^"]+)"><i class="fa fa-caret-right"><\/i> <strong>Películas de (.*?)<\/strong><\/a><\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        if scrapedtitle in tgenero:
            thumbnail = tgenero[scrapedtitle]
        else:
            thumbnail = ''
        title = scrapedtitle
        fanart = ''
        plot = ''

        if title != 'Series':
            itemlist.append(
                Item(channel=item.channel, action='lista', title=title, url=url, thumbnail=thumbnail, plot=plot,
                     fanart=fanart))
    return itemlist


def getinfo(page_url):
    logger.info()
    data = httptools.downloadpage(page_url).data
    plot = scrapertools.find_single_match(data, '<\/em>\.(?:\s*|.)(.*?)\s*<\/p>')
    info = plot

    return info


def findvideos(item):
    logger.info()
    itemlist = []
    info = getinfo(item.url)
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = 'src="(.*?)" style="border:none;'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        itemlist.extend(servertools.find_video_items(data=scrapedurl))

    for videoitem in itemlist:
        videoitem.title = item.contentTitle
        videoitem.channel = item.channel
        videoitem.plot = info
        videoitem.action = "play"
        videoitem.folder = False
        videoitem.infoLabels = item.infoLabels

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "-")
    item.url = item.url + texto
    if texto != '':
        return lista(item)


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    # categoria='peliculas'
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host + 'page/1.html'
        elif categoria == 'infantiles':
            item.url = host + 'peliculas-de-genero/infantil/1.html'
        itemlist = lista(item)
        if itemlist[-1].title == 'Siguiente >>>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
@@ -2,7 +2,7 @@
    "id": "crunchyroll",
    "name": "Crunchyroll",
    "language": ["cast", "lat"],
    "active": true,
    "active": false,
    "adult": false,
    "thumbnail": "http://i.imgur.com/O49fDS1.png",
    "categories": [

@@ -1,7 +1,7 @@
{
    "id": "cuelgame",
    "name": "Cuelgame",
    "active": true,
    "active": false,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "cuelgame.png",

@@ -1,60 +1,46 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger

host = "https://jkanime.net"


def mainlist(item):
    logger.info()

    itemlist = list()
    itemlist.append(
        Item(channel=item.channel, action="ultimos_capitulos", title="Últimos Capitulos", url="http://jkanime.net/"))
    itemlist.append(Item(channel=item.channel, action="ultimos", title="Últimos", url="http://jkanime.net/"))
    itemlist.append(Item(channel=item.channel, action="letras", title="Listado Alfabetico", url="http://jkanime.net/"))
    itemlist.append(Item(channel=item.channel, action="generos", title="Listado por Genero", url="http://jkanime.net/"))
    itemlist.append(Item(channel=item.channel, action="ultimas_series", title="Últimas Series", url=host))
    itemlist.append(Item(channel=item.channel, action="ultimos_episodios", title="Últimos Episodios", url=host))
    itemlist.append(Item(channel=item.channel, action="p_tipo", title="Listado Alfabetico", url=host, extra="Animes por letra"))
    itemlist.append(Item(channel=item.channel, action="p_tipo", title="Listado por Genero", url=host, extra="Animes por Genero"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))

    return itemlist


def ultimos_capitulos(item):
def ultimas_series(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<ul class="ratedul">.+?</ul>')

    data = data.replace('\t', '')
    data = data.replace('\n', '')
    data = data.replace('/thumbnail/', '/image/')

    patron = '<img src="(http://cdn.jkanime.net/assets/images/animes/.+?)" .+?href="(.+?)">(.+?)<.+?span>(.+?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumb, scrapedurl, scrapedtitle, scrapedepisode in matches:
        title = scrapedtitle.strip() + scrapedepisode
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = scrapedthumb
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

    data = scrapertools.find_single_match(data, 'Últimos capitulos agregados.*?/div><!-- .content-box -->')
    patron = '<a title="([^"]+).*?'
    patron += 'href="([^"]+)".*?'
    patron += 'src="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 show=scrapedtitle.strip(), fulltitle=title))

            Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                 show=scrapedtitle))
    tmdb.set_infoLabels(itemlist)
    return itemlist


def search(item, texto):
    logger.info()
    if item.url == "":
        item.url = "http://jkanime.net/buscar/%s/"
        item.url = host + "/buscar/%s/"
    texto = texto.replace(" ", "+")
    item.url = item.url % texto
    try:
@@ -67,127 +53,77 @@ def search(item, texto):
        return []


def ultimos(item):
def ultimos_episodios(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<ul class="latestul">(.*?)</ul>')

    patron = '<a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

    #data = scrapertools.find_single_match(data, '<ul class="latestul">(.*?)</ul>')
    patron = '<a class="odd" title="([^"]+).*?'
    patron += 'href="([^"]+)".*?'
    patron += 'img src="([^"]+)".*?'
    patron += 'Episodio.*?(\d+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedepisode in matches:
        title = scrapedtitle + " - Episodio " + scrapedepisode
        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot))

            Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
                 show=scrapedtitle))
    tmdb.set_infoLabels(itemlist)
    return itemlist


def generos(item):
def p_tipo(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<div class="genres">(.*?)</div>')

    patron = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    data = scrapertools.find_single_match(data, '<h3>%s(.*?)</ul>' % item.extra)
    patron = 'href="([^"]+)".*?'
    patron += 'title.*?>([^<]+)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            Item(channel=item.channel, action="series", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 viewmode="movie_with_plot"))

    return itemlist


def letras(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data, '<ul class="animelet">(.*?)</ul>')

    patron = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            Item(channel=item.channel, action="series", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 viewmode="movie_with_plot"))

        if "Por Genero" not in scrapedtitle:
            itemlist.append(
                Item(channel=item.channel, action="series", title=scrapedtitle, url=host + scrapedurl,
                     viewmode="movie_with_plot"))
    return itemlist


def series(item):
    logger.info()

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries
    patron = '<table class="search[^<]+'
    patron += '<tr[^<]+'
    patron += '<td[^<]+'
    patron += '<a href="([^"]+)"><img src="([^"]+)"[^<]+</a>[^<]+'
    patron += '</td>[^<]+'
    patron += '<td><a[^>]+>([^<]+)</a></td>[^<]+'
    patron += '<td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td[^>]+>([^<]+)</td>[^<]+'
    patron += '</tr>[^<]+'
    patron += '<tr>[^<]+'
    patron += '<td>(.*?)</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    patron = '(?is)let-post.*?src="([^"]+).*?'
    patron += 'alt="([^"]+).*?'
    patron += 'href="([^"]+).*?'
    patron += '<p>([^\<]+).*?'
    patron += 'eps-num">([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    itemlist = []

    for scrapedurl, scrapedthumbnail, scrapedtitle, line1, line2, scrapedplot in matches:
        title = scrapedtitle.strip() + " (" + line1.strip() + ") (" + line2.strip() + ")"
        extra = line2.strip()
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        thumbnail = thumbnail.replace("thumbnail", "image")
    for scrapedthumbnail, scrapedtitle, scrapedurl, scrapedplot, scrapedepisode in matches:
        title = scrapedtitle + " (" + scrapedepisode + ")"
        scrapedthumbnail = scrapedthumbnail.replace("thumbnail", "image")
        plot = scrapertools.htmlclean(scrapedplot)
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
                 plot=plot, extra=extra, show=scrapedtitle.strip()))

        itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                             plot=scrapedplot, show=scrapedtitle))
    tmdb.set_infoLabels(itemlist)
    try:
        siguiente = scrapertools.get_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
        scrapedurl = urlparse.urljoin(item.url, siguiente)
        siguiente = scrapertools.find_single_match(data, '<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
        scrapedurl = item.url + siguiente
        scrapedtitle = ">> Pagina Siguiente"
        scrapedthumbnail = ""
        scrapedplot = ""

        itemlist.append(
            Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                 plot=scrapedplot, folder=True, viewmode="movie_with_plot"))
        if len(itemlist) > 0:
            itemlist.append(
                Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                     plot=scrapedplot, folder=True, viewmode="movie_with_plot"))
    except:
        pass
    return itemlist


def get_pages_and_episodes(data):
    results = re.findall('href="#pag([0-9]+)">[0-9]+ - ([0-9]+)', data)
    results = scrapertools.find_multiple_matches(data, 'href="#pag([0-9]+)".*?>[0-9]+ - ([0-9]+)')
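    # The episode pager renders tabs like href="#pagN">start - end; the last
    # tab yields the totals (number of pages, number of episodes).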
    if results:
        return int(results[-1][0]), int(results[-1][1])
    return 1, 0
@@ -196,14 +132,11 @@ def get_pages_and_episodes(data):
def episodios(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    scrapedplot = scrapertools.get_match(data, '<meta name="description" content="([^"]+)"/>')
    scrapedplot = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"/>')
    scrapedthumbnail = scrapertools.find_single_match(data, '<div class="separedescrip">.*?src="([^"]+)"')

    idserie = scrapertools.get_match(data, "ajax/pagination_episodes/(\d+)/")
    idserie = scrapertools.find_single_match(data, "ajax/pagination_episodes/(\d+)/")
    logger.info("idserie=" + idserie)
    if " Eps" in item.extra and "Desc" not in item.extra:
        caps_x = item.extra
@@ -212,69 +145,55 @@ def episodios(item):
        paginas = capitulos / 10 + (capitulos % 10 > 0)
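        # Ceiling division in Python 2: ten episodes per AJAX page, e.g.
        # 25 episodes -> 25 / 10 + (25 % 10 > 0) = 2 + True = 3 pages.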
    else:
        paginas, capitulos = get_pages_and_episodes(data)

    logger.info("idserie=" + idserie)
    for num_pag in range(1, paginas + 1):

        numero_pagina = str(num_pag)
        headers = {"Referer": item.url}
        data2 = scrapertools.cache_page("http://jkanime.net/ajax/pagination_episodes/%s/%s/" % (idserie, numero_pagina),
                                        headers=headers)
        # logger.info("data2=" + data2)

        data2 = httptools.downloadpage(host + "/ajax/pagination_episodes/%s/%s/" % (idserie, numero_pagina),
                                       headers=headers).data
        patron = '"number"\:"(\d+)","title"\:"([^"]+)"'
        matches = re.compile(patron, re.DOTALL).findall(data2)

        # http://jkanime.net/get-backers/1/
        matches = scrapertools.find_multiple_matches(data2, patron)
        for numero, scrapedtitle in matches:
            title = scrapedtitle.strip()
            url = urlparse.urljoin(item.url, numero)
            thumbnail = scrapedthumbnail
            url = item.url + numero
            plot = scrapedplot
            logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
                                 fanart=thumbnail, plot=plot, fulltitle=title))

            itemlist.append(item.clone(action="findvideos", title=title, url=url, plot=plot))
    if len(itemlist) == 0:
        try:
            # porestrenar = scrapertools.get_match(data,
            #     '<div[^<]+<span class="labl">Estad[^<]+</span[^<]+<span[^>]+>Por estrenar</span>')
            itemlist.append(Item(channel=item.channel, action="findvideos", title="Serie por estrenar", url="",
                                 thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot,
                                 server="directo", folder=False))
        except:
            pass

    return itemlist


def findvideos(item):
    logger.info()

    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)

    list_videos = scrapertools.find_multiple_matches(data, '<iframe class="player_conte" src="([^"]+)"')
    aux_url = []
    data = httptools.downloadpage(item.url).data
    list_videos = scrapertools.find_multiple_matches(data, '<iframe class="player_conte" src="([^"]+)"')
    index = 1
    for e in list_videos:
        if e.startswith("https://jkanime.net/jk.php?"):
        if e.startswith(host + "/jk"):
            headers = {"Referer": item.url}
            data = httptools.downloadpage(e, headers=headers).data

            url = scrapertools.find_single_match(data, '<embed class="player_conte".*?&file=([^\"]+)\"')
            if not url:
                url = scrapertools.find_single_match(data, 'source src="([^\"]+)\"')
            if not url:
                url = scrapertools.find_single_match(data, '<iframe class="player_conte" src="([^\"]+)\"')
                if "jkanime" in url:
                    url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
            if url:
                itemlist.append(item.clone(title="Enlace encontrado en server #%s" % index, url=url, action="play"))
                itemlist.append(item.clone(title="Enlace encontrado en server #" + str(index) + " (%s)", url=url, action="play"))
                index += 1

        else:
            aux_url.append(e)

    itemlist.extend(servertools.find_video_items(data=",".join(aux_url)))
            aux_url.append(item.clone(title="Enlace encontrado (%s)", url=e, action="play"))
    itemlist.extend(aux_url)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    for videoitem in itemlist:
        videoitem.fulltitle = item.fulltitle
        videoitem.channel = item.channel
        videoitem.thumbnail = item.thumbnail

    return itemlist

@@ -41,7 +41,7 @@ tcalidad = {"FULL HD": "https://s18.postimg.cc/qszt3n6tl/fullhd.png",
            "HD": "https://s27.postimg.cc/m2dhhkrur/image.png",
            "SD": "https://s29.postimg.cc/l66t2pfqf/image.png"
            }
host = 'http://miradetodo.io/'
host = 'http://miradetodo.net/'


def mainlist(item):

@@ -14,6 +14,26 @@
    ],
    "settings": [
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino"
            ]
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "label": "Buscar información extra",
@@ -8,6 +8,8 @@ import sys
import urllib
import urlparse

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
@@ -19,7 +21,7 @@ from channelselector import get_thumb

__channel__ = "pedropolis"

host = "http://pedropolis.com/"
host = "http://pedropolis.tv/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -44,10 +46,16 @@ parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']

IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload']


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
                           viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")),

@@ -57,31 +65,27 @@ def mainlist(item):

                item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
                           thumbnail=get_thumb('search.png'), url=host)]

    autoplay.show_option(item.channel, itemlist)
    return itemlist


def menumovies(item):
    logger.info()
    itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'movies/',
    itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'pelicula/',
                           viewcontent='movies', viewmode="movie_with_plot"),

                item.clone(title="Más Vistas", action="peliculas", text_blod=True,
                           viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),

                item.clone(title="Más Valoradas", action="peliculas", text_blod=True, viewcontent='movies',
                           url=host + 'calificaciones/?get=movies', viewmode="movie_with_plot"),

                item.clone(title="Géneros", action="generos", text_blod=True, viewmode="movie_with_plot",
                           viewcontent='movies', url=host)]

                           viewcontent='movies', url=host + 'tendencias/?get=movie', viewmode="movie_with_plot"),
                item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
                           viewcontent='movies', url=host, viewmode="movie_with_plot"),
                item.clone(title="Por género", action="p_portipo", text_blod=True, extra="Categorías",
                           viewcontent='movies', url=host, viewmode="movie_with_plot")]
    return itemlist


def menuseries(item):
    logger.info()
    itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot"),
                           viewcontent='tvshows', url=host + 'serie/', viewmode="movie_with_plot"),

                item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
                           viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"),
@@ -92,6 +96,22 @@ def menuseries(item):
    return itemlist


def p_portipo(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '(?is)%s.*?</ul>' % item.extra)
    patron = 'href="([^"]+).*?'
    patron += '>([^"<]+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(action="peliculas",
                                   title=scrapedtitle,
                                   url=scrapedurl
                                   ))
    return itemlist


def peliculas(item):
    logger.info()
    itemlist = []
@@ -99,14 +119,11 @@ def peliculas(item):
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    # logger.info(data)

    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
    patron = '<div class="poster"> <img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
    patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?'  # rating
    patron += '<span class="quality">([^<]+)</span></div><a href="([^"]+)">.*?'  # quality, url
    patron += '<span class="quality">([^<]+)</span></div> <a href="([^"]+)">.*?'  # quality, url
    patron += '<span>([^<]+)</span>'  # year

    matches = scrapertools.find_multiple_matches(data, patron)

    # Pagination
    if item.next_page != 'b':
        if len(matches) > 19:
@@ -120,65 +137,28 @@ def peliculas(item):
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        if len(matches_next_page) > 0:
            url_next_page = urlparse.urljoin(item.url, matches_next_page[0])

    for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches:
        if 'Proximamente' not in quality:
            scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
                'Español Latino', '').strip()
            title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)

            itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
                                 infoLabels={"year": year, "rating": rating}, thumbnail=scrapedthumbnail,
                                 url=scrapedurl, next_page=next_page, quality=quality, title=title))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    if url_next_page:
        itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
                             url=url_next_page, next_page=next_page, folder=True, text_blod=True,
                             thumbnail=get_thumb("next.png")))

    for no_plot in itemlist:
        if no_plot.infoLabels['plot'] == '':
            thumb_id = scrapertools.find_single_match(no_plot.thumbnail, '.*?\/\d{2}\/(.*?)-')
            thumbnail = "/%s.jpg" % thumb_id
            filtro_list = {"poster_path": thumbnail}
            filtro_list = filtro_list.items()
            no_plot.infoLabels = {'filtro': filtro_list}
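            # An assumption about core.tmdb: the 'filtro' infoLabel appears to
            # make set_infoLabels_item match candidates by poster path when a
            # plain title lookup is ambiguous.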
            tmdb.set_infoLabels_item(no_plot, __modo_grafico__)

            if no_plot.infoLabels['plot'] == '':
                data = httptools.downloadpage(no_plot.url).data
                data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
                # logger.info(data)
                no_plot.fanart = scrapertools.find_single_match(data,
                                                                "<meta property='og:image' content='([^']+)' />").replace(
                    'w780', 'original')
                no_plot.plot = scrapertools.find_single_match(data, '<div itemprop="description" '
                                                                    'class="wp-content">.*?<p>([^<]+)</p>')
                no_plot.plot = scrapertools.htmlclean(no_plot.plot)
                no_plot.infoLabels['director'] = scrapertools.find_single_match(data,
                                                                                '<div class="name"><a href="[^"]+">([^<]+)</a>')
                no_plot.infoLabels['rating'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>([^<]+)</strong>')
                no_plot.infoLabels['votes'] = scrapertools.find_single_match(data, '<b id="repimdb"><strong>[^<]+</strong>\s(.*?) votos</b>')

    return itemlist


def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))

    try:
        return sub_search(item)

    # Catch the exception so a failing channel does not interrupt the global search
    except:
        import sys
@@ -189,20 +169,18 @@ def search(item, texto):

def sub_search(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)

    patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />'  # url, img, title
    bloque = scrapertools.find_single_match(data, 'Resultados encontrados.*?class="widget widget_fbw_id')
    patron = '(?is)<a href="([^"]+)">.*?'
    patron += '<img src="([^"]+)".*?'
    patron += 'alt="([^"]+)" />.*?'  # url, img, title
    patron += '<span class="[^"]+">([^<]+)</span>.*?'  # type
    patron += '<span class="year">([^"]+)</span>.*?<div class="contenido"><p>([^<]+)</p>'  # year, plot

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year, plot in matches:
    patron += '<span class="year">([^"]+)'  # year
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:
        title = scrapedtitle
        if tipo == 'Serie':
        if tipo == ' Serie ':
            contentType = 'tvshow'
            action = 'temporadas'
            title += ' [COLOR red](' + tipo + ')[/COLOR]'
@@ -210,18 +188,14 @@ def sub_search(item):
            contentType = 'movie'
            action = 'findvideos'
            title += ' [COLOR green](' + tipo + ')[/COLOR]'

        itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar',
                                   action=action, infoLabels={"year": year}, contentType=contentType,
                                   thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')

    if paginacion:
        itemlist.append(Item(channel=item.channel, action="sub_search",
                             title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png")))

    return itemlist


@@ -253,42 +227,17 @@ def newest(categoria):
    return itemlist


def generos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # logger.info(data)
    data = scrapertools.find_single_match(data, 'Genero</a><ulclass="sub-menu">(.*?)</ul></li><li id')

    patron = '<li id="[^"]+" class="menu-item.*?<a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle != 'Proximamente':
            title = "%s" % scrapedtitle
            itemlist.append(item.clone(channel=item.channel, action="peliculas", title=title,
                                       url=scrapedurl, text_color=color3, viewmode="movie_with_plot"))

    itemlist.sort(key=lambda it: it.title)

    return itemlist


def series(item):
    logger.info()
    url_next_page = ''

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)

    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">'  # img, title, url

    patron = '<div class="poster"> <img src="([^"]+)"'
    patron += ' alt="([^"]+)">.*?'
    patron += '<a href="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)

    if item.next_page != 'b':
        if len(matches) > 19:
            url_next_page = item.url
@@ -301,45 +250,27 @@ def series(item):
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        if len(matches_next_page) > 0:
            url_next_page = urlparse.urljoin(item.url, matches_next_page[0])

    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('’', "'")
        itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
                             url=scrapedurl, thumbnail=scrapedthumbnail,
                             contentSerieName=scrapedtitle, show=scrapedtitle,
                             next_page=next_page, action="temporadas", contentType='tvshow'))

    tmdb.set_infoLabels(itemlist, __modo_grafico__)
    tmdb.set_infoLabels(itemlist, __modo_grafico__)

    if url_next_page:
        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page,
                             next_page=next_page, thumbnail=get_thumb("next.png")))

    for item in itemlist:
        if item.infoLabels['plot'] == '':
            data = httptools.downloadpage(item.url).data
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
            # logger.info(data)
            item.fanart = scrapertools.find_single_match(data,
                                                         "<meta property='og:image' content='([^']+)' />").replace(
                'w780', 'original')
            item.plot = scrapertools.find_single_match(data, '<h2>Sinopsis</h2><div class="wp-content"><p>([^<]+)</p>')
            item.plot = scrapertools.htmlclean(item.plot)

    return itemlist


def temporadas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<span class="title">([^<]+)<i>.*?'  # season
    patron += '<img src="([^"]+)"></a></div>'  # img

    matches = scrapertools.find_multiple_matches(data, patron)
    if len(matches) > 1:
        for scrapedseason, scrapedthumbnail in matches:
@@ -349,7 +280,6 @@ def temporadas(item):
            new_item.infoLabels['season'] = temporada
            new_item.extra = ""
            itemlist.append(new_item)

        tmdb.set_infoLabels(itemlist, __modo_grafico__)
        for i in itemlist:
            i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
@@ -359,14 +289,11 @@ def temporadas(item):
            if i.infoLabels.has_key('poster_path'):
                # If the season has its own poster, replace the show's poster with it
                i.thumbnail = i.infoLabels['poster_path']

        itemlist.sort(key=lambda it: it.title)

        if config.get_videolibrary_support() and len(itemlist) > 0:
            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                                 text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))

        return itemlist
    else:
        return episodios(item)
@@ -375,36 +302,28 @@ def temporadas(item):
def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<div class="imagen"><a href="([^"]+)">.*?'  # url
    patron += '<div class="numerando">(.*?)</div>.*?'  # season/episode numbering
    patron += '<a href="[^"]+">([^<]+)</a>'  # episode title

    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedname in matches:
        # "numerando" is rendered as "<season> - <episode>"; "--" placeholders
        # are normalized to 0 so the pattern below always yields two numbers
        scrapedtitle = scrapedtitle.replace('--', '0')
        patron = '(\d+) - (\d+)'
        match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        season, episode = match[0]

        if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
            continue

        title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
        new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, fulltitle=title,
                              contentType="episode", extra='serie')
        if 'infoLabels' not in new_item:
            new_item.infoLabels = {}

        new_item.infoLabels['season'] = season
        new_item.infoLabels['episode'] = episode.zfill(2)

        itemlist.append(new_item)

    # TODO: skip this when we are adding to the video library
    if not item.extra:
        # Fetch the details of every episode in the season using multiple threads
@@ -416,31 +335,25 @@ def episodios(item):
        if i.infoLabels.has_key('poster_path'):
            # If the episode has its own image, replace the poster with it
            i.thumbnail = i.infoLabels['poster_path']

    itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
                  reverse=config.get_setting('orden_episodios', __channel__))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    # "Add this series to the video library" option
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
                             text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))

    return itemlist


def findvideos(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # logger.info(data)
    patron = '<div id="option-(\d+)" class="[^"]+"><iframe.*?src="([^"]+)".*?</iframe>'  # lang, url
    patron = '<div id="option-(\d+)".*?<iframe.*?src="([^"]+)".*?</iframe>'  # lang, url
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, url in matches:
        lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?</b>-->(\w+)' % option)
        lang = lang.lower()
@@ -451,17 +364,13 @@ def findvideos(item):
                  'ingles': '[COLOR red](VOS)[/COLOR]'}
        if lang in idioma:
            lang = idioma[lang]

        # resolve short-URL redirects when one matches
        if "bit.ly" in url:
            url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
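            # only_headers=True keeps the probe light: a single request whose
            # Location header points at the real streaming host.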
|
||||
|
||||
itemlist.append(item.clone(channel=__channel__, url=url, title=item.contentTitle,
|
||||
action='play', language=lang))
|
||||
|
||||
itemlist = servertools.get_servers_itemlist(itemlist)
|
||||
itemlist.sort(key=lambda it: it.language, reverse=False)
|
||||
|
||||
for x in itemlist:
|
||||
if x.extra != 'directo':
|
||||
x.thumbnail = item.thumbnail
|
||||
@@ -469,10 +378,14 @@ def findvideos(item):
|
||||
if item.extra != 'serie' and item.extra != 'buscar':
|
||||
x.title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
|
||||
x.server.title(), x.quality, x.language)
|
||||
# Requerido para FilterTools
|
||||
itemlist = filtertools.get_links(itemlist, item, list_language)
|
||||
|
||||
# Requerido para AutoPlay
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
|
||||
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
|
||||
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
|
||||
thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle))
|
||||
|
||||
return itemlist
|
||||
|
||||
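The bit.ly branch in findvideos() above resolves short links by reading the redirect target without following it. A minimal standalone sketch of that pattern, reusing the httptools call shown in this patch (the helper name resolve_short_url is hypothetical):

from core import httptools

def resolve_short_url(url):
    # Fetch only the headers and do not follow the 30x redirect itself.
    response = httptools.downloadpage(url, follow_redirects=False, only_headers=True)
    # Fall back to the original URL when no Location header is present.
    return response.headers.get("location", "") or url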
@@ -1,12 +0,0 @@
{
    "id": "seriesmeme",
    "name": "SeriesMeme",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "seriesmeme.png",
    "banner": "seriesmeme.png",
    "categories": [
        "tvshow"
    ]
}
@@ -1,224 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay

IDIOMAS = {'latino': 'Latino', 'español': 'Español'}
list_language = IDIOMAS.values()
list_servers = ['openload',
                'sendvid',
                'netutv',
                'rapidvideo'
                ]
list_quality = ['default']

host = "https://seriesmeme.com/"


def mainlist(item):
    logger.info()

    thumb_series = get_thumb("channels_tvshow.png")
    thumb_series_az = get_thumb("channels_tvshow_az.png")
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()

    itemlist.append(Item(channel=item.channel, action="lista_gen", title="Novedades", url=host,
                         thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="lista", title="Listado Completo de Series",
                         url=urlparse.urljoin(host, "/lista"), thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="categorias", title="Categorias", url=host,
                         thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado Alfabetico", url=host,
                         thumbnail=thumb_series_az))
    itemlist.append(Item(channel=item.channel, action="top", title="Top Series", url=host,
                         thumbnail=thumb_series))
    itemlist = renumbertools.show_option(item.channel, itemlist)
    autoplay.show_option(item.channel, itemlist)
    return itemlist


"""
def search(item, texto):
    logger.info()
    texto = texto.replace(" ","+")
    item.url = item.url+texto
    if texto!='':
        return lista(item)
"""


def categorias(item):
    logger.info()
    dict_gender = {"acción": "accion", "animes": "animacion", "aventuras": "aventura", "dibujos": "animacion",
                   "ciencia ficción": "ciencia%20ficcion", "intriga": "misterio", "suspenso": "suspense",
                   "thriller": "suspense", "fantástico": "fantasia"}
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_cat = '<li id="menu-item-15068" class=".+?"><.+?>.+?<\/a>(.+?)<\/ul><\/li>'
    categorias = scrapertools.find_single_match(data, patron_cat)
    patron = '<li id="menu-item-.+?" class=".+?"><a href="([^"]+)">([^"]+)<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(categorias, patron)
    for link, name in matches:
        if 'Género' in name:
            title = name.replace('Género ', '')
            url = link
            thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/azul/%s.png"
            thumbnail = thumbnail % dict_gender.get(title.lower(), title.lower())
            itemlist.append(item.clone(title=title, url=url, plot=title, action="lista_gen", thumbnail=thumbnail))
    return itemlist


def alfabetico(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_alf1 = '<li id="menu-item-15069" class=".+?"><.+?>.+?<\/a>(.+?)<\/ul><\/li>'
    patron_alf2 = '<li id="menu-item-15099" class=".+?"><.+?>.+?<\/a>(.+?)<\/ul><\/li>'
    alfabeto1 = scrapertools.find_single_match(data, patron_alf1)
    alfabeto2 = scrapertools.find_single_match(data, patron_alf2)
    alfabeto = alfabeto1 + alfabeto2
    patron = '<li id="menu-item-.+?" class=".+?"><a href="([^"]+)">([^"]+)<\/a><\/li>'
    matches = scrapertools.find_multiple_matches(alfabeto, patron)
    for link, name in matches:
        title = name
        url = link
        itemlist.append(item.clone(title=title, url=url, plot=title, action="lista_gen"))
    return itemlist


def top(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_top = '<li id="menu-item-15087" class=".+?"><.+?>.+?<\/a>(.+?)<\/ul><\/li>'
    top = scrapertools.find_single_match(data, patron_top)
    patron = '<a href="([^"]+)">([^"]+)<\/a>'
    matches = scrapertools.find_multiple_matches(top, patron)
    for link, name in matches:
        title = name
        url = link
        itemlist.append(item.clone(title=title, url=url, plot=title, action="lista_gen", show=title))
    tmdb.set_infoLabels(itemlist)
    return itemlist


def lista_gen(item):
    logger.info()

    itemlist = []

    data1 = httptools.downloadpage(item.url).data
    data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
    patron_sec = '<section class="content">.+?<\/section>'
    data = scrapertools.find_single_match(data1, patron_sec)
    patron = '<article id=.+? class=.+?><div.+?>'
    patron += '<a href="([^"]+)" title="([^"]+)'  # scrapedurl, scrapedtitle
    patron += ' Capítulos Completos ([^"]+)">'  # scrapedlang
    patron += '<img src=".+?" data-lazy-src="([^"]+)"'  # scrapedthumbnail
    matches = scrapertools.find_multiple_matches(data, patron)
    i = 0
    for scrapedurl, scrapedtitle, scrapedlang, scrapedthumbnail in matches:
        i = i + 1
        if 'HD' in scrapedlang:
            scrapedlang = scrapedlang.replace('HD', '')
        title = scrapedtitle + " [ " + scrapedlang + "]"
        context1 = [renumbertools.context(item), autoplay.context]
        itemlist.append(
            Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
                 show=scrapedtitle, context=context1, language=scrapedlang))
    tmdb.set_infoLabels(itemlist)
    # Paginacion

    # patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">'
    patron_pag = '<li class="next right"><a href="([^"]+)" >([^"]+)<\/a><\/li>'
    next_page_url = scrapertools.find_single_match(data, patron_pag)

    if next_page_url != "" and i != 1:
        item.url = next_page_url[0]
        itemlist.append(Item(channel=item.channel, action="lista_gen", title=">> Página siguiente",
                             url=next_page_url[0], thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))

    return itemlist


def lista(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<li><strong><a href="([^"]+)">([^"]+)<\/a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for link, name in matches:
        title = name
        url = link
        itemlist.append(item.clone(title=title, url=url, plot=title, action="episodios"))
    return itemlist


def episodios(item):
    logger.info()

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_caps = '<li><strong><a href="([^"]+)">(.+?)–(.+?)<\/a>'
    matches = scrapertools.find_multiple_matches(data, patron_caps)
    show = scrapertools.find_single_match(data, '<h3><strong>.+?de (.+?)<\/strong>')
    scrapedplot = scrapertools.find_single_match(data, '<strong>Sinopsis<\/strong><strong>([^"]+)<\/strong><\/pre>')
    for link, cap, name in matches:
        if 'x' in cap:
            title = cap + " - " + name
        else:
            season = 1
            episode = int(cap)
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, item.show, season, episode)
            date = name
            title = "{0}x{1:02d} {2} ({3})".format(
                season, episode, "Episodio " + str(episode), date)
            # title = cap + " - " + name
        url = link
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
                             plot=scrapedplot, show=show))

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=show))

    return itemlist


def findvideos(item):
    logger.info()

    itemlist = []

    data = httptools.downloadpage(item.url).data
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel

    autoplay.start(itemlist, item)

    return itemlist
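A worked example of the title formatting used in the deleted channel's episodios() above (the values are invented; renumbertools.numbered_for_tratk may remap season/episode when the user has configured a renumbering for the show):

season, episode = 1, 7
date = "12/05/2018"  # hypothetical air date scraped next to the link
title = "{0}x{1:02d} {2} ({3})".format(season, episode, "Episodio " + str(episode), date)
# title == "1x07 Episodio 7 (12/05/2018)"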
@@ -1,41 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "http://documentary.es/(\\d+[a-z0-9\\-]+)",
                "url": "http://documentary.es/\\1?embed"
            }
        ]
    },
    "free": true,
    "id": "documentary",
    "name": "documentary",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,42 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []

    data = scrapertools.cache_page(page_url)

    try:
        # var videoVars = {"videoNonceVar":"94767795ce","post_id":"2835"};
        videoNonceVar = scrapertools.get_match(data,
                                               'var\s*videoVars\s*\=\s*\{"videoNonceVar"\:"([^"]+)","post_id"\:"\d+"')
        post_id = scrapertools.get_match(data, 'var\s*videoVars\s*\=\s*\{"videoNonceVar"\:"[^"]+","post_id"\:"(\d+)"')

        # http://documentary.es/wp-admin/admin-ajax.php?postId=2835&videoNonce=94767795ce&action=getVideo&_=1385893877929
        import random
        url = "http://documentary.es/wp-admin/admin-ajax.php?postId=" + post_id + "&videoNonce=" + videoNonceVar + "&action=getVideo&_=" + str(
            random.randint(10000000000, 9999999999999))
        data = scrapertools.cache_page(url)

        # {"videoUrl":"http:\/\/www.dailymotion.com\/embed\/video\/xioggh?autoplay=1&defaultSubtitle=es"}
        data = data.replace("\\", "")
    except:
        pass

    from core import servertools
    real_urls = servertools.find_video_items(data=data)
    if len(real_urls) > 0:
        item = real_urls[len(real_urls) - 1]
        servermodule = __import__('servers.%s' % item.server, None, None, ["servers.%s" % item.server])
        # exec "import " + item.server
        # exec "servermodule = " + item.server
        video_urls = servermodule.get_video_url(item.url)

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
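The admin-ajax request that the deleted documentary.es server builds can be reproduced in isolation: postId and videoNonce are scraped from the page, and the trailing "_" parameter is only a cache-buster, so any large random integer works. A sketch using the example values from the inline comments above:

import random

post_id = "2835"            # example values from the inline comment
video_nonce = "94767795ce"
url = ("http://documentary.es/wp-admin/admin-ajax.php?postId=%s"
       "&videoNonce=%s&action=getVideo&_=%d"
       % (post_id, video_nonce, random.randint(10000000000, 9999999999999)))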
@@ -1,45 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "(fileflyer.com/view/[a-zA-Z0-9]+)",
                "url": "http://www.\\1"
            }
        ]
    },
    "free": true,
    "id": "fileflyer",
    "name": "fileflyer",
    "premium": [
        "realdebrid",
        "alldebrid"
    ],
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ]
}
@@ -1,32 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    # Vídeo borrado: http://www.fileflyer.com/view/fioZRBu
    # Video erróneo:
    data = scrapertools.cache_page(page_url)
    if '<a href="/RemoveDetail.aspx">' in data:
        return False, "El archivo ya no está disponible<br/>en fileflyer (ha sido borrado)"
    else:
        return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []

    data = scrapertools.cache_page(page_url)
    location = scrapertools.get_match(data,
                                      '<td class="dwnlbtn"[^<]+<a id="[^"]+" title="[^"]+" class="[^"]+" href="([^"]+)"')

    video_urls.append([scrapertools.get_filename_from_url(location)[-4:] + " [fileflyer]", location])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,42 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "filez.tv/(?:embed/u=)?([A-z0-9]+)",
                "url": "http://filez.tv/embed/u=\\1"
            }
        ]
    },
    "free": true,
    "id": "filez",
    "name": "filez",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "http://i.imgur.com/HasfjUH.png"
}
@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-

from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url, follow_redirects=False)

    if data.headers.get("location"):
        return False, "[filez] El archivo ha sido eliminado o no existe"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    data = httptools.downloadpage(page_url).data

    video_urls = []
    media_urls = scrapertools.find_multiple_matches(data, 'file\s*:\s*"([^"]+)",\s*type\s*:\s*"([^"]+)"')
    for media_url, ext in media_urls:
        video_urls.append([".%s [filez]" % ext, media_url])

    if not video_urls:
        media_urls = scrapertools.find_multiple_matches(data, '<embed.*?src="([^"]+)"')
        for media_url in media_urls:
            media_url = media_url.replace("https:", "http:")
            ext = httptools.downloadpage(media_url, only_headers=True).headers.get("content-disposition", "")
            ext = scrapertools.find_single_match(ext, 'filename="([^"]+)"')
            if ext:
                ext = ext[-4:]
            video_urls.append(["%s [filez]" % ext, media_url])

    return video_urls
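In the fallback branch of filez's get_video_url() above, the extension is inferred from the Content-Disposition header: the reported filename's last four characters become the label. A standalone sketch with an assumed header value:

import re

content_disposition = 'attachment; filename="movie.mp4"'  # assumed example
match = re.search(r'filename="([^"]+)"', content_disposition)
ext = match.group(1)[-4:] if match else ""
# ext == ".mp4"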
@@ -1,46 +0,0 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "movshare.net/(?:embed|video)/([a-z0-9]+)",
                "url": "http://www.movshare.net/video/\\1"
            },
            {
                "pattern": "movshare.net/embed.php\\?v\\=([a-z0-9]+)",
                "url": "http://www.movshare.net/video/\\1"
            }
        ]
    },
    "free": true,
    "id": "movshare",
    "name": "movshare",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "server_movshare.png"
}
@@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-

from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    data = scrapertools.cache_page(page_url)

    if "This file no longer exists on our servers" in data:
        return False, "El fichero ha sido borrado de movshare"

    return True, ""


# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)

    videoid = scrapertools.get_match(page_url, "http://www.movshare.net/video/([a-z0-9]+)")
    video_urls = []

    # Descarga la página
    headers = []
    headers.append(
        ['User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'])
    html = scrapertools.cache_page(page_url, headers=headers)

    # La vuelve a descargar, como si hubieras hecho click en el botón
    # html = scrapertools.cache_page(page_url, headers=headers)
    filekey = scrapertools.find_single_match(html, 'flashvars.filekey="([^"]+)"')

    # get stream url from api
    api = 'http://www.movshare.net/api/player.api.php?key=%s&file=%s' % (filekey, videoid)
    headers.append(['Referer', page_url])

    html = scrapertools.cache_page(api, headers=headers)
    logger.info("html=" + html)
    stream_url = scrapertools.find_single_match(html, 'url=(.+?)&title')

    if stream_url != "":
        video_urls.append([scrapertools.get_filename_from_url(stream_url)[-4:] + " [movshare]", stream_url])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-

import urllib

from core import httptools
from core import scrapertools
from platformcode import logger
@@ -15,17 +17,15 @@ def test_video_exists(page_url):

def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    data = httptools.downloadpage(page_url).data
    key = scrapertools.find_single_match(data, "var thief\s*=\s*'([^']+)'")
    data_vt = httptools.downloadpage("http://vidup.tv/jwv/%s" % key).data
    vt = scrapertools.find_single_match(data_vt, 'file\|direct\|(.*?)\|')
    # Extrae la URL
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(data, '\{"file"\:"([^"]+)","label"\:"([^"]+)"\}')
    for media_url, label in media_urls:
        ext = scrapertools.get_filename_from_url(media_url)[-4:]
        media_url += "?direct=false&ua=1&vt=%s" % vt
        video_urls.append(["%s (%s) [vidup]" % (ext, label), media_url])
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    post = {}
    post = urllib.urlencode(post)
    url = httptools.downloadpage(page_url, follow_redirects=False, only_headers=True).headers.get("location", "")
    data = httptools.downloadpage("https://vidup.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data
    bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
    matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
    for res, media_url in matches:
        video_urls.append(
            [scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [vidup.tv]", media_url])
    video_urls.reverse()
    return video_urls
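All of the get_video_url() implementations touched by this patch follow the same return convention: a list of [label, stream_url] pairs, logged as "label - url" and offered to the caller as playback options. A minimal consumer sketch (the sample entry is invented):

video_urls = [[".mp4 (720p) [vidup.tv]", "https://example.com/stream.mp4"]]
for label, stream_url in video_urls:
    # Same "label - url" convention the servers log via logger.info().
    print("%s - %s" % (label, stream_url))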