@@ -148,12 +148,13 @@ def findvideos(item):
                                 action = "play",
                                 title = calidad,
                                 fulltitle = item.title,
                                 thumbnail = item.thumbnail,
                                 contentThumbnail = item.thumbnail,
                                 url = url,
                                 language = IDIOMAS['Latino']
                                 ))
    tmdb.set_infoLabels(itemlist, seekTmdb = True)
    itemlist = servertools.get_servers_itemlist(itemlist)
    tmdb.set_infoLabels(itemlist, seekTmdb = True)
    itemlist.append(Item(channel=item.channel))
    if config.get_videolibrary_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
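Note: this hunk follows Alfa's usual channel flow: build play Items first, then hand the list to servertools.get_servers_itemlist to identify a server connector per URL, with tmdb.set_infoLabels filling metadata. A minimal standalone sketch of that flow with stand-in types (the real core modules only exist inside the addon):

    # Stand-ins for illustration only; these are not Alfa's real classes.
    class Item(object):
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    def get_servers_itemlist(itemlist):
        # mimic servertools: tag each item with a server guessed from its URL
        for it in itemlist:
            it.server = 'streamango' if 'streamango' in it.url else 'unknown'
        return itemlist

    items = [Item(action='play', url='https://streamango.com/embed/abc', language='LAT')]
    for it in get_servers_itemlist(items):
        print('%s %s' % (it.server, it.url))   # streamango https://streamango.com/embed/abc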
plugin.video.alfa/channels/cinedetodo.json (new file, 75 lines)
@@ -0,0 +1,75 @@
{
    "id": "cinedetodo",
    "name": "CINEDETODO",
    "active": true,
    "adult": false,
    "language": ["lat"],
    "thumbnail": "https://s31.postimg.cc/win1ffxyj/cinedetodo.png",
    "banner": "",
    "version": 1,
    "categories": [
        "movies"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "LAT"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - Terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_documentales",
            "type": "bool",
            "label": "Incluir en Novedades - Documentales",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
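The settings block above follows Alfa's channel JSON schema: bool settings gate features, and list settings store an index into lvalues. A small sketch of how such an entry resolves to its label (plain json parsing; inside Kodi the addon reads these through platformcode's config helpers, an assumption not shown in this diff):

    import json

    channel_json = '''{"settings": [{"id": "filter_languages", "type": "list",
                       "default": 0, "lvalues": ["No filtrar", "LAT"]}]}'''

    for setting in json.loads(channel_json)["settings"]:
        if setting["type"] == "list":
            # "default" is an index into "lvalues"
            print('%s -> %s' % (setting["id"], setting["lvalues"][setting["default"]]))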
plugin.video.alfa/channels/cinedetodo.py (new file, 207 lines)
@@ -0,0 +1,207 @@
# -*- coding: utf-8 -*-
# -*- Channel CineDeTodo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re
import urllib

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools


host = 'http://www.cinedetodo.com/'

IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = list()
    itemlist.append(item.clone(title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
    itemlist.append(item.clone(title="Generos", action="section", section='genre',
                               thumbnail=get_thumb('genres', auto=True)))
    # itemlist.append(item.clone(title="Por Calidad", action="section", section='quality',
    #                            thumbnail=get_thumb('quality', auto=True)))
    itemlist.append(item.clone(title="Alfabetico", action="section", section='alpha',
                               thumbnail=get_thumb('alphabet', auto=True)))
    itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
                               thumbnail=get_thumb('search', auto=True)))

    autoplay.show_option(item.channel, itemlist)

    return itemlist


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    if item.section == 'alpha':
        patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
        patron += '<td>(\d{4})</td>'
    else:
        patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
        patron += '<h3 class=Title>(.*?)<\/h3>.*?<span class=Year>(.*?)<\/span>'
        data = get_source(item.url)
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:

        url = scrapedurl
        if "|" in scrapedtitle:
            scrapedtitle = scrapedtitle.split("|")
            contentTitle = scrapedtitle[0].strip()
        else:
            contentTitle = scrapedtitle

        contentTitle = re.sub('\(.*?\)', '', contentTitle)

        title = '%s [%s]' % (contentTitle, year)
        thumbnail = 'http:'+scrapedthumbnail
        itemlist.append(item.clone(action='findvideos',
                                   title=title,
                                   url=url,
                                   thumbnail=thumbnail,
                                   contentTitle=contentTitle,
                                   infoLabels={'year': year}
                                   ))
    tmdb.set_infoLabels_itemlist(itemlist, True)

    # Pagination

    url_next_page = scrapertools.find_single_match(data, '<a class=next.*?href=(.*?)>')
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
    return itemlist


def section(item):
    logger.info()
    itemlist = []

    data = get_source(host)

    action = 'list_all'
    if item.section == 'quality':
        patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
    elif item.section == 'genre':
        patron = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
    elif item.section == 'year':
        patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
    elif item.section == 'alpha':
        patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
        action = 'list_all'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for data_one, data_two in matches:

        url = data_one
        title = data_two
        if title != 'Ver más':
            new_item = Item(channel=item.channel, title=title, url=url, action=action, section=item.section)
            itemlist.append(new_item)

    return itemlist


def findvideos(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    data = scrapertools.decodeHtmlentities(data)

    patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?</iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, scrapedurl in matches:
        scrapedurl = scrapedurl.replace('"', '').replace('&amp;', '&')
        data_video = get_source(scrapedurl)
        url = scrapertools.find_single_match(data_video, '<div class=Video>.*?src=(.*?) frameborder')
        opt_data = scrapertools.find_single_match(data, '%s><span>.*?</span>.*?<span>(.*?)</span>' % option).split('-')
        language = opt_data[0].strip()
        language = language.replace('(', '').replace(')', '')
        quality = opt_data[1].strip()
        if url != '' and 'youtube' not in url:
            itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS[language], quality=quality, action='play'))
        elif 'youtube' in url:
            trailer = item.clone(title='Trailer', url=url, action='play', server='youtube')

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]' % (i.server.capitalize(),
                                                                                                i.language, i.quality))
    tmdb.set_infoLabels_itemlist(itemlist, True)
    try:
        itemlist.append(trailer)
    except:
        pass

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto

    if texto != '':
        return list_all(item)
    else:
        return []


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host
        elif categoria == 'infantiles':
            item.url = host+'/animacion'
        elif categoria == 'terror':
            item.url = host+'/terror'
        elif categoria == 'documentales':
            item.url = host+'/documental'
        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
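get_source above strips quotes, control characters, &nbsp; and runs of whitespace precisely so the channel's regexes can match unquoted attributes (href=(.*?) class). A quick standalone illustration of that normalization:

    import re

    html = '<a href="http://www.cinedetodo.com/peli" class="Button">\n\t<strong>Titulo&nbsp;(2018)</strong>'
    clean = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", html)
    # quotes, tabs, newlines and &nbsp; are gone; single spaces survive:
    # <a href=http://www.cinedetodo.com/peli class=Button><strong>Titulo(2018)</strong>
    print(re.search('href=(.*?) class', clean).group(1))   # http://www.cinedetodo.com/peli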
@@ -274,7 +274,7 @@ def findvideos(item):
        #title = '%s [%s]' % (item.title, language)
        itemlist.append(item.clone(title='[%s] [%s]', url=url, action='play', subtitle=subs,
                                   language=language, quality=quality, infoLabels = item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    # Required for link filtering

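This hunk relies on the same deferred-title trick the cinedetodo channel uses: the Item title holds %s placeholders that the callback given to get_servers_itemlist fills in once the server is known. The idea in isolation:

    items = [{'title': '[%s] [%s]', 'server': 'openload', 'language': 'Latino'}]

    def label(it):
        # fill the placeholders only after the server has been identified
        return it['title'] % (it['server'].capitalize(), it['language'])

    for it in items:
        it['title'] = label(it)
    print(items[0]['title'])   # [Openload] [Latino]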
@@ -38,8 +38,8 @@ def mainlist(item):
        Item(channel=item.channel,
             title="Español",
             action="listado",
             url=host + "peliculas/en-espanol/"
             ))
             url=host + "peliculas/en-espanol/",
             thumbnail = get_thumb("channels_spanish.png")))
    itemlist.append(
        Item(channel=item.channel,
             title="Latino",
@@ -54,9 +54,10 @@ def mainlist(item):
             thumbnail=get_thumb("channels_vos.png")))
    itemlist.append(
        Item(channel=item.channel,
             title="Categorias",
             title="Generos",
             action="categories",
             url=host
             url=host,
             thumbnail=get_thumb('genres', auto=True)
             ))
    itemlist.append(
        Item(channel=item.channel,
@@ -95,7 +96,6 @@ def search(item, texto):
    post = "keyword=%s" % texto
    data = httptools.downloadpage(item.url, post=post).data
    data = data.replace('\\"', '"').replace('\\/', '/')
    logger.debug("data %s" % data)

    pattern = 'url\((.*?)\).+?<a href="([^"]+)".*?class="ss-title">(.*?)</a>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
@@ -146,14 +146,6 @@ def listado(item):
                             title=">> Página siguiente",
                             url=url,
                             thumbnail=get_thumb("next.png")))

    for item in itemlist:
        if item.infoLabels['plot'] == '':
            data = httptools.downloadpage(item.url).data
            item.plot = scrapertools.find_single_match(data, '<div class="desc">([^<]+)</div>').strip()
            item.fanart = scrapertools.find_single_match(data, '<meta property="og:image" content="([^"]+)"/>')

    return itemlist
@@ -172,10 +164,13 @@ def findvideos(item):
    video_info = scrapertools.find_single_match(data, "load_player\('([^']+).*?([^']+)")
    movie_info = scrapertools.find_single_match(item.url,
                                                'http:\/\/ver-peliculas\.(io|org)\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')

    movie_host = movie_info[0]
    movie_id = movie_info[1]
    movie_name = movie_info[2]
    sub = video_info[1]
    movie_id = scrapertools.find_single_match(data, 'id=idpelicula value=(.*?)>')
    movie_name = scrapertools.find_single_match(data, 'id=nombreslug value=(.*?)>')
    sub = scrapertools.find_single_match(data, 'id=imdb value=(.*?)>')
    sub = '%s/subtix/%s.srt' % (movie_host, sub)
    url_base = 'http://ver-peliculas.%s/core/api.php?id=%s&slug=%s' % (movie_host, movie_id, movie_name)
    data = httptools.downloadpage(url_base).data
    json_data = jsontools.load(data)
@@ -185,8 +180,10 @@ def findvideos(item):
    video_base_url = host + '/core/videofinal.php'
    if video_list[videoitem] != None:
        video_lang = video_list[videoitem]
        languages = ['latino', 'spanish', 'subtitulos']
        languages = ['latino', 'spanish', 'subtitulos', 'subtitulosp']
        for lang in languages:
            if lang not in video_lang:
                continue
            if video_lang[lang] != None:
                if not isinstance(video_lang[lang], int):
                    video_id = video_lang[lang][0]["video"]
@@ -199,15 +196,20 @@ def findvideos(item):
            for video_link in sources:
                url = video_link['sources']
                if url not in duplicated and server != 'drive':
                    lang = lang.capitalize()
                    if lang == 'Spanish':

                    if lang == 'spanish':
                        lang = 'Español'
                    elif 'sub' in lang:
                        lang = 'Subtitulada'
                    lang = lang.capitalize()
                    title = 'Ver en %s [' + lang + ']'
                    thumbnail = servertools.guess_server_thumbnail(server)
                    itemlist.append(item.clone(title=title,
                                               url=url,
                                               thumbnail=thumbnail,
                                               action='play'
                                               action='play',
                                               language=lang

                                               ))
                    duplicated.append(url)
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
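The loop above both normalizes the API's language keys and drops URLs it has already emitted. The dedup/normalize core, run on made-up sample data:

    # -*- coding: utf-8 -*-
    duplicated = []
    links = [('spanish', 'http://a/1'), ('subtitulos', 'http://a/2'), ('spanish', 'http://a/1')]
    for lang, url in links:
        if url in duplicated:
            continue
        if lang == 'spanish':
            lang = 'Español'
        elif 'sub' in lang:
            lang = 'Subtitulada'
        duplicated.append(url)
        print('%s %s' % (lang, url))   # each URL once, with a display-friendly language tag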
plugin.video.alfa/channels/wikiseries.json (new file, 37 lines)
@@ -0,0 +1,37 @@
{
    "id": "wikiseries",
    "name": "WikiSeries",
    "active": true,
    "adult": false,
    "language": ["lat", "cast", "vo", "vose"],
    "thumbnail": "https://s31.postimg.cc/tnmcrytnv/16142379_1847422438815031_3788419094563167644_n.jpg",
    "banner": "",
    "categories": [
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino",
                "Español",
                "VOSE",
                "VO"
            ]
        }
    ]
}
plugin.video.alfa/channels/wikiseries.py (new file, 251 lines)
@@ -0,0 +1,251 @@
# -*- coding: utf-8 -*-
# -*- Channel wikiseries -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

host = 'http://www.wikiseriesonline.nu/'

list_language = ['Latino', 'Español', 'VOSE', 'VO']
list_quality = []
list_servers = ['openload']

def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data

def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(
        Item(channel=item.channel, title="Nuevos Capitulos", action="list_all", url=host + 'category/episode',
             thumbnail=get_thumb('new episodes', auto=True)))

    itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'category/serie',
                         thumbnail=get_thumb('all', auto=True)))

    itemlist.append(Item(channel=item.channel, title="Generos", action="genres",
                         url=host + 'latest-episodes', thumbnail=get_thumb('genres', auto=True)))

    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
                         thumbnail=get_thumb('search', auto=True)))

    itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
    autoplay.show_option(item.channel, itemlist)
    return itemlist

def list_all(item):
    logger.info()

    itemlist = []

    data = get_source(item.url)
    patron = '39;src=.*?(http.*?)style=display:.*?one-line href=(.*?) title=.*?>(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        url = scrapedurl
        scrapedtitle = scrapedtitle.replace('×', 'x')

        contentSerieName = scrapedtitle
        action = 'seasons'

        if 'episode' in item.url:
            scrapedtitle, season, episode = scrapertools.find_single_match(scrapedtitle, '(.*?) (\d+)x(\d+)')
            contentSerieName = scrapedtitle
            scrapedtitle = '%sx%s - %s' % (season, episode, scrapedtitle)
            action = 'findvideos'

        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
                        thumbnail=thumbnail, contentSerieName=contentSerieName, action=action,
                        context=filtertools.context(item, list_language, list_quality))

        if 'episode' in item.url:
            new_item.contentSeasonNumber = season
            new_item.contentepisodeNumber = episode
            new_item.context = []

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    next_page = scrapertools.find_single_match(data, 'rel=next href=(.*?)>»</a>')
    if next_page != '':
        itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
                             url=next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                             type=item.type))
    return itemlist


def genres(item):

    itemlist = []

    data = get_source(host)
    patron = '<li> <a href=(/category/.*?)>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:

        if scrapedtitle != 'Series':
            itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=host + scrapedurl, action='list_all'))

    return itemlist


def seasons(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)

    patron = 'data-season-num=1>(.*?)</span>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedseason in matches:
        contentSeasonNumber = scrapedseason
        title = 'Temporada %s' % scrapedseason
        infoLabels['season'] = contentSeasonNumber

        itemlist.append(Item(channel=item.channel, action='episodesxseason', url=item.url, title=title,
                             contentSeasonNumber=contentSeasonNumber, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName,
                 extra1='library'))

    return itemlist

def all_episodes(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseason(tempitem)
    return itemlist

def episodesxseason(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    season = item.contentSeasonNumber
    patron = '<li class=ep-list-item id=s%se(\d+)>.*?<a href=(.*?) >.*?name>(.*?)<.*?class=lgn (.*?)</a>' % season

    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedepi, scrapedurl, scrapedtitle, languages in matches:
        url = scrapedurl
        language = scrapertools.find_multiple_matches(languages, 'title=(.*?)>')
        contentEpisodeNumber = scrapedepi
        title = '%sx%s - %s %s' % (season, contentEpisodeNumber, scrapedtitle, language)
        infoLabels['episode'] = contentEpisodeNumber
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
                             contentSerieName=item.contentSerieName, contentEpisodeNumber=contentEpisodeNumber,
                             language=language, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist

def search(item, text):
    logger.info()

    item.url = item.url + text
    item.text = text
    item.type = 'search'
    if text != '':
        #return list_all(item)
        return search_results(item)


def search_results(item):
    import urllib
    itemlist = []
    headers = {"Origin": "http://www.wikiseriesonline.nu",
               "Accept-Encoding": "gzip, deflate", "Host": "www.wikiseriesonline.nu",
               "Accept-Language": "es-ES,es;q=0.8,en;q=0.6",
               "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
               "Accept": "*/*", "Referer": item.url,
               "X-Requested-With": "XMLHttpRequest", "Connection": "keep-alive", "Content-Length": "7"}
    post = {"n": item.text}
    post = urllib.urlencode(post)
    url = host + 'wp-content/themes/wikiSeries/searchajaxresponse.php'
    data = httptools.downloadpage(url, post=post, headers=headers).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    patron = "<!-- .Posts -->.*?<a href=(.*?)>.*?src=(.*?) .*?titleinst>(.*?)<"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        if item.text.lower() in scrapedtitle.lower():
            itemlist.append(Item(channel=item.channel, title=scrapedtitle, contentSerieName=scrapedtitle, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, action='seasons',
                                 context=filtertools.context(item, list_language, list_quality)))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


def findvideos(item):

    itemlist = []
    data = get_source(item.url)
    patron = '<a href=(/reproductor.*?)target'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for link in matches:
        video_data = get_source(host+link)
        language = ''
        if 'latino' in link.lower():
            language = 'Latino'
        elif 'español' in link.lower():
            language = 'Español'
        elif 'subtitulado' in link.lower():
            language = 'VOSE'
        elif 'vo' in link.lower():
            language = 'VO'

        url = scrapertools.find_single_match(video_data, '<iframe src=(.*?) scrolling')
        title = '%s [%s]'

        itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=language,
                             infoLabels=item.infoLabels))

    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
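search_results above posts a single form field, n, to a theme AJAX endpoint. A hedged sketch of the same request shape using only the standard library (Python 2, matching the addon's Kodi runtime of the era; the real httptools call sends more headers than shown):

    import urllib
    import urllib2

    post = urllib.urlencode({"n": "lost"})   # the search term
    req = urllib2.Request('http://www.wikiseriesonline.nu/wp-content/themes/wikiSeries/searchajaxresponse.php',
                          data=post,
                          headers={"X-Requested-With": "XMLHttpRequest",
                                   "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"})
    html = urllib2.urlopen(req).read()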
@@ -203,13 +203,17 @@ def trakt_check(itemlist):
    id_result = ''
    # check = u'\u221a'
    check = 'v'
    get_sync_from_file()
    synced = False
    try:
        for item in itemlist:
            info = item.infoLabels

            if info != '' and info['mediatype'] in ['movie', 'episode'] and item.channel != 'videolibrary':

                if not synced:
                    get_sync_from_file()
                    synced = True

                mediatype = 'movies'
                id_type = 'tmdb'
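The change above replaces an unconditional get_sync_from_file() with a lazy load guarded by a synced flag, so the file is read at most once per call and only if some item actually needs checking. The guard pattern in isolation:

    synced = False

    def ensure_synced():
        # read the sync file at most once, and only when first needed
        global synced
        if not synced:
            print('loading sync data...')   # stands in for get_sync_from_file()
            synced = True

    for needs_check in [False, True, True]:
        if needs_check:
            ensure_synced()                  # loads only on the first checked item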
@@ -3,6 +3,10 @@
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "clipwatching.com/(e.*?.html)",
                "url": "http://clipwatching.com/\\1"
            },
            {
                "pattern": "clipwatching.com/(\\w+)",
                "url": "http://clipwatching.com/\\1.html"
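Each entry pairs a regex pattern with a url template holding \\1-style backreferences; servertools uses the pair to turn links found in a page into canonical embed URLs. The mechanics, reproduced with re alone:

    import re

    pattern = r"clipwatching.com/(\w+)"
    template = "http://clipwatching.com/\\1.html"

    m = re.search(pattern, "see https://clipwatching.com/abc123 here")
    if m:
        print(m.expand(template))   # http://clipwatching.com/abc123.html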
@@ -7,31 +7,21 @@ from platformcode import logger

def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    response = httptools.downloadpage(page_url)
    if response.code == 404:
        return False, "[Dailymotion] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url='%s')" % page_url)
    video_urls = []

    response = httptools.downloadpage(page_url, cookies=False)
    cookie = {'Cookie': response.headers["set-cookie"]}
    data = response.data.replace("\\", "")

    '''
    "240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]
    '''

    subtitle = scrapertools.find_single_match(data, '"subtitles":.*?"es":.*?urls":\["([^"]+)"')
    qualities = scrapertools.find_multiple_matches(data, '"([^"]+)":(\[\{"type":".*?\}\])')
    for calidad, urls in qualities:
        if calidad == "auto":
            continue
        patron = '"type":"(?:video|application)/([^"]+)","url":"([^"]+)"'
        matches = scrapertools.find_multiple_matches(urls, patron)
        for stream_type, stream_url in matches:
@@ -41,10 +31,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
                                             follow_redirects=False).headers.get("location", stream_url)
            else:
                data_m3u8 = httptools.downloadpage(stream_url).data
                stream_url = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
                stream_url_http = scrapertools.find_single_match(data_m3u8, '(http:.*?\.m3u8)')
                if stream_url_http:
                    stream_url = stream_url_http
            video_urls.append(["%sp .%s [dailymotion]" % (calidad, stream_type), stream_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
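The player config parsed here maps quality labels to lists of typed stream URLs, exactly as the inline sample in the code shows. Re-running the two regexes against that sample:

    import re

    data = '"240":[{"type":"video/mp4","url":"http://www.dailymotion.com/cdn/H264-320x240/video/x33mvht.mp4?auth=1441130963-2562-u49z9kdc-84796332ccab3c7ce84e01c67a18b689"}]'
    for calidad, urls in re.findall('"([^"]+)":(\[\{"type":".*?\}\])', data):
        for stream_type, stream_url in re.findall('"type":"(?:video|application)/([^"]+)","url":"([^"]+)"', urls):
            print("%sp .%s %s" % (calidad, stream_type, stream_url))   # 240p .mp4 http://...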
plugin.video.alfa/servers/gounlimited.json (new file, 42 lines)
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https://gounlimited.to/embed-(.*?).html",
                "url": "https://gounlimited.to/embed-\\1.html"
            }
        ]
    },
    "free": true,
    "id": "gounlimited",
    "name": "gounlimited",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "Incluir en lista negra",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "Incluir en lista de favoritos",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://s31.postimg.cc/bsiaj2q2j/goo.png"
}
plugin.video.alfa/servers/gounlimited.py (new file, 32 lines)
@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# GoUnlimited connector By Alfa development Group
# --------------------------------------------------------

import re
from core import httptools
from platformcode import logger
from core import scrapertools
from lib import jsunpack


def test_video_exists(page_url):
    data = httptools.downloadpage(page_url).data
    if data == "File was deleted":
        return False, "[gounlimited] El video ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    packed_data = scrapertools.find_single_match(data, "javascript'>(eval.*?)</script>")
    unpacked = jsunpack.unpack(packed_data)
    patron = "file:(.*?),label:(.*?)}"
    matches = re.compile(patron, re.DOTALL).findall(unpacked)
    for url, quality in matches:
        video_urls.append(['%s' % quality, url])
    video_urls.sort(key=lambda x: int(x[0]))
    return video_urls
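GoUnlimited wraps its sources in packed JavaScript, which lib.jsunpack reverses before the file/label regex runs; the labels are numeric heights, so the final sort orders streams by quality. The sort step with made-up matches:

    matches = [('http://cdn/a.mp4', '720'), ('http://cdn/b.mp4', '360')]
    video_urls = [['%s' % quality, url] for url, quality in matches]
    video_urls.sort(key=lambda x: int(x[0]))   # numeric sort: '360' before '720'
    print(video_urls[0])                       # ['360', 'http://cdn/b.mp4']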
@@ -8,45 +8,38 @@ from core import jsontools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)

    # http://netu.tv/watch_video.php=XX only contains a redirect; go straight to http://hqq.tv/player/embed_player.php?vid=XX
    page_url = page_url.replace("http://netu.tv/watch_video.php?v=", "http://hqq.tv/player/embed_player.php?vid=")

    data = httptools.downloadpage(page_url).data

    if "var userid = '';" in data.lower():
        return False, "[netutv] El archivo no existe o ha sido borrado"

    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)

    if "hash=" in page_url:
        data = urllib.unquote(httptools.downloadpage(page_url).data)
        id_video = scrapertools.find_single_match(data, "vid\s*=\s*'([^']+)'")
        id_video = scrapertools.find_single_match(data, "vid':'([^']+)'")
    else:
        id_video = page_url.rsplit("=", 1)[1]
    page_url_hqq = "http://hqq.watch/player/embed_player.php?vid=%s&autoplay=no" % id_video
    data_page_url_hqq = httptools.downloadpage(page_url_hqq, add_referer=True).data

    js_wise = scrapertools.find_single_match(data_page_url_hqq,
                                             "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
    data_unwise = jswise(js_wise).replace("\\", "")
    at = scrapertools.find_single_match(data_unwise, 'var at\s*=\s*"([^"]+)"')
    http_referer = scrapertools.find_single_match(data_unwise, 'var http_referer\s*=\s*"([^"]+)"')

    url = "http://hqq.watch/sec/player/embed_player.php?iss=&vid=%s&at=%s&autoplayed=yes&referer=on" \
          "&http_referer=%s&pass=&embed_from=&need_captcha=0&hash_from=" % (id_video, at, http_referer)
    data_player = httptools.downloadpage(url, add_referer=True).data

    data_unescape = scrapertools.find_multiple_matches(data_player, 'document.write\(unescape\("([^"]+)"')
    data = ""
    for d in data_unescape:
        data += urllib.unquote(d)

    subtitle = scrapertools.find_single_match(data, 'value="sublangs=Spanish.*?sub=([^&]+)&')
    if not subtitle:
        subtitle = scrapertools.find_single_match(data, 'value="sublangs=English.*?sub=([^&]+)&')
@@ -55,7 +48,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
                                             "<script type=[\"']text/javascript[\"']>\s*;?(eval.*?)</script>")
    if js_wise:
        data_unwise_player = jswise(js_wise).replace("\\", "")

    vars_data = scrapertools.find_single_match(data, '/player/get_md5.php",\s*\{(.*?)\}')
    matches = scrapertools.find_multiple_matches(vars_data, '\s*([^:]+):\s*([^,]*)[,"]')
    params = {}
@@ -69,22 +61,17 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
        if not value_var and data_unwise_player:
            value_var = scrapertools.find_single_match(data_unwise_player, 'var\s*%s\s*=\s*"([^"]+)"' % value)
        params[key] = value_var

    params = urllib.urlencode(params)
    head = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
    data = httptools.downloadpage("http://hqq.watch/player/get_md5.php?" + params, headers=head).data

    media_urls = []
    url_data = jsontools.load(data)
    media_url = tb(url_data["html5_file"].replace("#", ""))

    media_url = "https:" + tb(url_data["obf_link"].replace("#", "")) + ".mp4.m3u8"
    video_urls = []
    media = media_url + "|User-Agent=Mozilla/5.0 (iPhone; CPU iPhone OS 5_0_1 like Mac OS X)"
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [netu.tv]", media, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls

@@ -95,7 +82,6 @@ def tb(b_m3u8_2):
    while j < len(b_m3u8_2):
        s2 += "\\u0" + b_m3u8_2[j:(j + 3)]
        j += 3

    return s2.decode('unicode-escape').encode('ASCII', 'ignore')

@@ -105,15 +91,12 @@ def tb(b_m3u8_2):
def jswise(wise):
    ## js2python
    def js_wise(wise):

        w, i, s, e = wise

        v0 = 0;
        v1 = 0;
        v2 = 0
        v3 = [];
        v4 = []

        while True:
            if v0 < 5:
                v4.append(w[v0])
@@ -131,12 +114,10 @@ def jswise(wise):
                v3.append(s[v2])
                v2 += 1
            if len(w) + len(i) + len(s) + len(e) == len(v3) + len(v4) + len(e): break

        v5 = "".join(v3);
        v6 = "".join(v4)
        v1 = 0
        v7 = []

        for v0 in range(0, len(v3), 2):
            v8 = -1
            if ord(v6[v1]) % 2: v8 = 1
@@ -144,7 +125,6 @@ def jswise(wise):
            v1 += 1
            if v1 >= len(v4): v1 = 0
        return "".join(v7)

    ## loop2unobfuscated
    while True:
        wise = re.search("var\s.+?\('([^']+)','([^']+)','([^']+)','([^']+)'\)", wise, re.DOTALL)
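tb above is the de-obfuscation step for the player's link fields: the input is a run of 3-hex-digit chunks, each turned into a \u0XXX escape and then decoded. A py2-style round trip on a made-up input:

    def tb(b_m3u8_2):
        # rebuild \u0XXX escapes from consecutive 3-hex-digit chunks, then decode
        s2 = ""
        j = 0
        while j < len(b_m3u8_2):
            s2 += "\\u0" + b_m3u8_2[j:(j + 3)]
            j += 3
        return s2.decode('unicode-escape').encode('ASCII', 'ignore')

    print(tb("068074074070"))   # -> http   (Python 2; on py3 decode via the codecs module)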
@@ -14,7 +14,8 @@ def test_video_exists(page_url):

    if "Streaming link:" in data:
        return True, ""
    elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data or "This stream doesn" in data:
    elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data or "This stream doesn" in data\
            or "Page not found" in data:
        return False, "[Uptobox] El archivo no existe o ha sido borrado"
    wait = scrapertools.find_single_match(data, "You have to wait ([0-9]+) (minute|second)")
    if len(wait) > 0: