new channels - tvseriesdk and ver-peliculas

alfa-addon
2017-08-12 19:19:46 -04:00
parent cdec3f5c07
commit 7f6f22c79d
4 changed files with 501 additions and 0 deletions

View File

@@ -0,0 +1,40 @@
{
    "id": "tvseriesdk",
    "name": "TVSeriesdk",
    "active": true,
    "adult": false,
    "language": "es",
    "thumbnail": "https://s13.postimg.org/jrvqmqfnb/tvseriesdk.png",
    "banner": "https://s16.postimg.org/r6mbel0f9/tvseriesdk-banner.png",
    "version": 1,
    "categories": [
        "latino",
        "tvshow"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
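
A quick way to sanity-check a manifest like this outside Kodi is to parse it with the standard json module and dump the settings toggles. Below is a minimal sketch, assuming the manifest is saved as channels/tvseriesdk.json (a hypothetical path), written in Python 2 to match the channel code:

# Minimal sketch: load the channel manifest and list its settings.
# The file path is a hypothetical assumption, not taken from the commit.
import json

with open('channels/tvseriesdk.json') as f:  # hypothetical path
    manifest = json.load(f)

print '%s (id=%s, language=%s)' % (manifest['name'], manifest['id'], manifest['language'])
for setting in manifest.get('settings', []):
    print '  %s: default=%s, enabled=%s, visible=%s' % (
        setting['id'], setting['default'], setting['enabled'], setting['visible'])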

View File

@@ -0,0 +1,204 @@
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://www.tvseriesdk.com/'
def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(item.clone(title="Ultimos",
                               action="last_episodes",
                               url=host
                               ))
    itemlist.append(item.clone(title="Todas",
                               action="list_all",
                               url=host
                               ))
    itemlist.append(item.clone(title="Buscar",
                               action="search",
                               url='http://www.tvseriesdk.com/index.php?s='
                               ))
    return itemlist
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data
def list_all(item):
    logger.info()
    itemlist = []
    templist = []
    data = get_source(item.url)
    patron = '<li class=cat-item cat-item-\d+><a href=(.*?) title=(.*?)>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    url_next_page = ''
    next_page = 0
    if len(matches) > 10:
        # Paginate the full list ten entries at a time; item.i tracks the offset
        if item.next_page != 10:
            item.i = 0
        matches = matches[item.i:item.i + 10]
        next_page = 10
        url_next_page = item.url
    for scrapedurl, scrapedplot, scrapedtitle in matches:
        templist.append(item.clone(action='episodes',
                                   title=scrapedtitle,
                                   url=scrapedurl,
                                   thumbnail='',
                                   plot=scrapedplot,
                                   contentSerieName=scrapedtitle
                                   ))
    itemlist = get_thumb(templist)
    # Pagination
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>",
                                   url=url_next_page,
                                   next_page=next_page,
                                   i=item.i + 10
                                   ))
    return itemlist
def last_episodes(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<div class=pelis>.*?<a href=(.*?) title=(.*?)><img src=(.*?) alt='
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        itemlist.append(item.clone(action='episodes',
                                   title=scrapedtitle,
                                   url=scrapedurl,
                                   thumbnail=scrapedthumbnail
                                   ))
    return itemlist
def episodes(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<a href=(.*?) class=lcc>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    n_ep = 1
    for scrapedurl, scrapedtitle in matches[::-1]:
        scrapedtitle = re.sub(r'Capítulo \d+', '', scrapedtitle)
        title = '1x%s - %s' % (n_ep, scrapedtitle)
        itemlist.append(item.clone(action='findvideos',
                                   title=title,
                                   url=scrapedurl,
                                   contentEpisodeNumber=n_ep,
                                   contentSeasonNumber='1'
                                   ))
        n_ep += 1
    return itemlist
def get_thumb(itemlist):
    logger.info()
    for item in itemlist:
        data = get_source(item.url)
        item.thumbnail = scrapertools.find_single_match(data, '<div class=sinope><img src=(.*?) alt=')
    return itemlist
def search_list(item):
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'img title.*?src=(.*?) width=.*?class=tisearch><a href=(.*?)>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumb, scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle,
                                   url=scrapedurl,
                                   thumbnail=scrapedthumb,
                                   action='findvideos'
                                   ))
    # Pagination via the <link rel=next> tag
    next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) />')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
                             thumbnail=config.get_thumb("thumb_next.png")))
    return itemlist
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        return search_list(item)
    else:
        return []
def findvideos(item):
    logger.info()
    itemlist = []
    servers = {'netu': 'http://hqq.tv/player/embed_player.php?vid=',
               'open': 'https://openload.co/embed/',
               'netv': 'http://goo.gl/',
               'gamo': 'http://gamovideo.com/embed-',
               'powvideo': 'http://powvideo.net/embed-',
               'play': 'http://streamplay.to/embed-',
               'vido': 'http://vidoza.net/embed-'}
    data = get_source(item.url)
    patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for server, video_id in matches:
        if server not in servers:
            continue
        if server == 'netv':
            # goo.gl is a redirector: download the target page and let servertools scan it
            url = get_source(servers[server] + video_id)
        elif server in ['gamo', 'powvideo', 'play', 'vido']:
            url = servers[server] + video_id + '.html'
        else:
            url = servers[server] + video_id
        itemlist.extend(servertools.find_video_items(data=url))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.title = item.title + ' (%s)' % videoitem.server
        videoitem.action = 'play'
    return itemlist
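
A rough smoke test of the navigation chain above (mainlist → list_all → episodes → findvideos) can be sketched as follows, assuming the Alfa core packages are on the path; the channel import path is a hypothetical assumption:

# Sketch of a manual smoke test for the navigation chain; assumes the Alfa
# core packages are importable. The channel import path is hypothetical.
from core.item import Item
from channels import tvseriesdk  # hypothetical import path

root = tvseriesdk.mainlist(Item(channel='tvseriesdk'))
series = tvseriesdk.list_all(root[1])        # root[1] is the "Todas" entry
episodes = tvseriesdk.episodes(series[0])    # first series in the list
videos = tvseriesdk.findvideos(episodes[0])  # first episode
for video in videos:
    print video.title, video.url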

View File

@@ -0,0 +1,15 @@
{
    "id": "ver-peliculas",
    "thumbnail": "https://s14.postimg.org/98ulljwhd/ver-peliculas.png",
    "banner": "https://s2.postimg.org/4k1ivod2h/ver-peliculas-banner.png",
    "name": "Ver-peliculas",
    "active": true,
    "adult": false,
    "language": "es",
    "version": 1,
    "categories": [
        "movie",
        "latino",
        "direct"
    ]
}

View File

@@ -0,0 +1,242 @@
# -*- coding: utf-8 -*-
# -*- Channel Ver-peliculas -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import urlparse
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core.item import Item
from core import jsontools
from core import servertools
from core import tmdb
host = "http://ver-peliculas.io/"
def mainlist(item):
    logger.info()
    itemlist = list()
    itemlist.append(
        Item(channel=item.channel,
             title="Peliculas",
             action="listado",
             url=host + "peliculas/",
             thumbnail=config.get_thumb("thumb_channels_movie.png")
             ))
    itemlist.append(
        Item(channel=item.channel,
             title="Español",
             action="listado",
             url=host + "peliculas/en-espanol/"
             ))
    itemlist.append(
        Item(channel=item.channel,
             title="Latino",
             action="listado",
             url=host + "peliculas/en-latino/",
             thumbnail=config.get_thumb("thumb_channels_latino.png")
             ))
    itemlist.append(
        Item(channel=item.channel,
             title="Subtituladas",
             action="listado",
             url=host + "peliculas/subtituladas/",
             thumbnail=config.get_thumb("thumb_channels_vos.png")
             ))
    itemlist.append(
        Item(channel=item.channel,
             title="Categorias",
             action="categories",
             url=host
             ))
    itemlist.append(
        Item(channel=item.channel,
             title="Buscar",
             action="search",
             url=host + "core/ajax/suggest_search",
             thumbnail=config.get_thumb("thumb_search.png")
             ))
    return itemlist
def categories(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    section = scrapertools.find_single_match(data, '<ul class="sub-menu">(.*?)</ul>')
    matches = re.compile('<li><a href="([^"]+)"[^>]+>(.*?)</a>', re.DOTALL).findall(section)
    for url, title in matches:
        itemlist.append(Item(channel=item.channel,
                             action="listado",
                             title=title,
                             url=url
                             ))
    return itemlist
def search(item, texto):
    logger.info()
    try:
        itemlist = []
        post = "keyword=%s" % texto
        data = httptools.downloadpage(item.url, post=post).data
        data = data.replace('\\"', '"').replace('\\/', '/')
        logger.debug("data %s" % data)
        pattern = 'url\((.*?)\).+?<a href="([^"]+)".*?class="ss-title">(.*?)</a>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        for thumb, url, title in matches:
            itemlist.append(Item(channel=item.channel,
                                 action="findvideos",
                                 title=title,
                                 url=url,
                                 thumbnail=thumb
                                 ))
        return itemlist
    # Catch the exception so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def listado(item):
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    logger.debug(data)
    pattern = '<a href="([^"]+)"[^>]+><img (?:src)?(?:data-original)?="([^"]+)".*?alt="([^"]+)"'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    for url, thumb, title in matches:
        title = title.replace("Película", "", 1)
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title,
                             url=url,
                             thumbnail=thumb,
                             contentTitle=title
                             ))
    pagination = scrapertools.find_single_match(data, '<ul class="pagination">(.*?)</ul>')
    if pagination:
        next_page = scrapertools.find_single_match(pagination, '<a href="#">\d+</a>.*?<a href="([^"]+)">')
        if next_page:
            url = urlparse.urljoin(host, next_page)
            itemlist.append(Item(channel=item.channel,
                                 action="listado",
                                 title=">> Página siguiente",
                                 url=url,
                                 thumbnail=config.get_thumb("thumb_next.png")
                                 ))
    return itemlist
def get_source(url):
    logger.info()
    data = httptools.downloadpage(url, add_referer=True).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data
def findvideos(item):
    logger.info()
    itemlist = []
    duplicated = []
    data = get_source(item.url)
    video_info = scrapertools.find_single_match(data, "load_player\('(.*?)','(.*?)'\);")
    movie_info = scrapertools.find_single_match(item.url,
                                                'http:\/\/ver-peliculas\.io\/peliculas\/(\d+)-(.*?)-\d{4}-online\.')
    movie_id = movie_info[0]
    movie_name = movie_info[1]
    sub = video_info[1]
    url_base = 'http://ver-peliculas.io/core/api.php?id=%s&slug=%s' % (movie_id, movie_name)
    data = httptools.downloadpage(url_base).data
    json_data = jsontools.load(data)
    video_list = json_data['lista']
    for videoitem in video_list:
        video_base_url = 'http://ver-peliculas.io/core/videofinal.php'
        if video_list[videoitem] is not None:
            video_lang = video_list[videoitem]
            languages = ['latino', 'spanish', 'subtitulos']
            for lang in languages:
                if video_lang.get(lang) is not None:
                    if not isinstance(video_lang[lang], int):
                        video_id = video_lang[lang][0]["video"]
                        post = {"video": video_id, "sub": sub}
                        post = urllib.urlencode(post)
                        data = httptools.downloadpage(video_base_url, post=post).data
                        playlist = jsontools.load(data)
                        sources = playlist['playlist']
                        server = playlist['server']
                        for video_link in sources:
                            url = video_link['sources']
                            if 'onevideo' in url:
                                data = get_source(url)
                                g_urls = servertools.findvideos(data=data)
                                url = g_urls[0][1]
                                server = g_urls[0][0]
                            if url not in duplicated:
                                lang = lang.capitalize()
                                if lang == 'Spanish':
                                    lang = 'Español'
                                title = '(%s) %s (%s)' % (server, item.title, lang)
                                thumbnail = servertools.guess_server_thumbnail(server)
                                itemlist.append(item.clone(title=title,
                                                           url=url,
                                                           server=server,
                                                           thumbnail=thumbnail,
                                                           action='play'
                                                           ))
                                duplicated.append(url)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle
                 ))
    return itemlist
def newest(category):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if category == 'peliculas':
            item.url = host + "peliculas/"
        elif category == 'infantiles':
            item.url = host + 'categorias/peliculas-de-animacion.html'
        itemlist = listado(item)
        if itemlist[-1].title == '>> Página siguiente':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
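
The findvideos flow above reduces to two HTTP calls: core/api.php returns the per-movie listing keyed by language ('lista'), and core/videofinal.php resolves a video id to playable sources. A standalone sketch of that exchange, reusing the channel's own helpers; the movie id, slug, and empty sub value are hypothetical placeholders:

# Sketch of the two-step ver-peliculas API exchange used by findvideos().
# movie_id, slug and the empty sub value are hypothetical placeholders.
import urllib
from core import httptools
from core import jsontools

movie_id, slug = '1234', 'some-movie'  # hypothetical values
api_url = 'http://ver-peliculas.io/core/api.php?id=%s&slug=%s' % (movie_id, slug)
listing = jsontools.load(httptools.downloadpage(api_url).data)

for key in listing['lista']:
    block = listing['lista'][key]
    if block is None:
        continue
    for lang in ['latino', 'spanish', 'subtitulos']:
        entries = block.get(lang)
        if entries is None or isinstance(entries, int):
            continue
        post = urllib.urlencode({'video': entries[0]['video'], 'sub': ''})
        data = httptools.downloadpage('http://ver-peliculas.io/core/videofinal.php', post=post).data
        playlist = jsontools.load(data)
        print key, lang, playlist['server'], [s['sources'] for s in playlist['playlist']]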