Merge pull request #39 from Alfa-beto/Fixes

Fixes Varios
This commit is contained in:
Alfa
2017-08-17 01:56:38 +02:00
committed by GitHub
5 changed files with 204 additions and 205 deletions

View File

@@ -345,13 +345,13 @@ def temporadas(item):
itemlist = []
templist = []
data = httptools.downloadpage(item.url).data
data = data.replace ('"',"'")
realplot = ''
patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)</button>"
patron = "<button class='classnamer' onclick='javascript: mostrarcapitulos.*?blank'>([^<]+)<\/button>"
matches = re.compile(patron, re.DOTALL).findall(data)
serieid = scrapertools.find_single_match(data, 'data-nonce="(.*?)"')
serieid = scrapertools.find_single_match(data, "data-nonce='(.*?)'")
item.thumbnail = item.thumbvid
infoLabels = item.infoLabels
for scrapedtitle in matches:
@@ -408,6 +408,7 @@ def episodiosxtemp(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = data.replace('"', "'")
patron = "<button class='classnamer' onclick='javascript: mostrarenlaces\(([^\)]+)\).*?<"
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -640,7 +641,7 @@ def play(item):
logger.info()
data = httptools.downloadpage(item.url).data
if 'streamplay' not in item.server or 'streame' not in item.server:
if item.server not in ['streamplay','streame']:
url = scrapertools.find_single_match(data, '<(?:IFRAME|iframe).*?(?:SRC|src)=*([^ ]+) (?!style|STYLE)')
else:
url = scrapertools.find_single_match(data, '<meta http-equiv="refresh" content="0; url=([^"]+)">')

View File

@@ -1,182 +1,185 @@
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = 'http://www.tvseriesdk.com/'
def mainlist(item):
    """Build the channel's root menu: latest episodes, full listing and search."""
    logger.info()
    entries = [
        ("Ultimos", "last_episodes", host),
        ("Todas", "list_all", host),
        ("Buscar", "search", 'http://www.tvseriesdk.com/index.php?s='),
    ]
    return [item.clone(title=t, action=a, url=u) for t, a, u in entries]
def get_source(url):
    """Download *url* and strip quotes, whitespace runs and a few HTML artifacts."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def list_all(item):
    """List every show on the index page, 10 entries per page.

    ``item.next_page`` flags that a follow-up page was requested and
    ``item.i`` is the offset into the full match list.
    """
    logger.info()
    templist = []
    data = get_source(item.url)
    patron = '<li class=cat-item cat-item-\d+><a href=(.*?) title=(.*?)>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # Default to "no pagination item" so the check below is always safe
    # (the original left these unbound when there were 10 results or fewer).
    url_next_page = ''
    next_page = 0
    if len(matches) > 10:
        if item.next_page != 10:
            # First page: take the first 10 and start the offset at 0.
            url_next_page = item.url
            matches = matches[:10]
            next_page = 10
            item.i = 0
        else:
            # Follow-up page: take the next window of 10 from the offset.
            # (The original assigned this slice to ``patron`` by mistake,
            # so paging re-listed the same first 10 items.)
            matches = matches[item.i:][:10]
            next_page = 10
            url_next_page = item.url
    for scrapedurl, scrapedplot, scrapedtitle in matches:
        url = scrapedurl
        plot = scrapedplot
        contentSerieName = scrapedtitle
        title = contentSerieName
        templist.append(item.clone(action='episodios',
                                   title=title,
                                   url=url,
                                   thumbnail='',
                                   plot=plot,
                                   # fixed kwarg typo: was "contentErieName"
                                   contentSerieName=contentSerieName
                                   ))
    itemlist = get_thumb(templist)
    # Pagination
    if url_next_page:
        # NOTE(review): item.i is never advanced, so every "Siguiente" page
        # shows the same window — presumably it should grow by 10 per page;
        # confirm the intended paging before changing it.
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, next_page=next_page, i=item.i))
    return itemlist
def last_episodes(item):
    """Scrape the home page for the most recently published episodes."""
    logger.info()
    page = get_source(item.url)
    patron = '<div class=pelis>.*?<a href=(.*?) title=(.*?)><img src=(.*?) alt='
    found = re.compile(patron, re.DOTALL).findall(page)
    itemlist = []
    for link, name, thumb in found:
        itemlist.append(item.clone(action='findvideos',
                                   title=name,
                                   url=link,
                                   thumbnail=thumb))
    return itemlist
def episodios(item):
    """List a show's episodes as season 1, numbered from the oldest."""
    logger.info()
    data = get_source(item.url)
    patron = '<a href=(.*?) class=lcc>(.*?)<\/a>'
    links = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    # The page lists newest first; walk it backwards so episode 1 comes first.
    for n_ep, (ep_url, ep_title) in enumerate(links[::-1], start=1):
        clean_title = re.sub(r'Capítulo \d+', '', ep_title)
        itemlist.append(item.clone(action='findvideos',
                                   title='1x%s - %s' % (n_ep, clean_title),
                                   url=ep_url,
                                   contentEpisodeNumber=n_ep,
                                   contentSeasonNumber='1'))
    return itemlist
def get_thumb(itemlist):
    """Fill each item's thumbnail by fetching its page and scraping the poster."""
    logger.info()
    for entry in itemlist:
        page = get_source(entry.url)
        entry.thumbnail = scrapertools.find_single_match(page, '<div class=sinope><img src=(.*?) alt=')
    return itemlist
def search_list(item):
    """Render one page of search results, appending a next-page link when present."""
    logger.info()
    data = get_source(item.url)
    patron = 'img title.*?src=(.*?) width=.*?class=tisearch><a href=(.*?)>(.*?)<\/a>'
    results = re.compile(patron, re.DOTALL).findall(data)
    itemlist = [item.clone(title=name, url=link, thumbnail=thumb, action='findvideos')
                for thumb, link, name in results]
    # Pagination < link
    next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) />')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
                             thumbnail=config.get_thumb("thumb_next.png")))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*; an empty query yields no results."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    if not query:
        return []
    return search_list(item)
def findvideos(item):
    """Resolve the embedded player tabs on an episode page into playable items."""
    logger.info()
    servers = {'netu': 'http://hqq.tv/player/embed_player.php?vid=',
               'open': 'https://openload.co/embed/',
               'netv': 'http://goo.gl/',
               'gamo': 'http://gamovideo.com/embed-',
               'powvideo': 'http://powvideo.net/embed-',
               'play': 'http://streamplay.to/embed-',
               'vido': 'http://vidoza.net/embed-'}
    data = get_source(item.url)
    patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'
    itemlist = []
    for server, video_id in re.compile(patron, re.DOTALL).findall(data):
        base = servers[server] + video_id
        if server == 'netv':
            # Shortened link: fetch it to land on the real embed page.
            url = get_source(base)
        elif server in ['gamo', 'powvideo', 'play', 'vido']:
            url = base + '.html'
        else:
            url = base
        itemlist.extend(servertools.find_video_items(data=url))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.title = item.title + ' (%s)' % videoitem.server
        videoitem.action = 'play'
    return itemlist
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
host = 'http://www.tvseriesdk.com/'
def mainlist(item):
    """Build the channel's root menu: latest episodes, full listing and search."""
    logger.info()
    entries = [
        ("Ultimos", "last_episodes", host),
        ("Todas", "list_all", host),
        ("Buscar", "search", 'http://www.tvseriesdk.com/index.php?s='),
    ]
    return [item.clone(title=t, action=a, url=u) for t, a, u in entries]
def get_source(url):
    """Download *url* and strip quotes, whitespace runs and a few HTML artifacts."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def list_all(item):
    """List every show on the index page, 10 entries per page.

    ``item.next_page`` flags that a follow-up page was requested and
    ``item.i`` is the offset into the full match list.
    """
    logger.info()
    templist = []
    data = get_source(item.url)
    patron = '<li class=cat-item cat-item-\d+><a href=(.*?) title=(.*?)>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # Default to "no pagination item" so the check below is always safe
    # (the original left these unbound when there were 10 results or fewer).
    url_next_page = ''
    next_page = 0
    if len(matches) > 10:
        if item.next_page != 10:
            # First page: take the first 10 and start the offset at 0.
            url_next_page = item.url
            matches = matches[:10]
            next_page = 10
            item.i = 0
        else:
            # Follow-up page: take the next window of 10 from the offset.
            # (The original assigned this slice to ``patron`` by mistake,
            # so paging re-listed the same first 10 items.)
            matches = matches[item.i:][:10]
            next_page = 10
            url_next_page = item.url
    for scrapedurl, scrapedplot, scrapedtitle in matches:
        url = scrapedurl
        plot = scrapedplot
        contentSerieName = scrapedtitle
        title = contentSerieName
        templist.append(item.clone(action='episodios',
                                   title=title,
                                   url=url,
                                   thumbnail='',
                                   plot=plot,
                                   # fixed kwarg typo: was "contentErieName"
                                   contentSerieName=contentSerieName
                                   ))
    itemlist = get_thumb(templist)
    # Pagination
    if url_next_page:
        # NOTE(review): item.i is never advanced, so every "Siguiente" page
        # shows the same window — presumably it should grow by 10 per page;
        # confirm the intended paging before changing it.
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, next_page=next_page, i=item.i))
    return itemlist
def last_episodes(item):
    """Scrape the home page for the most recently published episodes."""
    logger.info()
    page = get_source(item.url)
    patron = '<div class=pelis>.*?<a href=(.*?) title=(.*?)><img src=(.*?) alt='
    found = re.compile(patron, re.DOTALL).findall(page)
    itemlist = []
    for link, name, thumb in found:
        itemlist.append(item.clone(action='findvideos',
                                   title=name,
                                   url=link,
                                   thumbnail=thumb))
    return itemlist
def episodios(item):
    """List a show's episodes as season 1, numbered from the oldest."""
    logger.info()
    data = get_source(item.url)
    patron = '<a href=(.*?) class=lcc>(.*?)<\/a>'
    links = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    # The page lists newest first; walk it backwards so episode 1 comes first.
    for n_ep, (ep_url, ep_title) in enumerate(links[::-1], start=1):
        clean_title = re.sub(r'Capítulo \d+', '', ep_title)
        itemlist.append(item.clone(action='findvideos',
                                   title='1x%s - %s' % (n_ep, clean_title),
                                   url=ep_url,
                                   contentEpisodeNumber=n_ep,
                                   contentSeasonNumber='1'))
    return itemlist
def get_thumb(itemlist):
    """Fill each item's thumbnail by fetching its page and scraping the poster."""
    logger.info()
    for entry in itemlist:
        page = get_source(entry.url)
        entry.thumbnail = scrapertools.find_single_match(page, '<div class=sinope><img src=(.*?) alt=')
    return itemlist
def search_list(item):
    """Render one page of search results, appending a next-page link when present."""
    logger.info()
    data = get_source(item.url)
    patron = 'img title.*?src=(.*?) width=.*?class=tisearch><a href=(.*?)>(.*?)<\/a>'
    results = re.compile(patron, re.DOTALL).findall(data)
    itemlist = [item.clone(title=name, url=link, thumbnail=thumb, action='findvideos')
                for thumb, link, name in results]
    # Pagination < link
    next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) />')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
                             thumbnail=config.get_thumb("thumb_next.png")))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*; an empty query yields no results."""
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    if not query:
        return []
    return search_list(item)
def findvideos(item):
    """Resolve the player tabs on an episode page into playable items.

    If the page still shows the "uploading" banner, return a single
    placeholder entry instead of video links.
    """
    logger.info()
    servers = {'netu': 'http://hqq.tv/player/embed_player.php?vid=',
               'open': 'https://openload.co/embed/',
               'netv': 'http://goo.gl/',
               'gamo': 'http://gamovideo.com/embed-',
               'powvideo': 'http://powvideo.net/embed-',
               'play': 'http://streamplay.to/embed-',
               'vido': 'http://vidoza.net/embed-'}
    data = get_source(item.url)
    noemitido = scrapertools.find_single_match(data, '<p><img src=(http://darkiller.com/images/subiendo.png) border=0\/><\/p>')
    patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []
    if noemitido:
        # Episode not yet available: emit a non-playable notice.
        itemlist.append(item.clone(title='Este capitulo aun no esta disponible', action='', url=''))
        return itemlist
    for server, video_id in matches:
        base = servers[server] + video_id
        if server == 'netv':
            # Shortened link: fetch it to land on the real embed page.
            url = get_source(base)
        elif server in ['gamo', 'powvideo', 'play', 'vido']:
            url = base + '.html'
        else:
            url = base
        itemlist.extend(servertools.find_video_items(data=url))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.title = item.title + ' (%s)' % videoitem.server
        videoitem.action = 'play'
    return itemlist

View File

@@ -9,7 +9,6 @@
"version": 1,
"categories": [
"movie",
"latino",
"direct"
"latino"
]
}

View File

@@ -15,7 +15,7 @@ from core import servertools
from core.item import Item
from platformcode import config, logger
host = "http://ver-peliculas.io/"
host = "http://ver-peliculas.org/"
def mainlist(item):
@@ -156,18 +156,18 @@ def findvideos(item):
data = get_source(item.url)
video_info = scrapertools.find_single_match(data, "load_player\('(.*?)','(.*?)'\);")
movie_info = scrapertools.find_single_match(item.url, 'http:\/\/ver-peliculas\.io\/peliculas\/(\d+)-(.*?)-\d{'
movie_info = scrapertools.find_single_match(item.url, 'http:\/\/ver-peliculas\.org\/peliculas\/(\d+)-(.*?)-\d{'
'4}-online\.')
movie_id = movie_info[0]
movie_name = movie_info[1]
sub = video_info[1]
url_base = 'http://ver-peliculas.io/core/api.php?id=%s&slug=%s' % (movie_id, movie_name)
url_base = 'http://ver-peliculas.org/core/api.php?id=%s&slug=%s' % (movie_id, movie_name)
data = httptools.downloadpage(url_base).data
json_data = jsontools.load(data)
video_list = json_data['lista']
itemlist = []
for videoitem in video_list:
video_base_url = 'http://ver-peliculas.io/core/videofinal.php'
video_base_url = 'http://ver-peliculas.org/core/videofinal.php'
if video_list[videoitem] != None:
video_lang = video_list[videoitem]
languages = ['latino', 'spanish', 'subtitulos']
@@ -184,12 +184,12 @@ def findvideos(item):
for video_link in sources:
url = video_link['sources']
if 'onevideo' in url:
data = get_source(url)
g_urls = servertools.findvideos(data=data)
url = g_urls[0][1]
server = g_urls[0][0]
if url not in duplicated:
# if 'onevideo' in url:
# data = get_source(url)
# g_urls = servertools.findvideos(data=data)
# url = g_urls[0][1]
# server = g_urls[0][0]
if url not in duplicated and server!='drive':
lang = lang.capitalize()
if lang == 'Spanish':
lang = 'Español'

View File

@@ -26,12 +26,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
referer = re.sub(r"embed-|player-", "", page_url)[:-5]
data = httptools.downloadpage(page_url, headers={'Referer': referer}).data
for list in scrapertools.find_multiple_matches(data, '_[^=]+=(\[[^\]]+\]);'):
if len(list) == 703 or len(list) == 711:
key = "".join(eval(list)[7:9])
break
if key.startswith("embed"):
key = key[6:] + key[:6]
matches = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
data = jsunpack.unpack(matches).replace("\\", "")
@@ -40,7 +35,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
for video_url in matches:
_hash = scrapertools.find_single_match(video_url, '[A-z0-9\_\-]{40,}')
hash = decrypt(_hash, key)
hash = _hash[::-1]
hash = hash.replace(hash[2:3],"",1)
video_url = video_url.replace(_hash, hash)
filename = scrapertools.get_filename_from_url(video_url)[-4:]