Merge pull request #4 from alfa-addon/master

update
This commit is contained in:
alfa-jor
2017-08-13 11:09:01 +02:00
committed by GitHub
9 changed files with 879 additions and 29 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="1.5.2" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="1.5.3" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -18,17 +18,15 @@
<screenshot>resources/media/general/ss/4.jpg</screenshot>
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales arreglos[/B][/COLOR]
[I]- ver-pelis
- datoporn
- playpornx[/I]
[COLOR green][B]Servidor arreglado[/B][/COLOR]
- vimeo
- downace
[COLOR green][B]Novedades y mejoras[/B][/COLOR]
[I] - arreglos internos
- videoteca - conversion de peliculas fixed[/I]
</news>
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
[I]- ver-peliculas
- tvseriesdk
- oh-pelis
- seriesblanco
- allcalidad[/I]
[COLOR green]Gracias a[/COLOR] [COLOR yellow]k991293[/COLOR] [COLOR green]por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Descripción en Español</description>
<summary lang="en">English summary</summary>
<description lang="en">English description</description>

View File

@@ -75,12 +75,6 @@ def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '(?s)short_overlay.*?<a href="([^"]+)'
patron += '.*?img.*?src="([^"]+)'
patron += '.*?title="([^"]+)'
patron += '.*?kinopoisk">([^<]+)'
patron += '</span(.*?)small_rating'
patron = '(?s)short_overlay.*?<a href="([^"]+)'
patron += '.*?img.*?src="([^"]+)'
patron += '.*?title="(.*?)"'
@@ -118,13 +112,10 @@ def findvideos(item):
bloque = scrapertools.find_single_match(data, patron)
match = scrapertools.find_multiple_matches(bloque, '(?is)(?:iframe|script) .*?src="([^"]+)')
for url in match:
server = servertools.get_server_from_url(url)
titulo = "Ver en: " + server
if "youtube" in server:
if "embed" in url:
url = "http://www.youtube.com/watch?v=" + scrapertools.find_single_match(url, 'embed/(.*)')
titulo = "[COLOR = yellow]Ver trailer: " + server + "[/COLOR]"
elif "directo" in server:
titulo = "Ver en: %s"
if "youtube" in url:
titulo = "[COLOR = yellow]Ver trailer: %s[/COLOR]"
if "ad.js" in url or "script" in url:
continue
elif "vimeo" in url:
url += "|" + "http://www.allcalidad.com"
@@ -134,9 +125,10 @@ def findvideos(item):
title = titulo,
fulltitle = item.fulltitle,
thumbnail = item.thumbnail,
server = server,
server = "",
url = url
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",

View File

@@ -0,0 +1,43 @@
{
"id": "ohpelis",
"name": "OH-PELIS",
"compatible": {
"addon_version": "4.3"
},
"active": true,
"adult": false,
"language": "es",
"thumbnail": "https://s28.postimg.org/6v7ig831p/oh-pelis.png",
"banner": "https://s27.postimg.org/bz0fh8jpf/oh-pelis-banner.png",
"version": 1,
"categories": [
"latino",
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,316 @@
# -*- coding: utf-8 -*-
# -*- Channel OH-PELIS -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
host = 'http://www.ohpelis.com'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0 Chrome/58.0.3029.110',
'Referer': host}
def mainlist(item):
    """Build the channel root menu: movies, series and the search entry."""
    logger.info()
    entries = [
        {'title': "Peliculas", 'action': 'movies_menu'},
        {'title': "Series", 'action': 'series_menu'},
        {'title': "Buscar", 'action': 'search', 'url': 'http://www.ohpelis.com/?s='},
    ]
    return [item.clone(**entry) for entry in entries]
def series_menu(item):
    """Return the series sub-menu (a single 'all series' listing entry)."""
    logger.info()
    all_series = item.clone(title="Series",
                            action="list_all",
                            url=host + '/series/',
                            extra='serie')
    return [all_series]
def movies_menu(item):
    """Return the movies sub-menu: full listing plus genre and year filters."""
    logger.info()
    menu = []
    menu.append(item.clone(title="Todas", action="list_all",
                           url=host + '/peliculas/'))
    menu.append(item.clone(title="Generos", action="section",
                           url=host, extra='genres'))
    menu.append(item.clone(title="Por año", action="section",
                           url=host, extra='byyear'))
    return menu
def list_all(item):
    """List every title on the index page at item.url.

    Each result is routed to 'seasons' when item.extra == 'serie'
    (series mode), otherwise to 'findvideos' (movie mode).

    Fix: the pagination regex previously captured up to ``' />'`` with the
    closing quote inside the group, so the "next page" URL ended with a
    stray ``"`` character; the closing quote now terminates the capture.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures per poster: url, thumbnail, title, 4-digit year, plot.
    patron = '<div class="poster"><a href="(.*?)"><img src="(.*?)" alt="(.*?)"><\/a>.*?<span>(\d{4})<\/span>.*?'
    patron += '<div class="texto">(.*?)<div'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
        title = scrapedtitle
        new_item = item.clone(title=title,
                              url=scrapedurl,
                              thumbnail=scrapedthumbnail,
                              plot=scrapedplot,
                              infoLabels={'year': scrapedyear})
        if item.extra == 'serie':
            new_item.action = 'seasons'
            new_item.contentSerieName = title
        else:
            new_item.action = 'findvideos'
            new_item.contentTitle = title
        itemlist.append(new_item)
    # Enrich items with TMDB metadata (year helps disambiguation).
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginacion: the closing quote is now outside the captured group.
    next_page = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)" />')
    if next_page:
        itemlist.append(Item(channel=item.channel,
                             action="list_all",
                             title=">> Página siguiente",
                             url=next_page,
                             thumbnail=config.get_thumb("thumb_next.png")))
    return itemlist
def section(item):
    """List filter links (genres, years or alphabetical index).

    The regex used depends on item.extra: 'genres', 'byyear' or 'alpha'.
    Each match becomes an entry that re-enters list_all with the filter URL.
    NOTE(review): if item.extra is none of the three values, `patron` is
    unbound and the findall below raises NameError — presumably callers
    always set one of them; verify against the menu builders.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if item.extra == 'genres':
        patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)<\/a> <i>\d+<\/i>'
    elif item.extra == 'byyear':
        patron = '<li><a href="(http:\/\/www\.ohpelis\.com\/release.*?)">(.*?)<\/a><\/li>'
    elif item.extra == 'alpha':
        patron = '<li><a href="(http:\/\/www\.ohpelis\.com\/.*?)" >(.*?)<\/a><\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = scrapedurl
        itemlist.append(Item(channel=item.channel,
                             action='list_all',
                             title=title,
                             url=url
                             ))
    return itemlist
def search_list(item):
    """Parse the search-results page at item.url.

    The scraped CSS class (captured as `scrapedtype`) distinguishes movies
    ('movies' -> findvideos) from series (anything else -> seasons).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures per result: url, thumbnail, title, type class, year, plot.
    patron = '><div class="result-item">.*?<a href="(.*?)"><img src="(.*?)" alt="(.*?)" \/><span class="(.*?)".*?'
    patron += '<span class="year">(.*?)<\/span>.*?<p>(.*?)<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype, scrapedyear, scrapedplot in matches:
        title = scrapedtitle
        plot = scrapedplot
        thumbnail = scrapedthumbnail
        url = scrapedurl
        year = scrapedyear
        new_item = item.clone(action='',
                              title=title,
                              url=url,
                              thumbnail=thumbnail,
                              plot=plot,
                              infoLabels={'year': year})
        if scrapedtype == 'movies':
            new_item.action = 'findvideos'
            new_item.contentTitle = title
        else:
            new_item.action = 'seasons'
            new_item.contentSerieName = title
        itemlist.append(new_item)
    # Enrich with TMDB metadata before returning.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def search(item, texto):
    """Run a channel search for *texto*.

    Fix: an empty query previously fell off the end of the function and
    returned None; the global search aggregator iterates the result, so an
    empty list is returned instead (matching the tvseriesdk channel).
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        return search_list(item)
    return []
def seasons (item):
    """List the seasons of the series at item.url.

    Each season entry routes to 'episodesxseason'. When video-library
    support is enabled, an 'add series' entry is appended.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures season number and season title from the header spans.
    patron = '<span class="se-t(?: "| se-o")>(.*?)<\/span><span class="title">(.*?) <i>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedseason, scrapedtitle in matches:
        title = scrapedtitle
        contentSeasonNumber = scrapedseason
        # NOTE(review): the same infoLabels dict is shared by every clone,
        # so 'season' holds the last value unless clone() deep-copies it —
        # confirm Item.clone semantics.
        infoLabels['season']= scrapedseason
        itemlist.append(item.clone(title = title,
                                   contentSeasonNumber= contentSeasonNumber,
                                   action = 'episodesxseason',
                                   infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(item.clone(title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                                   url=item.url,
                                   action="add_serie_to_library",
                                   extra = 'episodes',
                                   contentSerieName=item.contentSerieName,
                                   ))
    return itemlist
def episodes (item):
    """List every episode of the series at item.url (all seasons).

    Used as the 'extra' entry point when adding a series to the video
    library (see seasons()).
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Captures: season, episode number, url, episode title.
    patron = '<div class="numerando">(\d+) - (\d+)<\/div><div class="episodiotitle"><a href="(.*?)">(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    # NOTE(review): the scraped episode number is ignored; a running
    # counter (num_ep) is used for numbering instead — confirm intended.
    num_ep = 1
    for scrapedseason, scrapedepisode, scrapedurl, scrapedtitle in matches:
        season = scrapedseason
        contentEpisodeNumber = num_ep
        url = scrapedurl
        title = '%sx%s - %s' % (season, num_ep, scrapedtitle)
        itemlist.append(item.clone(title = title,
                                   url = url,
                                   contentEpisodeNumber = contentEpisodeNumber,
                                   action = 'findvideos',
                                   infoLabels = infoLabels
                                   ))
        num_ep += 1
    return itemlist
def episodesxseason (item):
    """List the episodes of one season (item.contentSeasonNumber).

    The season number is interpolated into the regex so only rows of that
    season match.
    """
    logger.info()
    itemlist = []
    season = item.contentSeasonNumber
    data = httptools.downloadpage(item.url).data
    patron = '<div class="numerando">%s - (\d+)<\/div><div class="episodiotitle"><a href="(.*?)">(.*?)<\/a>'%season
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    # Running counter used for numbering; the scraped number is ignored.
    num_ep=1
    for scrapedepisode, scrapedurl, scrapedtitle in matches:
        title = '%sx%s - %s'%(season, num_ep, scrapedtitle)
        url = scrapedurl
        infoLabels['episode']= num_ep
        itemlist.append(item.clone(title = title,
                                   url=url,
                                   contentEpisodeNumber = num_ep,
                                   action = 'findvideos',
                                   infoLabels=infoLabels))
        num_ep +=1
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Find playable video links on the page at item.url.

    Delegates link detection to servertools.find_video_items and retitles
    each result; YouTube links are labelled as trailers. Appends the
    'add to video library' entry when supported and not already inside a
    library refresh (item.extra == 'findvideos').
    """
    logger.info()
    itemlist = []
    templist =[]
    data = httptools.downloadpage(item.url).data
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        if videoitem.server != 'youtube':
            videoitem.title = item.title+' (%s)'%videoitem.server
        else:
            videoitem.title = 'Trailer en %s' % videoitem.server
        videoitem.action = 'play'
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle,
                 ))
    return itemlist
def newest(categoria):
    """Feed the global 'Novedades' listing for this channel.

    Any scraping failure is logged and swallowed so one broken channel
    cannot break the aggregated novelty view.
    """
    logger.info()
    item = Item()
    itemlist = []
    urls = {
        'peliculas': host + '/release/2017/',
        'infantiles': host + '/genero/infantil/',
    }
    try:
        if categoria in urls:
            item.url = urls[categoria]
        itemlist = list_all(item)
        # Strip the trailing pagination pseudo-item.
        if itemlist[-1].title == '>> Página siguiente':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -255,7 +255,7 @@ def parse_videos(item, type_str, data):
def extract_videos_section(data):
return re.findall("panel-title(.+?)</div>[^<]*</div>[^<]*</div>", data, re.MULTILINE | re.DOTALL)
return re.findall("panel-title[^>]*>\s*([VvDd].+?)</div>[^<]*</div>[^<]*</div>", data, re.MULTILINE | re.DOTALL)
def findvideos(item):
@@ -275,10 +275,10 @@ def findvideos(item):
list_links = []
if filtro_enlaces != 0:
list_links.extend(parse_videos(item, "Ver", online[0]))
list_links.extend(parse_videos(item, "Ver", online[-2]))
if filtro_enlaces != 1:
list_links.extend(parse_videos(item, "Descargar", online[1]))
list_links.extend(parse_videos(item, "Descargar", online[-1]))
list_links = filtertools.get_links(list_links, item, list_idiomas, CALIDADES)

View File

@@ -0,0 +1,40 @@
{
"id": "tvseriesdk",
"name": "TVSeriesdk",
"active": true,
"adult": false,
"language": "es",
"thumbnail": "https://s13.postimg.org/jrvqmqfnb/tvseriesdk.png",
"banner": "https://s16.postimg.org/r6mbel0f9/tvseriesdk-banner.png",
"version": 1,
"categories": [
"latino",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,204 @@
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://www.tvseriesdk.com/'
def mainlist(item):
    """Root menu of the channel: latest episodes, full index and search."""
    logger.info()
    entries = [
        ("Ultimos", "last_episodes", host),
        ("Todas", "list_all", host),
        ("Buscar", "search", 'http://www.tvseriesdk.com/index.php?s='),
    ]
    return [item.clone(title=title, action=action, url=url)
            for title, action, url in entries]
def get_source(url):
    """Download *url* and return its HTML with quotes, control characters
    and redundant whitespace stripped (simplifies the regex scraping)."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def list_all (item):
    """List series categories in pages of 10 entries.

    Pagination state travels on the item itself: ``item.i`` is the offset
    of the slice shown, ``item.next_page == 10`` marks a follow-up call.

    Fixes over the previous version:
    - removed a stray ``global i`` (the global was never defined);
    - the paginated branch assigned the slice to ``patron`` instead of
      ``matches``, so page 2+ re-listed everything;
    - the offset never advanced, so 'Siguiente' always showed page 1;
    - ``url_next_page``/``next_page`` could be unbound when there were 10
      or fewer matches (NameError at the pagination check);
    - ``contentErieName`` typo corrected to ``contentSerieName``.
    """
    logger.info()
    itemlist = []
    templist = []
    data = get_source(item.url)
    patron = '<li class=cat-item cat-item-\d+><a href=(.*?) title=(.*?)>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # Offset of the slice to display; 0 on the first call.
    offset = item.i if item.next_page == 10 else 0
    url_next_page = ''
    next_page = ''
    if len(matches) > offset + 10:
        url_next_page = item.url
        next_page = 10
    matches = matches[offset:offset + 10]
    for scrapedurl, scrapedplot, scrapedtitle in matches:
        templist.append(item.clone(action='episodes',
                                   title=scrapedtitle,
                                   url=scrapedurl,
                                   thumbnail='',
                                   plot=scrapedplot,
                                   contentSerieName=scrapedtitle
                                   ))
    # Resolve a thumbnail for each entry (one extra request per item).
    itemlist = get_thumb(templist)
    ## Paginación
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>",
                                   url=url_next_page,
                                   next_page=next_page,
                                   i=offset + 10
                                   ))
    return itemlist
def last_episodes (item):
    """List the most recently published episodes from the front page."""
    logger.info ()
    itemlist = []
    data = get_source(item.url)
    # Captures: url, title, thumbnail of each 'pelis' card.
    patron = '<div class=pelis>.*?<a href=(.*?) title=(.*?)><img src=(.*?) alt='
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        itemlist.append(item.clone(action='episodes',
                                   title=title,
                                   url=url,
                                   thumbnail = thumbnail
                                   ))
    return itemlist
def episodes (item):
    """List the episodes of the series at item.url.

    The page lists newest first, so matches are iterated in reverse and
    numbered sequentially as season 1 episodes.
    """
    logger.info ()
    itemlist = []
    data = get_source(item.url)
    patron = '<a href=(.*?) class=lcc>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    n_ep= 1
    for scrapedurl, scrapedtitle in matches[::-1]:
        url = scrapedurl
        # Strip the redundant 'Capítulo N' prefix from the scraped title.
        scrapedtitle = re.sub(r'Capítulo \d+', '', scrapedtitle)
        title = '1x%s - %s'%(n_ep, scrapedtitle)
        itemlist.append(item.clone(action='findvideos',
                                   title=title,
                                   url=url,
                                   contentEpisodeNumber = n_ep,
                                   contentSeasonNumber = '1'
                                   ))
        n_ep +=1
    return itemlist
def get_thumb(itemlist):
    """Fill in item.thumbnail for every item by fetching its detail page.

    NOTE(review): this performs one HTTP request per item, which can make
    long listings slow.
    """
    logger.info()
    for item in itemlist:
        data = get_source(item.url)
        item.thumbnail = scrapertools.find_single_match(data,'<div class=sinope><img src=(.*?) alt=')
    return itemlist
def search_list(item):
    """Parse the search-results page at item.url and paginate via the
    <link rel=next> header (quotes were stripped by get_source)."""
    logger.info()
    itemlist =[]
    data= get_source(item.url)
    # Captures: thumbnail, url, title of each search hit.
    patron = 'img title.*?src=(.*?) width=.*?class=tisearch><a href=(.*?)>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumb, scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = scrapedurl
        thumbnail = scrapedthumb
        itemlist.append(item.clone(title=title,
                                   url=url,
                                   thumbnail=thumbnail,
                                   action='findvideos'
                                   ))
    #Pagination < link
    next_page = scrapertools.find_single_match(data, '<link rel=next href=(.*?) />')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="search_list", title='>> Pagina Siguiente', url=next_page,
                             thumbnail=config.get_thumb("thumb_next.png")))
    return itemlist
def search(item, texto):
    """Append the query to the search URL and delegate to search_list.

    Returns an empty list for an empty query so the global search
    aggregator always gets an iterable.
    """
    logger.info()
    query = texto.replace(" ", "+")
    item.url = item.url + query
    if not query:
        return []
    return search_list(item)
def findvideos(item):
    """Resolve playable links from the per-server tabs of the episode page.

    Each tab contains a <script>server(id)</script> call; the server short
    name is mapped to an embed URL prefix via the `servers` dict.
    NOTE(review): a server name scraped from the page that is missing from
    `servers` raises KeyError in the first branch — confirm the site only
    emits the known names.
    """
    logger.info()
    itemlist=[]
    # Short server tag -> embed URL prefix.
    servers = {'netu':'http://hqq.tv/player/embed_player.php?vid=',
               'open':'https://openload.co/embed/',
               'netv':'http://goo.gl/',
               'gamo':'http://gamovideo.com/embed-',
               'powvideo':'http://powvideo.net/embed-',
               'play':'http://streamplay.to/embed-',
               'vido':'http://vidoza.net/embed-'}
    data = get_source(item.url)
    patron = 'id=tab\d+.*?class=tab_content><script>(.*?)\((.*?)\)<\/script>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for server, video_id in matches:
        if server not in ['gamo', 'powvideo', 'play', 'vido', 'netv']:
            url = servers[server]+video_id
        elif server == 'netv':
            # goo.gl shortener: fetch the page so find_video_items can
            # detect the real host from its contents.
            url = get_source(servers[server]+video_id)
        else:
            url = servers[server]+video_id+'.html'
        itemlist.extend(servertools.find_video_items(data=url))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.title = item.title+' (%s)'%videoitem.server
        videoitem.action = 'play'
    return itemlist

View File

@@ -0,0 +1,15 @@
{
"id": "ver-peliculas",
"thumbnail":"https://s14.postimg.org/98ulljwhd/ver-peliculas.png",
"banner":"https://s2.postimg.org/4k1ivod2h/ver-peliculas-banner.png",
"name": "Ver-peliculas",
"active": true,
"adult": false,
"language": "es",
"version": 1,
"categories": [
"movie",
"latino",
"direct"
]
}

View File

@@ -0,0 +1,242 @@
# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import urlparse
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core.item import Item
from core import jsontools
from core import servertools
from core import tmdb
host = "http://ver-peliculas.io/"
def mainlist(item):
    """Root menu for the channel: language listings, categories and search."""
    logger.info()
    # (title, action, url, thumbnail-or-None) for every root entry.
    entries = [
        ("Peliculas", "listado", host + "peliculas/",
         config.get_thumb("thumb_channels_movie.png")),
        ("Español", "listado", host + "peliculas/en-espanol/", None),
        ("Latino", "listado", host + "peliculas/en-latino/",
         config.get_thumb("thumb_channels_latino.png")),
        ("Subtituladas", "listado", host + "peliculas/subtituladas/",
         config.get_thumb("thumb_channels_vos.png")),
        ("Categorias", "categories", host, None),
        ("Buscar", "search", host + "core/ajax/suggest_search",
         config.get_thumb("thumb_search.png")),
    ]
    itemlist = list()
    for title, action, url, thumb in entries:
        extra = {} if thumb is None else {"thumbnail": thumb}
        itemlist.append(Item(channel=item.channel,
                             title=title,
                             action=action,
                             url=url,
                             **extra))
    return itemlist
def categories(item):
    """List the genre links found in the site's <ul class="sub-menu">."""
    logger.info()
    # Collapse whitespace so the regexes work on a single line.
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    section = scrapertools.find_single_match(data, '<ul class="sub-menu">(.*?)</ul>')
    matches = re.compile('<li><a href="([^"]+)"[^>]+>(.*?)</a>', re.DOTALL).findall(section)
    for url, title in matches:
        itemlist.append(Item(channel=item.channel,
                             action="listado",
                             title=title,
                             url=url
                             ))
    return itemlist
def search(item, texto):
    """POST the query to the site's AJAX suggest endpoint and parse hits.

    Returns [] on any failure so the global search aggregator is never
    interrupted by this channel.
    """
    logger.info()
    try:
        itemlist = []
        post = "keyword=%s" % texto
        data = httptools.downloadpage(item.url, post=post).data
        # The endpoint returns JSON-escaped HTML; unescape it first.
        data = data.replace('\\"', '"').replace('\\/', '/')
        logger.debug("data %s" % data)
        pattern = 'url\((.*?)\).+?<a href="([^"]+)".*?class="ss-title">(.*?)</a>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        for thumb, url, title in matches:
            itemlist.append(Item(channel=item.channel,
                                 action="findvideos",
                                 title=title,
                                 url=url,
                                 thumbnail=thumb
                                 ))
        return itemlist
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def listado(item):
    """List the movies on the index page at item.url, with pagination."""
    logger.info()
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    logger.debug (data)
    # src OR data-original attribute carries the thumbnail (lazy loading).
    pattern = '<a href="([^"]+)"[^>]+><img (?:src)?(?:data-original)?="([^"]+)".*?alt="([^"]+)"'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    for url, thumb, title in matches:
        # Drop the leading 'Película' label from the alt text.
        title = title.replace("Película", "", 1)
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title,
                             url=url,
                             thumbnail=thumb,
                             contentTitle=title
                             ))
    pagination = scrapertools.find_single_match(data, '<ul class="pagination">(.*?)</ul>')
    if pagination:
        # The link right after the current ('#') page is the next page.
        next_page = scrapertools.find_single_match(pagination, '<a href="#">\d+</a>.*?<a href="([^"]+)">')
        if next_page:
            url = urlparse.urljoin(host, next_page)
            itemlist.append(Item(channel=item.channel,
                                 action="listado",
                                 title=">> Página siguiente",
                                 url=url,
                                 thumbnail=config.get_thumb("thumb_next.png"
                                                            )))
    return itemlist
def get_source(url):
    """Download *url* (sending a Referer header) and return the HTML with
    quotes, control characters and redundant whitespace removed."""
    logger.info()
    raw = httptools.downloadpage(url, add_referer=True).data
    return re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def findvideos(item):
    """Resolve playable links for the movie at item.url.

    Flow: scrape the player id/subtitle from the page, query the site's
    JSON API for the per-language video list, then POST each video id to
    videofinal.php to obtain the final sources. Duplicate URLs are
    skipped via the `duplicated` list.
    """
    logger.info()
    itemlist =[]
    duplicated =[]
    data= get_source(item.url)
    # video_info[1] is the subtitle token passed to videofinal.php.
    video_info = scrapertools.find_single_match(data, "load_player\('(.*?)','(.*?)'\);")
    # Movie id and slug are embedded in the canonical URL.
    movie_info= scrapertools.find_single_match(item.url, 'http:\/\/ver-peliculas\.io\/peliculas\/(\d+)-(.*?)-\d{'
                                                         '4}-online\.')
    movie_id = movie_info[0]
    movie_name = movie_info[1]
    sub = video_info[1]
    url_base='http://ver-peliculas.io/core/api.php?id=%s&slug=%s'%(movie_id, movie_name)
    data = httptools.downloadpage(url_base).data
    json_data = jsontools.load(data)
    # presumably maps quality/source keys to per-language entries — TODO confirm
    video_list = json_data['lista']
    itemlist =[]
    for videoitem in video_list:
        video_base_url = 'http://ver-peliculas.io/core/videofinal.php'
        if video_list[videoitem] != None:
            video_lang = video_list[videoitem]
            languages = ['latino', 'spanish', 'subtitulos']
            for lang in languages:
                if video_lang[lang] != None:
                    # An int entry means "no video" for that language.
                    if not isinstance(video_lang[lang],int):
                        video_id = video_lang[lang][0]["video"]
                        post = {"video":video_id , "sub": sub}
                        post = urllib.urlencode(post)
                        data = httptools.downloadpage(video_base_url, post=post).data
                        playlist = jsontools.load(data)
                        # ['playlist'][0] == 'playlist' (indexing a literal
                        # list); effectively playlist['playlist'].
                        sources = playlist[['playlist'][0]]
                        server = playlist['server']
                        for video_link in sources:
                            url = video_link['sources']
                            if 'onevideo' in url:
                                # 'onevideo' pages embed the real server;
                                # resolve it through servertools.
                                data= get_source(url)
                                g_urls = servertools.findvideos(data=data)
                                url = g_urls[0][1]
                                server = g_urls[0][0]
                            if url not in duplicated:
                                lang = lang.capitalize()
                                if lang == 'Spanish':
                                    lang = 'Español'
                                title = '(%s) %s (%s)' % (server, item.title, lang)
                                thumbnail = servertools.guess_server_thumbnail(server)
                                itemlist.append(item.clone(title= title,
                                                           url=url,
                                                           server=server,
                                                           thumbnail=thumbnail,
                                                           action='play'
                                                           ))
                                duplicated.append(url)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle
                 ))
    return itemlist
def newest(category):
    """Feed the global 'Novedades' listing for this channel.

    Fix: the previous code called the undefined name ``lista`` — the
    resulting NameError was swallowed by the broad except, so this channel
    always contributed an empty list. The listing function is ``listado``.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if category == 'peliculas':
            item.url = host + "peliculas/"
        elif category == 'infantiles':
            item.url = host + 'categorias/peliculas-de-animacion.html'
        itemlist = listado(item)  # was: lista(item) — undefined name
        # Strip the trailing pagination pseudo-item.
        if itemlist[-1].title == '>> Página siguiente':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist