Merge remote-tracking branch 'alfa-addon/master'

unknown authored 2017-10-24 08:25:18 -03:00
16 changed files with 71 additions and 815 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.2.3" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.2.4" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,12 +19,12 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» animeyt » pelismundo
» asialiveaction » animeflv_me
» newpct1 » wopelis
» gvideo » powvideo
¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] y [COLOR yellow]robalo[/COLOR] por su colaboración en esta versión[/COLOR]
» playmax » playpornx
» canalporno » divxatope
» flashx » verpeliculasnuevas
» animeflv_me » hdfull
» pelismundo » downace
» gamovideo ¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -12,14 +12,14 @@ from core import servertools
from core.item import Item
from platformcode import config, logger
CHANNEL_HOST = "http://animeflv.me/"
CHANNEL_HOST = "http://animeflv.co"
CHANNEL_DEFAULT_HEADERS = [
["User-Agent", "Mozilla/5.0"],
["Accept-Encoding", "gzip, deflate"],
["Referer", CHANNEL_HOST]
]
REGEX_NEXT_PAGE = r"class='current'>\d+?</li><li><a href=\"([^']+?)\""
REGEX_NEXT_PAGE = "class='current'>\d+?</li><li><a href='([^']+?)'"
REGEX_TITLE = r'(?:bigChar_a" href=.+?>)(.+?)(?:</a>)'
REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"'
REGEX_PLOT = r'<span class="info">Línea de historia:</span><p><span>(.*?)</span>'
@@ -61,14 +61,6 @@ def get_cookie_value():
header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.co&Cookie=" + \
get_cookie_value()
def __find_next_page(html):
"""
Finds the link to the next page
"""
return scrapertools.find_single_match(html, REGEX_NEXT_PAGE)
def __extract_info_from_serie(html):
title = scrapertools.find_single_match(html, REGEX_TITLE)
title = clean_title(title)
@@ -131,15 +123,15 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="letras",
title="Por orden alfabético"))
itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros",
url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime")))
url= CHANNEL_HOST + "/ListadeAnime"))
itemlist.append(Item(channel=item.channel, action="series", title="Por popularidad",
url=urlparse.urljoin(CHANNEL_HOST, "/ListadeAnime/MasVisto")))
url=CHANNEL_HOST + "/ListadeAnime/MasVisto"))
itemlist.append(Item(channel=item.channel, action="series", title="Novedades",
url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/Nuevo")))
url=CHANNEL_HOST + "/ListadeAnime/Nuevo"))
itemlist.append(Item(channel=item.channel, action="series", title="Últimos",
url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/LatestUpdate")))
url=CHANNEL_HOST + "/ListadeAnime/LatestUpdate"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
url=urlparse.urljoin(CHANNEL_HOST, "Buscar?s=")))
url=CHANNEL_HOST + "/Buscar?s="))
itemlist = renumbertools.show_option(item.channel, itemlist)
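For context (an observation, not stated in the commit): CHANNEL_HOST lost its trailing slash in this change, and urlparse.urljoin resolves its second argument against the base URL's last path segment, so the channel now builds URLs by plain concatenation instead. A minimal sketch of the difference:

import urlparse  # Python 2 stdlib module, as used by the addon

urlparse.urljoin("http://animeflv.me/", "ListadeAnime")  # -> http://animeflv.me/ListadeAnime
urlparse.urljoin("http://animeflv.co", "ListadeAnime")   # -> http://animeflv.co/ListadeAnime
# Concatenation makes the final URL explicit, independent of urljoin's rules:
"http://animeflv.co" + "/ListadeAnime"                   # -> http://animeflv.co/ListadeAnime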
@@ -148,15 +140,11 @@ def mainlist(item):
def letras(item):
logger.info()
base_url = 'http://animeflv.co/ListadeAnime?c='
itemlist = list()
itemlist.append(Item(channel=item.channel, action="series", title="#", url=base_url + "#"))
for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
logger.debug("title=[%s], url=[%s], thumbnail=[]" % (letter, base_url + letter))
itemlist.append(Item(channel=item.channel, action="series", title=letter, url=base_url + letter))
return itemlist
@@ -172,8 +160,6 @@ def generos(item):
list_genre = re.findall(REGEX_GENERO, html)
for url, genero in list_genre:
logger.debug("title=[%s], url=[%s], thumbnail=[]" % (genero, url))
itemlist.append(Item(channel=item.channel, action="series", title=genero, url=url))
return itemlist
@@ -181,12 +167,9 @@ def generos(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
item.url = "%s%s" % (item.url, texto)
html = get_url_contents(item.url)
try:
# A single result was found and it redirected to the series page
if html.find('<title>Ver') >= 0:
@@ -198,9 +181,6 @@ def search(item, texto):
items = []
for show in show_list:
title, url, thumbnail, plot = show
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail))
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
except:
@@ -214,35 +194,25 @@ def search(item, texto):
def series(item):
logger.info()
page_html = get_url_contents(item.url)
show_list = __find_series(page_html)
items = []
for show in show_list:
title, url, thumbnail, plot = show
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail))
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
url_next_page = __find_next_page(page_html)
url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
if url_next_page:
items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_next_page))
items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url= CHANNEL_HOST + url_next_page))
return items
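A minimal sketch of the reworked pagination, assuming markup like the new regex expects (the HTML snippet is invented): the captured href is now relative and single-quoted, hence the CHANNEL_HOST prefix when building the "next page" item.

import re

REGEX_NEXT_PAGE = "class='current'>\d+?</li><li><a href='([^']+?)'"
CHANNEL_HOST = "http://animeflv.co"
sample_html = "<li class='current'>1</li><li><a href='/ListadeAnime?page=2'>2</a></li>"
match = re.search(REGEX_NEXT_PAGE, sample_html)
if match:
    next_url = CHANNEL_HOST + match.group(1)  # -> http://animeflv.co/ListadeAnime?page=2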
def episodios(item):
logger.info()
itemlist = []
html_serie = get_url_contents(item.url)
info_serie = __extract_info_from_serie(html_serie)
if info_serie[3]:
plot = info_serie[3]
@@ -250,11 +220,9 @@ def episodios(item):
plot = ''
episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL)
es_pelicula = False
for url, title, date in episodes:
episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
# The link belongs to an episode
if episode:
season = 1
@@ -268,9 +236,6 @@ def episodios(item):
title = "%s (%s)" % (title, date)
item.url = url
es_pelicula = True
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, item.thumbnail))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title)))
@@ -318,7 +283,6 @@ def findvideos(item):
videoitem.thumbnail = item.thumbnail
regex_video_list = r'var part = \[([^\]]+)'
videos_html = scrapertools.find_single_match(iframe_html, regex_video_list)
videos = re.findall('"([^"]+)"', videos_html, re.DOTALL)
for quality_id, video_url in enumerate(videos):

View File

@@ -43,7 +43,7 @@ def findvideos(item):
for thumbnail, title, url, time in matches:
scrapedtitle = time + " - " + title
scrapedurl = host + url
scrapedthumbnail = "http:" + thumbnail
scrapedthumbnail = thumbnail
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail))
@@ -80,7 +80,7 @@ def play(item):
itemlist = []
data = httptools.downloadpage(item.url).data
url = "http:" + scrapertools.find_single_match(data, '<source src="([^"]+)"')
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
itemlist.append(item.clone(url=url, server="directo"))
return itemlist
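A plausible reading of these two changes (an assumption, the diff does not state the reason): the site previously served protocol-relative URLs, which needed the "http:" prefix, and now serves absolute ones, where the prefix would corrupt the result.

# Hypothetical old markup: protocol-relative, prefix required
"http:" + "//example.com/thumb.jpg"        # -> http://example.com/thumb.jpg
# Hypothetical new markup: already absolute, prefix now breaks the URL
"http:" + "https://example.com/thumb.jpg"  # -> http:https://example.com/thumb.jpg (invalid)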

View File

@@ -1,37 +0,0 @@
{
"id": "crimenes",
"name": "Crimenes Imperfectos",
"active": true,
"adult": false,
"language": ["cast"],
"banner": "crimenes.png",
"thumbnail": "crimenes.png",
"version": 1,
"changes": [
{
"date": "19/06/2017",
"description": "correcion xml"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,167 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
import xbmc
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
# Manually built main list
def listav(item):
itemlist = []
data = scrapertools.cache_page(item.url)
patronbloque = '<li><div class="yt-lockup.*?<img.*?src="([^"]+)".*?'
patronbloque += '<h3 class="yt-lockup-title "><a href="([^"]+)".*?title="([^"]+)".*?'
patronbloque += '</a><span class=.*?">(.*?)</span></h3>'
matchesbloque = re.compile(patronbloque, re.DOTALL).findall(data)
scrapertools.printMatches(matchesbloque)
scrapedduration = ''
for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matchesbloque:
scrapedtitle = '[COLOR white]' + scrapedtitle + '[/COLOR] [COLOR red]' + scrapedduration + '[/COLOR]'
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.thumbnail, scrapedthumbnail)
xbmc.log("$ " + scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=scrapedtitle, url=url,
thumbnail=thumbnail, fanart=thumbnail))
# Pagination
patronbloque = '<div class="branded-page-box .*? spf-link ">(.*?)</div>'
matches = re.compile(patronbloque, re.DOTALL).findall(data)
for bloque in matches:
patronvideo = '<a href="([^"]+)"'
matchesx = re.compile(patronvideo, re.DOTALL).findall(bloque)
for scrapedurl in matchesx:
url = urlparse.urljoin(item.url, 'https://www.youtube.com' + scrapedurl)
# only keep the last link
itemlist.append(
Item(channel=item.channel, action="listav", title="Siguiente pag >>", fulltitle="Siguiente Pag >>", url=url,
thumbnail=item.thumbnail, fanart=item.fanart))
return itemlist
def busqueda(item):
itemlist = []
keyboard = xbmc.Keyboard("", "Busqueda")
keyboard.doModal()
if (keyboard.isConfirmed()):
myurl = keyboard.getText().replace(" ", "+")
data = scrapertools.cache_page('https://www.youtube.com/results?q=' + myurl)
data = data.replace("\n", "").replace("\t", "")
data = scrapertools.decodeHtmlentities(data)
patronbloque = '<li><div class="yt-lockup.*?<img.*?src="([^"]+)".*?'
patronbloque += '<h3 class="yt-lockup-title "><a href="([^"]+)".*?title="([^"]+)".*?'
patronbloque += '</a><span class=.*?">(.*?)</span></h3>'
matchesbloque = re.compile(patronbloque, re.DOTALL).findall(data)
scrapertools.printMatches(matchesbloque)
for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduracion in matchesbloque:
scrapedtitle = scrapedtitle + ' ' + scrapedduracion
url = scrapedurl
thumbnail = scrapedthumbnail
xbmc.log("$ " + scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=scrapedtitle, url=url,
thumbnail=thumbnail, fanart=thumbnail))
# Pagination
patronbloque = '<div class="branded-page-box .*? spf-link ">(.*?)</div>'
matches = re.compile(patronbloque, re.DOTALL).findall(data)
for bloque in matches:
patronvideo = '<a href="([^"]+)"'
matchesx = re.compile(patronvideo, re.DOTALL).findall(bloque)
for scrapedurl in matchesx:
url = 'https://www.youtube.com' + scrapedurl
# only keep the last link
itemlist.append(
Item(channel=item.channel, action="listav", title="Siguiente pag >>", fulltitle="Siguiente Pag >>",
url=url))
return itemlist
else:
# xbmcgui.Dialog().ok(item.channel, "nada que buscar")
# xbmc.executebuiltin("Action(up)")
xbmc.executebuiltin("Action(enter)")
# itemlist.append( Item(channel=item.channel, action="listav", title="<< Volver", fulltitle="Volver" , url="history.back()") )
def mainlist(item):
logger.info()
itemlist = []
item.url = 'https://www.youtube.com/results?q=crimenes+imperfectos&sp=CAI%253D'
scrapedtitle = "[COLOR white]Crimenes [COLOR red]Imperfectos[/COLOR]"
item.thumbnail = urlparse.urljoin(item.thumbnail,
"https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQ2PcyvcYIg6acvdUZrHGFFk_E3mXK9QSh-5TypP8Rk6zQ6S1yb2g")
item.fanart = urlparse.urljoin(item.fanart,
"https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQ2PcyvcYIg6acvdUZrHGFFk_E3mXK9QSh-5TypP8Rk6zQ6S1yb2g")
itemlist.append(
Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart))
item.url = 'https://www.youtube.com/results?search_query=russian+dash+cam&sp=CAI%253D'
scrapedtitle = "[COLOR blue]Russian[/COLOR] [COLOR White]Dash[/COLOR] [COLOR red]Cam[/COLOR]"
item.thumbnail = urlparse.urljoin(item.thumbnail, "https://i.ytimg.com/vi/-C6Ftromtig/maxresdefault.jpg")
item.fanart = urlparse.urljoin(item.fanart,
"https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcRQLO-n-kO1ByY8lLhKxz0-cejJD1J7rLge_j0E0Gh9LJ2WtTbSnA")
itemlist.append(
Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart))
item.url = 'https://www.youtube.com/results?search_query=cuarto+milenio+programa+completo&sp=CAI%253D'
scrapedtitle = "[COLOR green]Cuarto[/COLOR] [COLOR White]Milenio[/COLOR]"
item.thumbnail = urlparse.urljoin(item.thumbnail,
"http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg")
item.fanart = urlparse.urljoin(item.fanart,
"http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg")
itemlist.append(
Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart))
item.url = 'https://www.youtube.com/results?q=milenio+3&sp=CAI%253D'
scrapedtitle = "[COLOR green]Milenio[/COLOR] [COLOR White]3- Podcasts[/COLOR]"
item.thumbnail = urlparse.urljoin(item.thumbnail,
"http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg")
item.fanart = urlparse.urljoin(item.fanart,
"http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg")
itemlist.append(
Item(channel=item.channel, action="listav", title=scrapedtitle, fulltitle=scrapedtitle, url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart))
scrapedtitle = "[COLOR red]buscar ...[/COLOR]"
item.thumbnail = urlparse.urljoin(item.thumbnail,
"http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg")
item.fanart = urlparse.urljoin(item.fanart,
"http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg")
itemlist.append(Item(channel=item.channel, action="busqueda", title=scrapedtitle, fulltitle=scrapedtitle,
thumbnail=item.thumbnail, fanart=item.fanart))
return itemlist
def play(item):
logger.info("url=" + item.url)
itemlist = servertools.find_video_items(data=item.url)
return itemlist

View File

@@ -157,11 +157,10 @@ def lista(item):
# logger.info("data="+data)
bloque = scrapertools.find_single_match(data, '(?:<ul class="pelilist">|<ul class="buscar-list">)(.*?)</ul>')
patron = '<li[^<]+'
patron += '<a href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += '<h2[^>]*>(.*?)</h2.*?'
patron += '(?:<strong[^>]*>|<span[^>]*>)(.*?)(?:</strong>|</span>)'
patron = '<a href="([^"]+).*?' # the url
patron += '<img src="([^"]+)"[^>]+>.*?' # the thumbnail
patron += '<h2[^>]*>(.*?)</h2.*?' # the title
patron += '<span>([^<].*?)<' # the quality
matches = re.compile(patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)
@@ -175,7 +174,7 @@ def lista(item):
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
contentTitle = scrapertools.htmlclean(scrapedtitle).strip()
patron = '([^<]+)<br>'
matches = re.compile(patron, re.DOTALL).findall(calidad + '<br>')
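The year is now scraped from the poster filename; a sketch assuming a title-YYYY.jpg naming convention (the path is invented for illustration):

import re

scrapedthumbnail = "http://www.divxatope1.com/uploads/images/some-movie-2017.jpg"
match = re.search(r'-(\d{4})', scrapedthumbnail)
year = match.group(1) if match else ""  # -> "2017", later fed into infoLabels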
@@ -196,7 +195,7 @@ def lista(item):
itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
language=idioma, contentSeason=int(temporada),
contentEpisodeNumber=int(episodio), contentQuality=calidad))
contentEpisodeNumber=int(episodio), quality=calidad))
else:
if len(matches) == 2:
@@ -205,7 +204,7 @@ def lista(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
language=idioma, contentThumbnail=thumbnail, contentQuality=calidad))
language=idioma, contentThumbnail=thumbnail, quality=calidad))
next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>')
if next_page_url != "":
@@ -262,7 +261,7 @@ def findvideos(item):
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
link = scrapertools.find_single_match(data, 'href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
link = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
if link != "":
link = "http://www.divxatope1.com/" + link
logger.info("torrent=" + link)
@@ -275,14 +274,7 @@ def findvideos(item):
patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
patron += '<\/div[^<]+<div class="box6">([^<]+)<'
#patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
#patron += '<div class="box2">([^<]+)</div[^<]+'
#patron += '<div class="box3">([^<]+)</div[^<]+'
#patron += '<div class="box4">([^<]+)</div[^<]+'
#patron += '<div class="box5">(.*?)</div[^<]+'
#patron += '<div class="box6">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
itemlist_ver = []
itemlist_descargar = []
@@ -308,11 +300,8 @@ def findvideos(item):
else:
itemlist_descargar.append(new_item)
for new_item in itemlist_ver:
itemlist.append(new_item)
for new_item in itemlist_descargar:
itemlist.append(new_item)
itemlist.extend(itemlist_ver)
itemlist.extend(itemlist_descargar)
return itemlist

View File

@@ -616,6 +616,8 @@ def findvideos(item):
url_targets = item.url
## Videos
id = ""
type = ""
if "###" in item.url:
id = item.url.split("###")[1].split(";")[0]
type = item.url.split("###")[1].split(";")[1]
@@ -698,6 +700,9 @@ def findvideos(item):
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for item in it2:
if "###" not in item.url:
item.url += "###" + id + ";" + type
itemlist.extend(it1)
itemlist.extend(it2)
## 2 = movie
@@ -707,7 +712,6 @@ def findvideos(item):
action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail,
fulltitle = item.contentTitle
))
return itemlist
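The hunks above rely on a channel-internal convention: the content id and type ride along in the item URL after a "###" separator, and every scraped server link gets the same suffix re-appended. A sketch with invented values ("2" = movie, per the comment in the diff):

item_url = "http://hdfull.tv/pelicula/example###8765;2"
id = ""
type = ""
if "###" in item_url:
    id = item_url.split("###")[1].split(";")[0]    # -> "8765"
    type = item_url.split("###")[1].split(";")[1]  # -> "2"
video_url = "http://someserver/watch?v=abc"
if "###" not in video_url:
    video_url += "###" + id + ";" + type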

View File

@@ -12,15 +12,8 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
__channel__='allcalidad'
host = "http://www.pelismundo.com/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
host = "http://www.pelismundo.com"
idiomas = {"Castellano":"CAST","Subtitulad":"VOSE","Latino":"LAT"}
def mainlist(item):
logger.info()
@@ -80,10 +73,9 @@ def sub_search(item):
patron = '(?s)href="([^"]+)".*?'
patron += 'title="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'Idioma.*?tag">([^<]+).*?'
patron += 'Calidad(.*?<)\/'
patron += 'Idioma(.*?)Cal'
patron += 'idad(.*?<)\/'
match = scrapertools.find_multiple_matches(bloque, patron)
scrapertools.printMatches(match)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlanguages, scrapedquality in match:
year = scrapertools.find_single_match(scrapedtitle, '[0-9]{4}')
scrapedquality = scrapertools.find_single_match(scrapedquality, 'rel="tag">([^<]+)<')
@@ -93,21 +85,14 @@ def sub_search(item):
scrapedtitle = scrapedtitle.replace(st, "")
title = scrapedtitle
if year:
title += " (" + year + ")"
title += "(" + year + ")"
if scrapedquality:
title += " (" + scrapedquality + ")"
patronidiomas = ''
idiomas_disponibles = []
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Castellano')
if matchidioma:
idiomas_disponibles.append("ESP")
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Subtitulado')
if matchidioma:
idiomas_disponibles.append("VOSE")
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Latino')
if matchidioma:
idiomas_disponibles.append("LAT")
idiomas_disponibles1 = ""
for lang in idiomas.keys():
if lang in scrapedlanguages:
idiomas_disponibles.append(idiomas[lang])
if idiomas_disponibles:
idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
title += " %s" %idiomas_disponibles1
@@ -171,17 +156,10 @@ def peliculas(item):
title += " (" + year + ")"
if scrapedquality:
title += " (" + scrapedquality + ")"
patronidiomas = ''
idiomas_disponibles = []
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Castellano')
if matchidioma:
idiomas_disponibles.append("ESP")
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Subtitulado')
if matchidioma:
idiomas_disponibles.append("VOSE")
matchidioma = scrapertools.find_single_match(scrapedlanguages, 'Latino')
if matchidioma:
idiomas_disponibles.append("LAT")
for lang in idiomas.keys():
if lang in scrapedlanguages:
idiomas_disponibles.append(idiomas[lang])
idiomas_disponibles1 = ""
if idiomas_disponibles:
idiomas_disponibles1 = "[" + "/".join(idiomas_disponibles) + "]"
@@ -219,7 +197,7 @@ def findvideos(item):
title = "Ver en: %s " + "(" + scrapedlanguage + ")"
itemlist.append(item.clone(action = "play",
title = title,
language = item.language,
language = scrapedlanguage,
quality = item.quality,
url = scrapedurl
))

View File

@@ -777,7 +777,7 @@ def acciones_cuenta(item):
for category, contenido in matches:
itemlist.append(item.clone(action="", title=category, text_color=color3))
patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="\.([^"]+)".*?serie="([^"]*)".*?' \
patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="([^"]+)".*?serie="([^"]*)".*?' \
'<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
entradas = scrapertools.find_multiple_matches(contenido, patron)
for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas:

View File

@@ -7,7 +7,7 @@ from core import scrapertools
from core.item import Item
from platformcode import logger
host = "http://www.playpornx.net/list-movies/"
host = "http://www.playpornx.net/"
def mainlist(item):
@@ -15,7 +15,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Todas", action="lista",
thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
url ='https://www.playpornx.net/category/porn-movies/?filter=date'))
url =host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url='http://www.playpornx.net/?s=',
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
@@ -31,10 +31,10 @@ def lista(item):
if item.url == '': item.url = host
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'role=article><a href=(.*?) rel=bookmark title=(.*?)>.*?src=(.*?) class'
patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt.*?<h2>(.*?)<\/h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
title = scrapedtitle
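Note the capture groups in the new pattern come out in (url, thumbnail, title) order, which is why the loop unpacking was reordered. A sketch against invented markup; the quotes were already stripped from data by the re.sub above, hence the unquoted attributes:

import re

data = '<div class=item><a href=http://example.net/movie-1><div class=poster><img src=http://example.net/1.jpg alt=x><h2>Movie 1</h2>'
patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt.*?<h2>(.*?)<\/h2>'
matches = re.findall(patron, data, re.DOTALL)
# -> [('http://example.net/movie-1', 'http://example.net/1.jpg', 'Movie 1')]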

View File

@@ -266,7 +266,6 @@ def lista(item):
contentTitle=scrapedtitle,
extra=item.extra,
infoLabels={'year': year},
show=scrapedtitle,
list_language=list_language,
context=autoplay.context
))

View File

@@ -1,100 +0,0 @@
{
"id": "vixto",
"name": "Vixto",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "vixto.png",
"thumbnail": "http://i.imgur.com/y4c4HT2.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "16/02/2017",
"description": "Correccion para el apartado de series"
},
{
"date": "12/11/2016",
"description": "Primera version, sustituye a oranline"
}
],
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"VOSE",
"Latino",
"Español",
"No filtrar"
]
},
{
"id": "filterlinks",
"type": "list",
"label": "Mostrar enlaces de tipo...",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Solo Descarga",
"Solo Online",
"No filtrar"
]
},
{
"id": "orderlinks",
"type": "list",
"label": "Ordenar enlaces por...",
"default": 2,
"enabled": true,
"visible": true,
"lvalues": [
"Servidor",
"Idioma",
"Más recientes"
]
}
]
}

View File

@@ -1,383 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
# Channel configuration
__modo_grafico__ = config.get_setting('modo_grafico', "vixto")
__perfil__ = config.get_setting('perfil', "vixto")
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE']]
color1, color2, color3 = perfil[__perfil__]
host = "http://www.vixto.net/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Películas", text_color=color2, action="",
text_bold=True))
itemlist.append(item.clone(action="listado", title=" Estrenos", text_color=color1, url=host,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/"
"0/Directors%20Chair.png"))
itemlist.append(item.clone(action="listado", title=" Novedades", text_color=color1, url=host,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/"
"0/Directors%20Chair.png"))
itemlist.append(item.clone(action="listado", title="Series - Novedades", text_color=color2, url=host,
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres/"
"0/TV%20Series.png", text_bold=True))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
url="http://www.vixto.net/buscar?q="))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
return busqueda(item)
# Catch the exception so the global search is not interrupted if a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%{0}".format(line))
return []
def newest(categoria):
logger.info()
itemlist = list()
item = Item()
try:
if categoria == 'peliculas':
item.url = host
itemlist = listado(item)
if itemlist[-1].action == "listado":
itemlist.pop()
item.title = "Estrenos"
itemlist.extend(listado(item))
# Catch the exception so the new-releases feed is not interrupted if a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def listado(item):
logger.info()
itemlist = list()
item.infoLabels['mediatype'] = "movie"
if "Estrenos" in item.title:
bloque_head = "ESTRENOS CARTELERA"
elif "Series" in item.title:
bloque_head = "RECIENTE SERIES"
item.infoLabels['mediatype'] = "tvshow"
else:
bloque_head = "RECIENTE PELICULAS"
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)
# Extract the entries (folders)
bloque = scrapertools.find_single_match(data, bloque_head + '\s*</h2>(.*?)</section>')
patron = '<div class="".*?href="([^"]+)".*?src="([^"]+)".*?<div class="calZG">(.*?)</div>' \
'(.*?)</div>.*?href.*?>(.*?)</a>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, calidad, idiomas, scrapedtitle in matches:
title = scrapedtitle
langs = []
if 'idio idi1' in idiomas:
langs.append("VOS")
if 'idio idi2' in idiomas:
langs.append("LAT")
if 'idio idi4' in idiomas:
langs.append("ESP")
if langs:
title += " [%s]" % "/".join(langs)
if calidad:
title += " %s" % calidad
filtro_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w342", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
if item.contentType == "tvshow":
new_item = item.clone(action="episodios", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list},
contentTitle=scrapedtitle, context="buscar_trailer", text_color=color1,
show=scrapedtitle, text_bold=False)
else:
new_item = item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle, infoLabels={'filtro': filtro_list}, text_bold=False,
contentTitle=scrapedtitle, context="buscar_trailer", text_color=color1)
itemlist.append(new_item)
if item.action == "listado":
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
return itemlist
def busqueda(item):
logger.info()
itemlist = list()
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)
# Extract the entries (folders)
bloque = scrapertools.find_single_match(data, '<h2>Peliculas</h2>(.*?)</div>')
bloque += scrapertools.find_single_match(data, '<h2>Series</h2>(.*?)</div>')
patron = '<figure class="col-lg-2.*?href="([^"]+)".*?src="([^"]+)".*?<figcaption title="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
peliculas = False
series = False
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
new_item = Item(channel=item.channel, contentType="movie", url=scrapedurl, title=" " + scrapedtitle,
text_color=color1, context="buscar_trailer", fulltitle=scrapedtitle,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, action="findvideos")
if "/peliculas/" in scrapedurl and not peliculas:
itemlist.append(Item(channel=item.channel, action="", title="Películas", text_color=color2))
peliculas = True
if "/series/" in scrapedurl and not series:
itemlist.append(Item(channel=item.channel, action="", title="Series", text_color=color2))
series = True
if "/series/" in scrapedurl:
new_item.contentType = "tvshow"
new_item.show = scrapedtitle
new_item.action = "episodios"
filtro_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w342", "")
filtro_list = {"poster_path": filtro_thumb}
new_item.infoLabels["filtro"] = filtro_list.items()
itemlist.append(new_item)
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
return itemlist
def episodios(item):
logger.info()
itemlist = list()
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)
# Extract the entries (folders)
bloque = scrapertools.find_single_match(data, '<strong>Temporada:(.*?)</div>')
matches = scrapertools.find_multiple_matches(bloque, 'href="([^"]+)">(.*?)</a>')
for scrapedurl, scrapedtitle in matches:
title = "Temporada %s" % scrapedtitle
new_item = item.clone(action="", title=title, text_color=color2)
new_item.infoLabels["season"] = scrapedtitle
new_item.infoLabels["mediatype"] = "season"
data_season = httptools.downloadpage(scrapedurl).data
data_season = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data_season)
patron = '<li class="media">.*?href="([^"]+)"(.*?)<div class="media-body">.*?href.*?>' \
'(.*?)</a>'
matches = scrapertools.find_multiple_matches(data_season, patron)
elementos = []
for url, status, title in matches:
if not "Enlaces Disponibles" in status:
continue
elementos.append(title)
item_epi = item.clone(action="findvideos", url=url, text_color=color1)
item_epi.infoLabels["season"] = scrapedtitle
episode = scrapertools.find_single_match(title, 'Capitulo (\d+)')
titulo = scrapertools.find_single_match(title, 'Capitulo \d+\s*-\s*(.*?)$')
item_epi.infoLabels["episode"] = episode
item_epi.infoLabels["mediatype"] = "episode"
item_epi.title = "%sx%s %s" % (scrapedtitle, episode.zfill(2), titulo)
itemlist.insert(0, item_epi)
if elementos:
itemlist.insert(0, new_item)
if item.infoLabels["tmdb_id"] and itemlist:
try:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if itemlist:
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", text_color="green",
filtro=True, action="add_serie_to_library", fulltitle=item.fulltitle,
extra="episodios", url=item.url, infoLabels=item.infoLabels, show=item.show))
else:
itemlist.append(item.clone(title="Serie sin episodios disponibles", action="", text_color=color3))
return itemlist
def findvideos(item):
logger.info()
itemlist = list()
try:
filtro_idioma = config.get_setting("filterlanguages", item.channel)
filtro_enlaces = config.get_setting("filterlinks", item.channel)
except:
filtro_idioma = 3
filtro_enlaces = 2
dict_idiomas = {'Castellano': 2, 'Latino': 1, 'Subtitulada': 0}
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)
if not item.infoLabels["tmdb_id"]:
year = scrapertools.find_single_match(data, 'Lanzamiento.*?(\d{4})')
if year != "":
item.infoLabels['filtro'] = ""
item.infoLabels['year'] = int(year)
# Enrich the metadata from tmdb
try:
tmdb.set_infoLabels_item(item, __modo_grafico__)
except:
pass
if not item.infoLabels['plot']:
plot = scrapertools.find_single_match(data, '<p class="plot">(.*?)</p>')
item.infoLabels['plot'] = plot
if filtro_enlaces != 0:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Ver Online", item)
if list_enlaces:
itemlist.append(item.clone(action="", title="Enlaces Online", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
if filtro_enlaces != 1:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "Descarga Directa", item)
if list_enlaces:
itemlist.append(item.clone(action="", title="Enlaces Descargas", text_color=color1,
text_bold=True))
itemlist.extend(list_enlaces)
# Option: "Add this movie to the XBMC video library"
if itemlist and item.contentType == "movie":
contextual = config.is_xbmc()
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta", contextual=contextual))
if item.extra != "findvideos":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", fulltitle=item.fulltitle,
extra="findvideos", url=item.url, infoLabels=item.infoLabels,
contentType=item.contentType, contentTitle=item.contentTitle, show=item.show))
elif not itemlist and item.contentType == "movie":
itemlist.append(item.clone(title="Película sin enlaces disponibles", action="", text_color=color3))
return itemlist
def bloque_enlaces(data, filtro_idioma, dict_idiomas, tipo, item):
logger.info()
lista_enlaces = list()
bloque = scrapertools.find_single_match(data, tipo + '(.*?)</table>')
patron = '<td class="sape">\s*<i class="idioma-([^"]+)".*?href="([^"]+)".*?</p>.*?<td>([^<]+)</td>' \
'.*?<td class="desaparecer">(.*?)</td>'
matches = scrapertools.find_multiple_matches(bloque, patron)
filtrados = []
for language, scrapedurl, calidad, orden in matches:
language = language.strip()
server = scrapertools.find_single_match(scrapedurl, 'http(?:s|)://(?:www.|)(\w+).')
if server == "ul":
server = "uploadedto"
if server == "streamin":
server = "streaminto"
if server == "waaw":
server = "netutv"
if servertools.is_server_enabled(server):
try:
servers_module = __import__("servers." + server)
title = " Mirror en " + server + " (" + language + ") (Calidad " + calidad.strip() + ")"
if filtro_idioma == 3 or item.filtro:
lista_enlaces.append(item.clone(title=title, action="play", server=server, text_color=color2,
url=scrapedurl, idioma=language, orden=orden, language=language))
else:
idioma = dict_idiomas[language]
if idioma == filtro_idioma:
lista_enlaces.append(item.clone(title=title, text_color=color2, action="play",
url=scrapedurl, server=server, idioma=language, orden=orden,
language=language))
else:
if language not in filtrados:
filtrados.append(language)
except:
pass
order = config.get_setting("orderlinks", item.channel)
if order == 0:
lista_enlaces.sort(key=lambda item: item.server)
elif order == 1:
lista_enlaces.sort(key=lambda item: item.idioma)
else:
lista_enlaces.sort(key=lambda item: item.orden, reverse=True)
if filtro_idioma != 3:
if len(filtrados) > 0:
title = "Mostrar enlaces filtrados en %s" % ", ".join(filtrados)
lista_enlaces.append(item.clone(title=title, action="findvideos", url=item.url, text_color=color3,
filtro=True))
return lista_enlaces
def play(item):
logger.info()
itemlist = list()
enlace = servertools.findvideosbyserver(item.url, item.server)
itemlist.append(item.clone(url=enlace[0][1]))
return itemlist

View File

@@ -8,9 +8,10 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data:
return False, "[Downace] El fichero ha sido borrado"
if "no longer exists" in data or "to copyright issues" in data:
return False, "[Downace] El video ha sido borrado"
if "please+try+again+later." in data:
return False, "[Downace] Error de downace, no se puede generar el enlace al video"
return True, ""

View File

@@ -14,8 +14,7 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, cookies=False).data
if 'File Not Found' in data or 'file was deleted' in data:
if 'file was deleted' in data:
return False, "[FlashX] El archivo no existe o ha sido borrado"
elif 'Video is processing now' in data:
return False, "[FlashX] El archivo se está procesando"
@@ -25,7 +24,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
pfxfx = ""
headers = {'Host': 'www.flashx.tv',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
@@ -33,6 +32,14 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
'Cookie': ''}
data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
# To obtain the f and fxfx parameters
js_fxfx = scrapertools.find_single_match(data, 'src="(https://www.flashx.tv/js/code.js\?cache=[0-9]+)')
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
for f, v in matches:
pfxfx += f + "=" + v + "&"
# {f: 'y', fxfx: '6'}
flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
@@ -45,7 +52,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers['Accept'] = "*/*"
headers['Host'] = "www.flashx.tv"
coding_url = 'https://www.flashx.tv/flashx.php?f=x&fxfx=6'
coding_url = 'https://www.flashx.tv/flashx.php?%s' %pfxfx
headers['X-Requested-With'] = 'XMLHttpRequest'
httptools.downloadpage(coding_url, headers=headers)
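A sketch of what the new f/fxfx extraction produces, run against an invented code.js body (the real script is fetched from flashx.tv and changes over time):

import re

data_fxfx = "$.get('https://www.flashx.tv/flashx.php', {f: 'y', fxfx: '6'}, function(r){});"
mfxfx = re.search("get.*?({.*?})", data_fxfx).group(1).replace("'", "").replace(" ", "")
pfxfx = ""
for f, v in re.findall(r"(\w+):(\w+)", mfxfx):
    pfxfx += f + "=" + v + "&"
# pfxfx == "f=y&fxfx=6&"  ->  https://www.flashx.tv/flashx.php?f=y&fxfx=6&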
@@ -56,7 +63,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers.pop('X-Requested-With')
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = httptools.downloadpage('https://www.flashx.tv/dl?playnow', post, headers, replace_headers=True).data
data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data
# If a warning pops up, load the verification page and then the initial one
# LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ IT IS MANDATORY TO ADD THESE LINES
@@ -64,7 +71,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
try:
data = httptools.downloadpage(url_reload, cookies=False).data
data = httptools.downloadpage('https://www.flashx.tv/dl?playnow', post, headers, replace_headers=True).data
data = httptools.downloadpage('https://www.flashx.tv/dl?playitnow', post, headers, replace_headers=True).data
# LICENSE GPL3, from alfa-addon: https://github.com/alfa-addon/ IT IS MANDATORY TO ADD THESE LINES
except:
pass

View File

@@ -7,7 +7,8 @@ from core import scrapertools
from lib import jsunpack
from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'}
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 ' \
'Firefox/40.0'}
def test_video_exists(page_url):
@@ -24,8 +25,8 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url, add_referer = True, headers=headers).data
data = httptools.downloadpage(page_url, headers=headers).data
logger.debug(data)
packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")
if packer != "":