Merge pull request #42 from Intel11/ultimo

Actualizados
This commit is contained in:
Alfa
2017-08-19 20:15:22 -04:00
committed by GitHub
12 changed files with 251 additions and 123 deletions

View File

@@ -37,6 +37,17 @@
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar"
]
},
{
"id": "include_in_newest_anime",
"type": "bool",

View File

@@ -9,6 +9,22 @@ from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import logger
from channels import filtertools
from channels import autoplay
list_language = ['No filtrar']
logger.debug('lista_language: %s' % list_language)
list_quality = ['default']
list_servers = [
'izanagi',
'yourupload',
'okru',
'netutv',
'openload',
'streamango',
'mp4upload'
]
HOST = "https://animeflv.net/"
@@ -16,6 +32,8 @@ HOST = "https://animeflv.net/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
@@ -35,6 +53,8 @@ def mainlist(item):
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -131,7 +151,7 @@ def novedades_episodios(item):
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
fulltitle=title)
fulltitle=title, context = autoplay.context)
itemlist.append(new_item)
@@ -155,7 +175,7 @@ def novedades_anime(item):
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
fulltitle=title, plot=plot, context = autoplay.context)
if _type != "Película":
new_item.show = title
new_item.context = renumbertools.context(item)
@@ -189,7 +209,7 @@ def listado(item):
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
fulltitle=title, plot=plot, context = autoplay.context)
if _type == "Anime":
new_item.show = title
@@ -242,7 +262,7 @@ def episodios(item):
title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumb, fulltitle=title,
fanart=item.thumbnail, contentType="episode"))
fanart=item.thumbnail, contentType="episode", context = autoplay.context))
else:
# no hay thumbnail
matches = re.compile('<a href="(/ver/[^"]+)"[^>]+>(.*?)<', re.DOTALL).findall(data)
@@ -299,11 +319,15 @@ def findvideos(item):
if video_urls:
video_urls.sort(key=lambda v: int(v[0]))
itemlist.append(item.clone(title="Enlace encontrado en %s" % server, action="play",
video_urls=video_urls))
video_urls=video_urls, language='No filtrar', quality ='default',
server=server))
else:
url = scrapertools.find_single_match(data, '"file":"([^"]+)"')
if url:
itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play"))
if server == 'izanagi':
server = 'directo'
itemlist.append(item.clone(title="Enlace encontrado en %s" % server, url=url, action="play",
language='No filtrar', quality ='default', server=server))
else:
aux_url.append(e)
@@ -315,6 +339,14 @@ def findvideos(item):
videoitem.channel = item.channel
videoitem.thumbnail = item.thumbnail
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist

View File

@@ -21,8 +21,24 @@
"description": "First release"
}
],
"categories": [
"categories": [
"latino",
"anime"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"SUB"
]
}
]
}

View File

@@ -8,6 +8,19 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
from channels import autoplay
from channels import filtertools
IDIOMAS = {'Latino': 'LAT', 'Castellano':'CAST','Subtitulado': 'VOS'}
list_language = IDIOMAS.values()
logger.debug('lista_language: %s' % list_language)
list_quality = ['default']
list_servers = [
'rapidvideo',
'downace',
'openload'
]
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
@@ -35,6 +48,8 @@ headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(item.clone(title="Ultimas",
@@ -65,6 +80,8 @@ def mainlist(item):
fanart='https://s30.postimg.org/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -161,10 +178,16 @@ def episodios(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedlang, scrapedtitle in matches:
language = scrapedlang
language = IDIOMAS[scrapedlang]
title = scrapedtitle + ' (%s)' % language
url = scrapedurl
itemlist.append(item.clone(title=title, url=url, action='findvideos', language=language))
itemlist.append(item.clone(title=title,
url=url,
action='findvideos',
language=language,
quality ='default'
))
return itemlist
@@ -180,6 +203,13 @@ def findvideos(item):
videoitem.channel = item.channel
videoitem.title = title
videoitem.action = 'play'
videoitem.language = item.language
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist

View File

@@ -381,10 +381,10 @@ def check_value(channel, itemlist):
quality_list = channel_node['quality'] = list()
for item in itemlist:
if item.server not in server_list:
if item.server not in server_list and item.server !='':
server_list.append(item.server)
change = True
if item.quality not in quality_list:
if item.quality not in quality_list and item.quality !='':
quality_list.append(item.quality)
change = True

View File

@@ -49,6 +49,21 @@
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Español",
"VOS",
"VOSE"
]
},
{
"id": "include_in_global_search",
"type": "bool",
@@ -58,4 +73,4 @@
"visible": true
}
]
}
}

View File

@@ -12,6 +12,20 @@ from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from channels import filtertools
from channels import autoplay
IDIOMAS = {'LAT': 'Latino', 'ESP': 'Español', 'ESPSUB': 'VOS', 'ENGSUB' : 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['RHDTV', 'HD0180M', 'HD720M', 'TS']
list_servers = [
'openload',
'powvideo',
'streamplay',
'streamcloud',
'nowvideo'
]
host = "http://hdfull.tv"
@@ -42,7 +56,7 @@ def login():
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas", url=host, folder=True))
@@ -56,6 +70,7 @@ def mainlist(item):
login()
itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -465,7 +480,8 @@ def episodios(item):
'id'] + ";3"
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, show=item.show, folder=True, contentType="episode"))
thumbnail=thumbnail, show=item.show, folder=True, contentType="episode",
context =autoplay.context))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=url_targets,
@@ -696,11 +712,13 @@ def findvideos(item):
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
if account:
url += "###" + id + ";" + type
logger.debug('idioma: %s'%idioma)
logger.debug('IDIOMAS[idioma]: %s' % IDIOMAS[idioma])
enlaces.append(
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, server=servername, infoLabels=infolabels,
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option))
contentTitle=item.contentTitle, contentType=item.contentType, tipo=option, language =
IDIOMAS[idioma], quality=calidad, context= autoplay.context))
enlaces.sort(key=lambda it: it.tipo, reverse=True)
itemlist.extend(enlaces)
@@ -713,6 +731,9 @@ def findvideos(item):
except:
pass
itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
return itemlist

View File

@@ -61,30 +61,3 @@ def search(item, texto):
return todas(item)
else:
return []
def findvideos(item):
    """Scrape the episode page and build one playable item per "Ver" link.

    Returns a list of clones of *item* with ``action="play"`` and the URL of
    each online-watch option found on the page.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Each watch option is an <li> whose anchor carries the player URL.
    patron = '<li.*?<a href="([^"]+)" target="_blank"><i class="icon-metro online"><\/i><span>Ver.*?<\/span><\/a> <\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        # Keep the listing title unchanged; only the URL and action differ.
        itemlist.append(item.clone(title=item.title, url=scrapedurl, action="play"))
    return itemlist
def play(item):
    """Resolve the final playable video items for the selected episode URL.

    Delegates server discovery entirely to ``servertools.find_video_items``.
    """
    logger.info()
    # Spaces in the URL break the request; percent-encode them.
    item.url = item.url.replace(' ', '%20')
    data = httptools.downloadpage(item.url, add_referer=True).data
    # NOTE(review): the original also extracted an <iframe src> here but never
    # used it; servertools discovers every embedded player from the raw page,
    # so that dead extraction has been removed.
    return servertools.find_video_items(data=data)

View File

@@ -17,7 +17,6 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="series", title="Novedades",
url=urlparse.urljoin(CHANNEL_HOST, "archivos/h2/"), extra="novedades"))
itemlist.append(Item(channel=item.channel, action="letras", title="Por orden alfabético"))
itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", url=CHANNEL_HOST))
itemlist.append(Item(channel=item.channel, action="series", title="Sin Censura",
url=urlparse.urljoin(CHANNEL_HOST, "archivos/sin-censura/")))
@@ -25,20 +24,6 @@ def mainlist(item):
url=urlparse.urljoin(CHANNEL_HOST, "archivos/hight-definition/")))
itemlist.append(Item(channel=item.channel, action="series", title="Mejores Hentais",
url=urlparse.urljoin(CHANNEL_HOST, "archivos/ranking-hentai/")))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
url=urlparse.urljoin(CHANNEL_HOST, "?s=")))
return itemlist
def letras(item):
    """Build one alphabetical listing entry per letter.

    The leading '0' entry groups titles starting with a digit; the site
    expects the query value "letra-num" for that bucket.
    """
    logger.info()
    itemlist = []
    for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ':
        itemlist.append(Item(channel=item.channel, action="series", title=letra,
                             url=urlparse.urljoin(CHANNEL_HOST,
                                                  "/?s=letra-%s" % letra.replace("0", "num"))))
    return itemlist
@@ -47,49 +32,34 @@ def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
data = scrapertools.get_match(data, "<div class='cccon'>(.*?)</div><div id=\"myslides\">")
patron = "<a.+? href='/([^']+)'>(.*?)</a>"
pattern = 'id="hentai2"><div[^>]+>(.*?)</div></div>'
data = scrapertools.find_single_match(data, pattern)
patron = 'href="([^"]+)"[^>]+>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.entityunescape(scrapedtitle)
url = urlparse.urljoin(item.url, scrapedurl)
for url, title in matches:
# logger.debug("title=[{0}], url=[{1}]".format(title, url))
itemlist.append(Item(channel=item.channel, action="series", title=title, url=url))
return itemlist
def search(item, texto):
    """Channel search entry point used by the global searcher.

    Returns the series listing matching *texto*; on any scraping failure it
    logs the traceback and returns [] so one broken channel cannot abort the
    global search.
    """
    logger.info()
    if item.url == "":
        item.url = urlparse.urljoin(CHANNEL_HOST, "animes/?buscar=")
    # The site expects '+' as the word separator in the query string.
    item.url = "%s%s" % (item.url, texto.replace(" ", "+"))
    try:
        return series(item)
    except Exception:
        # Deliberate best-effort swallow (narrowed from a bare `except:` so
        # SystemExit/KeyboardInterrupt still propagate): log and keep going.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def series(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
patron = '<div class="post" id="post"[^<]+<center><h1 class="post-title entry-title"[^<]+<a href="([^"]+)">' \
'(.*?)</a>[^<]+</h1></center>[^<]+<div[^<]+</div>[^<]+<div[^<]+<div.+?<img src="([^"]+)"'
pattern = "<div class='wp-pagenavi'>(.*?)</div>"
pagination = scrapertools.find_single_match(data, pattern)
matches = re.compile(patron, re.DOTALL).findall(data)
pattern = '<div class="col-xs-12 col-md-12 col-lg-9px-3"><ul>(.*?)</ul><div class="clearfix">'
data = scrapertools.find_single_match(data, pattern)
pattern = '<a href="([^"]+)".*?<img src="([^"]+)" title="([^"]+)"'
matches = re.compile(pattern, re.DOTALL).findall(data)
itemlist = []
if item.extra == "novedades":
@@ -97,25 +67,20 @@ def series(item):
else:
action = "episodios"
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.unescape(scrapedtitle)
for url, thumbnail, title in matches:
fulltitle = title
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
show = title
# logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
show=show, fulltitle=fulltitle, fanart=thumbnail, folder=True))
patron = '</span><a class="page larger" href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
if len(matches) > 0:
scrapedurl = match
scrapedtitle = ">> Pagina Siguiente"
if pagination:
page = scrapertools.find_single_match(pagination, '>Página\s*(\d+)\s*de\s*\d+<')
pattern = 'href="([^"]+)">%s<' % (int(page) + 1)
url_page = scrapertools.find_single_match(pagination, pattern)
itemlist.append(Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl,
folder=True, viewmode="movies_with_plot"))
if url_page:
itemlist.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_page))
return itemlist
@@ -124,9 +89,11 @@ def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<div class="listanime">(.*?)</div>')
patron = '<a href="([^"]+)">([^<]+)</a>'
data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(item.url).data)
pattern = '<div class="box-entry-title text-center">Lista de Capítulos</div>(.*?)</div></div>'
data = scrapertools.find_single_match(data, pattern)
patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -136,10 +103,9 @@ def episodios(item):
plot = item.plot
# logger.debug("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
thumbnail=thumbnail, plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title),
fanart=thumbnail, viewmode="movies_with_plot", folder=True))
fanart=thumbnail))
return itemlist
@@ -148,7 +114,8 @@ def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
patron = '<div id="tab\d".+?>[^<]+<[iframe|IFRAME].*?[src|SRC]="([^"]+)"'
patron = '<(?:iframe)?(?:IFRAME)?\s*(?:src)?(?:SRC)?="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:

View File

@@ -10,16 +10,21 @@ from core import scrapertoolsV2
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
HOST = "http://seriesblanco.com/"
IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'vo': 'VO', 'vos': 'VOS', 'vosi': 'VOSI', 'otro': 'OVOS'}
list_idiomas = IDIOMAS.values()
CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
list_servers =['youwatch','powvideo', 'openload', 'streamplay', 'streaminto', 'flashx', 'gamovideo', 'nowvideo',
'rockfile']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, CALIDADES)
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_az = get_thumb("channels_tvshow_az.png")
thumb_buscar = get_thumb("search.png")
@@ -44,6 +49,7 @@ def mainlist(item):
thumbnail=thumb_buscar))
itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -199,7 +205,8 @@ def episodios(item):
display_title = "%s - %s %s" % (item.show, title, idiomas)
# logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url)))
itemlist.append(item.clone(title=display_title, url=urlparse.urljoin(HOST, url),
action="findvideos", plot=plot, fanart=fanart, language=filter_lang))
action="findvideos", plot=plot, fanart=fanart, language=filter_lang,
context = autoplay.context))
itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES)
@@ -248,7 +255,7 @@ def parse_videos(item, type_str, data):
itemlist.append(
item.clone(title=title, fulltitle=item.title, url=urlparse.urljoin(HOST, v_fields.get("link")),
action="play", language=IDIOMAS.get(v_fields.get("language"), "OVOS"),
quality=quality))
quality=quality, server= v_fields.get("server")))
if len(itemlist) > 0:
return itemlist
@@ -284,6 +291,14 @@ def findvideos(item):
list_links = filtertools.get_links(list_links, item, list_idiomas, CALIDADES)
# Requerido para FilterTools
itemlist = filtertools.get_links(list_links, item, list_idiomas)
# Requerido para AutoPlay
autoplay.start(list_links, item)
return list_links

View File

@@ -6,8 +6,9 @@ from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
from core import servertools
host = 'http://www.18hentaionline.eu/'
host = 'http://www.18hentaionline.net/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
def episodios(item):
    """List a show's chapters, handling both the old and the new page layout.

    The legacy table layout still carries a "Censura" column header; its rows
    include audio/subtitle/censorship columns. The new layout only exposes a
    chapter number, an optional subtitle language and a download-button link.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Legacy pages are detected by the "Censura" table header.
    old_mode = scrapertools.find_single_match(data, '<th>Censura<\/th>')
    if old_mode:
        patron = '<td>(\d+)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td><a href="(.*?)".*?>Ver Capitulo<\/a><\/td>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedcap, scrapedaud, scrapedsub, scrapedcen, scrapedurl in matches:
            title = ('CAPITULO ' + scrapedcap + ' AUDIO: ' + scrapedaud +
                     ' SUB:' + scrapedsub + ' ' + censura[scrapedcen])
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                                 fulltitle=item.fulltitle, url=scrapedurl,
                                 thumbnail=item.thumbnail, plot=''))
    else:
        patron = '<\/i>.*?(.\d+)<\/td><td style="text-align:center">MP4<\/td><td style="text-align:center">(.*?)<\/td>.*?'
        patron += '<a class="dr-button" href="(.*?)" >'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedcap, scrapedsub, scrapedurl in matches:
            if scrapedsub != '':
                subs = scrapedsub
            else:
                # FIX: original assigned `sub = 'No'` (typo), so the title
                # below raised NameError for chapters without subtitles.
                subs = 'No'
            title = 'CAPITULO %s SUB %s' % (scrapedcap, subs)
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title,
                                 fulltitle=item.fulltitle, url=scrapedurl,
                                 thumbnail=item.thumbnail, plot=''))
    return itemlist
def findvideos(item):
    """Collect playable links for an episode.

    Combines the site's own direct (gvideo) mirror — served behind a
    ``phar.php`` redirector — with any third-party embeds that servertools
    discovers in the raw page.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    gvideo = scrapertools.find_single_match(
        data, '<li rel="(http:\/\/www\.18hentaionline\.net\/ramus\/phar\.php\?vid=.*?)">')
    if gvideo:
        # Robustness: only chase the redirector when the page actually has one
        # (the original requested an empty URL otherwise).
        headers = {'Host': 'www.18hentaionline.net', 'Referer': item.url}
        gvideo_data = httptools.downloadpage(gvideo, headers=headers).data
        gvideo_url = scrapertools.find_single_match(gvideo_data, 'file: "(.*?)"')
        itemlist.append(item.clone(url=gvideo_url, server='directo'))
    # Any other embedded players found in the raw page.
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.title = item.title + ' (%s)' % videoitem.server
        videoitem.action = 'play'
    return itemlist

View File

@@ -1,17 +1,23 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
logger.info("data=" + data)
media_url = scrapertools.find_single_match(data, '"file": "(.+?)"')
logger.info("media_url=" + media_url)
media_url = media_url.replace("?start=0", "")
data = re.sub(r"\n|\r|\t|\s{2}", "", httptools.downloadpage(page_url).data)
match = scrapertools.find_single_match(data, "<script type='text/javascript'>(.*?)</script>")
data = jsunpack.unpack(match)
data = data.replace("\\'", "'")
media_url = scrapertools.find_single_match(data, '{type:"video/mp4",src:"([^"]+)"}')
logger.info("media_url=" + media_url)
video_urls = list()