agregados

This commit is contained in:
alfa-addon
2017-08-01 17:46:11 -04:00
parent b12cbca299
commit bde00ddc4f
6 changed files with 719 additions and 0 deletions

View File

@@ -0,0 +1,70 @@
{
"id": "pelisplanet",
"name": "PelisPlanet",
"active": true,
"adult": false,
"language": "es",
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/pelisplanetbg.png",
"thumbnail": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/pelisplanet.png",
"banner": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/pelisplanetbaner.png",
"version": 1,
"changes": [
{
"date": "07/06/17",
"description": "Canal Nuevo"
}
],
"categories": [
"movie",
"tvshow",
"vos"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "orden_episodios",
"type": "bool",
"label": "Mostrar los episodios de las series en orden descendente",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
        "label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,275 @@
# -*- coding: utf-8 -*-
import re
import sys
import urllib
import urlparse
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
# Base URL of the PelisPlanet site scraped by this channel.
host = "http://www.pelisplanet.com/"
# Default request headers; Referer points at the site itself to avoid hotlink blocks.
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]
# Channel metadata (fanart/thumbnail URLs) loaded from the channel's JSON definition.
parameters = channeltools.get_channel_parameters('pelisplanet')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
# ARGB colour codes used to tint menu entries (light/medium/dark green).
color1, color2, color3 = ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E']
def mainlist(item):
    """Build the channel's root menu: listings, language filters and search."""
    logger.info()
    item.url = host
    item.text_color = color1
    item.fanart = fanart_host
    thumb = "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/%s.png"

    itemlist = [
        item.clone(title="Novedades", action="peliculas", text_blod=True,
                   viewcontent='movies', thumbnail=thumb % 'novedades',
                   viewmode="movie_with_plot"),
        item.clone(title="Estrenos", action="peliculas", text_blod=True,
                   url=host + 'genero/estrenos/', thumbnail=thumb % 'estrenos'),
        item.clone(title="Géneros", action="generos", text_blod=True,
                   viewcontent='movies', thumbnail=thumb % 'generos',
                   viewmode="movie_with_plot", url=host + 'generos/'),
        # Non-clickable section header for the language filters below.
        Item(channel=item.channel, title="Filtrar por Idiomas",
             fanart=fanart_host, folder=False, text_color=color3,
             text_blod=True, thumbnail=thumb % 'idiomas'),
        item.clone(title="Castellano", action="peliculas", text_blod=True,
                   viewcontent='movies', thumbnail=thumb % 'castellano',
                   viewmode="movie_with_plot", url=host + 'idioma/castellano/'),
        item.clone(title="Latino", action="peliculas", text_blod=True,
                   viewcontent='movies', thumbnail=thumb % 'latino',
                   viewmode="movie_with_plot", url=host + 'idioma/latino/'),
        item.clone(title="Buscar por Título o Actor", action="search", text_blod=True,
                   thumbnail=thumb % 'busqueda', url=host),
    ]
    return itemlist
def search(item, texto):
    """Global-search entry point: build the query URL and delegate to sub_search.

    Any exception is swallowed (logged only) so a failing channel does not
    break the global search aggregator.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
    try:
        return sub_search(item)
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        # sys is already imported at module level; the redundant local
        # "import sys" was removed.
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
def sub_search(item):
    """Scrape the search-results page, enrich with TMDb data and fill plots.

    Returns a list of playable Items plus an optional pagination entry.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="img">.*?<a href="(?P<url>[^"]+)" title="(?P<name>[^"]+)".*?'
    patron += '<img.+?src="(?P<img>[^"]+)".*?\(([^\)]+)\)"> </a></div>.*?'
    patron += 'Ver\s(.*?)\sOnline'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, name, img, year, scrapedinfo in matches:
        contentTitle = scrapertools.decodeHtmlentities(scrapedinfo.strip())
        itemlist.append(item.clone(title=name, url=url, contentTitle=contentTitle,
                                   plot=item.plot, action="findvideos", infoLabels={"year": year},
                                   thumbnail=img, text_color=color3))
    paginacion = scrapertools.find_single_match(
        data, '<a class="page larger" href="([^"]+)">\d+</a>')
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="sub_search",
                             title="» Siguiente »", url=paginacion,
                             thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
    tmdb.set_infoLabels(itemlist)
    # Second pass: download each detail page to fill in missing plot/fanart.
    # BUG FIX: the loop variable no longer shadows the *item* parameter.
    for entry in itemlist:
        if entry.infoLabels['plot'] == '':
            detail = httptools.downloadpage(entry.url).data
            entry.fanart = scrapertools.find_single_match(
                detail, 'meta property="og:image" content="([^"]+)" \/>')
            entry.plot = scrapertools.find_single_match(detail,
                'Castellano</h3>\s*<p>(.+?)<strong>')
            entry.plot = scrapertools.htmlclean(entry.plot)
    return itemlist
def newest(categoria):
    """Feed for the global "Novedades" section.

    Maps the requested category to a listing URL, delegates to peliculas()
    and strips the trailing pagination entry. Errors are only logged so a
    failing channel cannot break the aggregator.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host
        elif categoria == 'infantiles':
            item.url = host + "genero/animacion-e-infantil/"
        else:
            return []
        itemlist = peliculas(item)
        # Drop the pagination item, if any; guard against an empty result
        # (the original indexed itemlist[-1] unconditionally).
        if itemlist and itemlist[-1].title == "» Siguiente »":
            itemlist.pop()
    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        # sys is already imported at module level; the redundant local
        # "import sys" was removed.
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
def peliculas(item):
    """List movies from a PelisPlanet listing page.

    Each result's detail page is downloaded to scrape the original title,
    IMDb rating and director (slow, but matches the site structure). After
    the TMDb lookup a second pass fills in missing plot/fanart.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    patron_todas = '<div class="home-movies">(.*?)<footer>'
    data = scrapertools.find_single_match(data, patron_todas)
    patron = 'col-sm-5"><a href="(?P<scrapedurl>[^"]+)".+?'
    patron += 'browse-movie-link-qd.*?>(?P<calidad>[^>]+)</.+?'
    patron += '<p>(?P<year>[^>]+)</p>.+?'
    patron += 'title one-line">(?P<scrapedtitle>[^>]+)</h2>.+?'
    patron += 'img-responsive" src="(?P<scrapedthumbnail>[^"]+)".*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, calidad, year, scrapedtitle, scrapedthumbnail in matches:
        datas = httptools.downloadpage(scrapedurl).data
        datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
        if '/ ' in scrapedtitle:
            # Titles come as "Original / Spanish"; keep the Spanish part.
            scrapedtitle = scrapedtitle.partition('/ ')[2]
        contentTitle = scrapertools.find_single_match(datas, '<em class="pull-left">Titulo original: </em>([^<]+)</p>')
        contentTitle = scrapertools.decodeHtmlentities(contentTitle.strip())
        rating = scrapertools.find_single_match(datas, 'alt="Puntaje MPA IMDb" /></a><span>([^<]+)</span>')
        director = scrapertools.find_single_match(datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
        title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
        itemlist.append(Item(channel=item.channel, action="findvideos", title=title, plot='',
                             url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
                             contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
                             text_color=color3))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             title="» Siguiente »", url=paginacion, plot="Página Siguiente",
                             thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
    # Second pass: fill missing plot/fanart from each detail page.
    # BUG FIX: the loop variable no longer shadows the *item* parameter.
    for entry in itemlist:
        if entry.infoLabels['plot'] == '':
            detail = httptools.downloadpage(entry.url).data
            entry.fanart = scrapertools.find_single_match(detail, 'meta property="og:image" content="([^"]+)" \/>')
            entry.plot = scrapertools.find_single_match(detail, 'Castellano</h3>\s*<p>(.+?)<strong>')
            entry.plot = scrapertools.htmlclean(entry.plot)
    return itemlist
def generos(item):
    """List the genre tiles from the genres page."""
    logger.info()
    itemlist = []
    # Consistency fix: use httptools.downloadpage like every other function
    # in this channel instead of the legacy scrapertools.cache_page.
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<div class="todos">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle,
                             url=scrapedurl, text_color=color3, thumbnail=scrapedthumbnail,
                             plot="", viewmode="movie_with_plot", folder=True))
    return itemlist
def findvideos(item):
    """Extract playable video links from a movie page.

    Direct hosts (pelispp.com / ultrapelis) are resolved to their media
    files; every other embedded server (except YouTube trailers) is mapped
    through servertools.
    """
    logger.info()
    itemlist = []
    datas = httptools.downloadpage(item.url).data
    datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", datas)
    patron = '<a style="cursor:pointer; cursor: hand;" rel="([^"]+)".*?'
    patron += 'clearfix colores title_calidad">.*?<span>([^<]+)</span></a>'
    matches = re.compile(patron, re.DOTALL).findall(datas)
    for scrapedurl, servidores in matches:
        # BUG FIX: the original condition was
        #   if 'pelispp.com' or 'ultrapelis' in scrapedurl:
        # which is always true (a non-empty string literal is truthy), so the
        # direct-host branch ran for every server. Test each host explicitly.
        if 'pelispp.com' in scrapedurl or 'ultrapelis' in scrapedurl:
            data = httptools.downloadpage(scrapedurl, headers=headers).data
            patronr = 'file: "([^"]+)",label:"([^"]+)",type'
            matchesr = re.compile(patronr, re.DOTALL).findall(data)
            # BUG FIX: inner loop variable renamed (it used to rebind
            # scrapedurl, corrupting the checks below).
            for fileurl, label in matchesr:
                url = fileurl.replace('\\', '')
                quality = label.decode('cp1252').encode('utf8')
                title = item.contentTitle + ' (' + str(label) + ')'
                itemlist.append(item.clone(action="play", title=title, url=url, server='directo',
                                           thumbnail=item.thumbnail, fanart=item.fanart,
                                           extra='directo', quality=quality, language='latino'))
            # Highest quality first.
            itemlist.sort(key=lambda it: it.title, reverse=True)
        if 'youtube' not in scrapedurl:
            quality = scrapertools.find_single_match(
                datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
            title = "[COLOR green]%s[/COLOR] [COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
                item.contentTitle, quality.upper(), servidores.capitalize())
            url = scrapedurl.replace('\\', '')
            server = servertools.get_server_from_url(url)
            itemlist.append(item.clone(action='play', title=title, url=url, quality=quality,
                                       server=server, text_color=color3, thumbnail=item.thumbnail))
    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        videoitem.action = 'play'
        videoitem.fulltitle = item.title
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
                             url=item.url, action="add_pelicula_to_library",
                             thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
                             extra="findvideos", contentTitle=item.contentTitle))
    return itemlist

View File

@@ -0,0 +1,44 @@
{
"id": "xtheatre",
"name": "xTheatre",
"active": true,
"adult": true,
"language": "es",
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebg.jpg",
"thumbnail": "https://xtheatre.net/wp-content/uploads/xtlogo.jpg",
"banner": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/xthearebm.png",
"version": 1,
"changes": [
{
"date": "07/06/17",
"description": "Canal Nuevo"
}
],
"categories": [
"adult"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -0,0 +1,220 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para PelisPlanet
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import sys
import urllib
import urlparse
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
__channel__ = "xtheatre"
# Base URL of the xTheatre site scraped by this channel.
host = 'https://xtheatre.net/'
# Read the user-configurable settings; fall back to defaults when reading
# fails (e.g. first run before settings exist).
try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
    __perfil__ = int(config.get_setting('perfil', __channel__))
except:
    __modo_grafico__ = True
    __perfil__ = 0
# Fijar perfil de color (set colour profile): each row holds three ARGB colours.
# NOTE(review): '0xFFE9D7940' has 9 hex digits — looks like a typo; confirm
# the intended colour value.
perfil = [['0xFF6E2802', '0xFFFAA171', '0xFFE9D7940'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E'],
          ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']]
if __perfil__ - 1 >= 0:
    color1, color2, color3 = perfil[__perfil__-1]
else:
    # Profile 0 ("Sin color"): leave the colours empty.
    color1 = color2 = color3 = ""
# Default request headers; Referer points at the site itself.
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]
# Channel metadata (fanart/thumbnail URLs) from the channel's JSON definition.
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
# Template for the menu thumbnails hosted on GitHub.
thumbnail = 'https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/adults/%s.png'
def mainlist(item):
    """Render the channel's root menu (same entries as the original)."""
    logger.info()
    # (title, url, action, thumbnail suffix) for the listing entries.
    menu = (
        ("Últimas", host + '?filtre=date&cat=0', "peliculas", '1'),
        ("Más Vistas", host + '?display=extract&filtre=views', "peliculas", '2'),
        ("Mejor Valoradas", host + '?display=extract&filtre=rate', "peliculas", '3'),
        ("Categorías", host + 'categories/', "categorias", '4'),
    )
    itemlist = [Item(channel=__channel__, title=title, url=url, action=action,
                     viewmode="movie_with_plot", viewcontent='movies',
                     thumbnail=thumbnail % num)
                for title, url, action, num in menu]
    itemlist.append(Item(channel=__channel__, title="Buscador", action="search",
                         url=host, thumbnail=thumbnail % '5'))
    return itemlist
def peliculas(item):
    """List videos from an xTheatre listing page and fill missing plots.

    Scrapes the lazy-loaded thumbnail/title/link triples, appends a
    pagination entry when a "Next" link exists, then downloads each detail
    page to fill in the plot for entries that lack one.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>|#038;", "", data)
    patron_todos = '<div id="content">(.*?)<div id="footer"'
    data = scrapertools.find_single_match(data, patron_todos)
    patron = 'data-lazy-src="([^"]+)".*?'     # img
    patron += 'title="([^"]+)"/>.*?'          # title
    patron += '</noscript><a href="([^"]+)"'  # url
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        itemlist.append(item.clone(channel=item.channel, action="findvideos", title=scrapedtitle,
                                   url=scrapedurl, thumbnail=scrapedthumbnail, plot='',
                                   viewmode="movie_with_plot", folder=True))
    # Extract the pagination link.
    paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)">Next &rsaquo;</a></li><li>')
    # BUG FIX: only urljoin/append when a "Next" link was actually found;
    # the original joined first, so urljoin(item.url, '') == item.url made
    # the check always true and appended a bogus pagination item.
    if paginacion:
        paginacion = urlparse.urljoin(item.url, paginacion)
        itemlist.append(Item(channel=item.channel, action="peliculas",
                             thumbnail=thumbnail % 'rarrow',
                             title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
    # Second pass: fill missing plots.
    # BUG FIX: loop variable renamed so it no longer shadows *item*.
    for entry in itemlist:
        if entry.infoLabels['plot'] == '':
            detail = httptools.downloadpage(entry.url).data
            detail = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", detail)
            patron = '<div id="video-synopsys" itemprop="description">(.*?)<div id="video-bottom">'
            detail = scrapertools.find_single_match(detail, patron)
            entry.infoLabels['plot'] = scrapertools.find_single_match(detail, '<p>(.*?)</p></div>')
            # BUG FIX: clean the plot just stored in infoLabels; the
            # original cleaned entry.plot, which may not hold that value.
            entry.infoLabels['plot'] = scrapertools.htmlclean(entry.infoLabels['plot'])
    return itemlist
def categorias(item):
    """List the category tiles (name plus video count) from the categories page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)  # debug dump of the whole page, disabled like in the sibling functions
    patron = 'data-lazy-src="([^"]+)".*?'                            # img
    patron += '</noscript><a href="([^"]+)".*?'                      # url
    patron += '<span>([^<]+)</span></a>.*?'                          # title
    patron += '<span class="nb_cat border-radius-5">([^<]+)</span>'  # num_vids
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedurl, scrapedtitle, vids in matches:
        title = "%s (%s)" % (scrapedtitle, vids.title())
        itemlist.append(Item(channel=item.channel, action="peliculas", fanart=scrapedthumbnail,
                             title=title, url=scrapedurl, thumbnail=scrapedthumbnail, plot='',
                             viewmode="movie_with_plot", folder=True))
    return itemlist
def search(item, texto):
    """Global-search entry point: build the query URL and delegate to sub_search.

    Any exception is swallowed (logged only) so a failing channel does not
    break the global search aggregator.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
    try:
        return sub_search(item)
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        # sys is already imported at module level; the redundant local
        # "import sys" was removed.
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
def sub_search(item):
    """Scrape the search-results page into playable Items plus pagination.

    BUG FIX: the original downloaded and cleaned the page twice in a row
    (two identical downloadpage/re.sub pairs); the duplicate fetch was
    removed.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron_todos = '<div id="content">(.*?)</li></ul></div></div>'
    data = scrapertools.find_single_match(data, patron_todos)
    patron = 'data-lazy-src="([^"]+)".*?'     # img
    patron += 'title="([^"]+)"/>.*?'          # title
    patron += '</noscript><a href="([^"]+)"'  # url
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,
                                   action="findvideos", thumbnail=scrapedthumbnail))
    paginacion = scrapertools.find_single_match(
        data, "<a href='([^']+)' class=\"inactive\">\d+</a>")
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="sub_search",
                             title="\xc2\xbb Siguiente \xc2\xbb", url=paginacion))
    return itemlist
def findvideos(item):
    """Resolve the embedded player iframes on a video page into playable items."""
    logger.info()  # consistency: every other action in this channel logs its entry
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}|&nbsp;", "", data)
    # logger.info(data)  # debug dump of the whole page, disabled (flooded the log)
    patron_todos = '<div class="video-embed">(.*?)</div>'
    data = scrapertools.find_single_match(data, patron_todos)
    patron = '<iframe src="[^"]+" data-lazy-src="([^"]+)".*?</iframe>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        server = servertools.get_server_from_url(url)
        itemlist.append(item.clone(action='play', title=item.title, server=server,
                                   mediatype='movie', url=url))
    for videoitem in itemlist:
        videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        videoitem.title = "%s [COLOR yellow](%s)[/COLOR]" % (item.title, videoitem.server)
    # Library support intentionally disabled for this adult channel:
    # if config.get_library_support() and len(itemlist) > 0:
    #     itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la biblioteca[/COLOR]',
    #                          url=item.url, action="add_pelicula_to_library",
    #                          thumbnail='https://s19.postimg.org/l5z8iy1zn/biblioteca.png',
    #                          extra="findvideos", contentTitle=item.contentTitle))
    return itemlist

View File

@@ -0,0 +1,49 @@
{
"active": true,
"changes": [
{
"date": "28/06/2017",
"description": "Nuevo conector"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "streamcherry.com/(?:embed|f)/([A-z0-9]+)",
"url": "http://streamcherry.com/embed/\\1"
}
]
},
"free": true,
"id": "streamcherry",
"name": "streamcherry",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/l45Tk0G.png",
"version": 1
}

View File

@@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para streamcherry
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# --------------------------------------------------------
import re

from core import httptools
from core import logger
from core import scrapertools
def test_video_exists(page_url):
    """Check whether the video at *page_url* is still available on streamcherry."""
    logger.info("(page_url='%s')" % page_url)
    page = httptools.downloadpage(page_url).data
    missing = "We are unable to find the video" in page
    if missing:
        return False, "[streamcherry] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract the direct media URLs for a streamcherry page.

    Returns a list of [label, url] pairs, highest quality first.
    """
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url).data
    pattern = 'type\s*:\s*"([^"]+)"\s*,\s*src:"([^"]+)",height\s*:\s*(\d+)'
    video_urls = []
    for mime, media_url, height in scrapertools.find_multiple_matches(data, pattern):
        ext = mime.replace("video/", "")
        # Protocol-relative links ("//host/...") need an explicit scheme.
        if not media_url.startswith("http"):
            media_url = "http:%s" % media_url
        video_urls.append([".%s %sp [streamcherry]" % (ext, height), media_url])
    video_urls.reverse()
    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))
    return video_urls
# Find this server's videos in the given text.
def find_videos(data):
    """Scan *data* for streamcherry embed/file URLs.

    Returns a list of [title, url, server_id] entries, skipping duplicates.
    Requires the module-level ``import re`` (the original file never
    imported it, so this function raised NameError when called).
    NOTE(review): the character class [A-z0-9] also matches the ASCII
    characters between 'Z' and 'a'; kept as-is to match the pattern in the
    server's JSON definition — confirm before tightening to [A-Za-z0-9].
    """
    encontrados = set()
    devuelve = []
    patronvideos = 'streamcherry.com/(?:embed|f)/([A-z0-9]+)'
    logger.info("#" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        titulo = "[streamcherry]"
        url = "http://streamcherry.com/embed/%s" % match
        if url not in encontrados:
            logger.info(" url=" + url)
            devuelve.append([titulo, url, 'streamcherry'])
            encontrados.add(url)
        else:
            logger.info(" url duplicada=" + url)
    return devuelve