Merge pull request #123 from Intel11/patch-1

Updated
Authored by Alfa on 2017-10-09 12:08:26 -04:00 · committed by GitHub
16 changed files with 246 additions and 281 deletions

View File

@@ -21,12 +21,12 @@ CHANNEL_DEFAULT_HEADERS = [
REGEX_NEXT_PAGE = r"class='current'>\d+?</li><li><a href=\"([^']+?)\""
REGEX_TITLE = r'(?:bigChar_a" href=.+?>)(.+?)(?:</a>)'
-REGEX_THUMB = r'src="(http://media.animeflv\.me/uploads/thumbs/[^"]+?)"'
+REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"'
REGEX_PLOT = r'<span class="info">Línea de historia:</span><p><span>(.*?)</span>'
-REGEX_URL = r'href="(http://animeflv\.me/Anime/[^"]+)">'
+REGEX_URL = r'href="(http://animeflv\.co/Anime/[^"]+)">'
REGEX_SERIE = r'%s.+?%s([^<]+?)</a><p>(.+?)</p>' % (REGEX_THUMB, REGEX_URL)
-REGEX_EPISODE = r'href="(http://animeflv\.me/Ver/[^"]+?)">(?:<span.+?</script>)?(.+?)</a></td><td>(\d+/\d+/\d+)</td></tr>'
-REGEX_GENERO = r'<a href="(http://animeflv\.me/genero/[^\/]+/)">([^<]+)</a>'
+REGEX_EPISODE = r'href="(http://animeflv\.co/Ver/[^"]+?)">(?:<span.+?</script>)?(.+?)</a></td><td>(\d+/\d+/\d+)</td></tr>'
+REGEX_GENERO = r'<a href="(http://animeflv\.co/genero/[^\/]+/)">([^<]+)</a>'
def get_url_contents(url):
@@ -309,7 +309,7 @@ def findvideos(item):
itemlist = []
page_html = get_url_contents(item.url)
-regex_api = r'http://player\.animeflv\.me/[^\"]+'
+regex_api = r'http://player\.animeflv\.co/[^\"]+'
iframe_url = scrapertools.find_single_match(page_html, regex_api)
iframe_html = get_url_contents(iframe_url)
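Side note on the change above: the whole file is a .me → .co domain migration in the scraping regexes. A minimal standalone sketch to sanity-check the migrated patterns — the sample HTML is hypothetical and only mimics the markup shape the channel scrapes:

# not part of the diff: quick check of the migrated .co regexes
import re

REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"'
REGEX_URL = r'href="(http://animeflv\.co/Anime/[^"]+)">'

sample = ('<img src="http://media.animeflv.co/uploads/thumbs/123.jpg">'
          '<a href="http://animeflv.co/Anime/one-piece">One Piece</a>')

print(re.search(REGEX_THUMB, sample).group(1))  # thumbnail URL on the new domain
print(re.search(REGEX_URL, sample).group(1))    # series URL on the new domain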

View File

@@ -3,24 +3,21 @@
# Alfa
# ------------------------------------------------------------
-import urlparse,urllib2,urllib,re
-import os, sys
+import re
from core import httptools
-from core import tmdb
-from core import jsontools as json
from core import scrapertools
from core import servertools
+from core import tmdb
from core.item import Item
-from platformcode import config, logger
+from platformcode import logger
def mainlist(item):
logger.info()
-itemlist = []
+itemlist = list()
itemlist.append(item.clone(title="Novedades", action="peliculas", url="http://gnula.mobi/"))
itemlist.append(item.clone(title="Castellano", action="peliculas",
url="http://www.gnula.mobi/tag/esp)anol/"))
url="http://www.gnula.mobi/tag/espanol/"))
itemlist.append(item.clone(title="Latino", action="peliculas", url="http://gnula.mobi/tag/latino/"))
itemlist.append(item.clone(title="VOSE", action="peliculas", url="http://gnula.mobi/tag/subtitulada/"))
@@ -53,43 +50,66 @@ def sub_search(item):
patron = '<div class="row">.*?<a href="([^"]+)" title="([^"]+)">.*?<img src="(.*?)" title'
matches = scrapertools.find_multiple_matches(data, patron)
-for url,name,img in matches:
-itemlist.append(item.clone(title=name, url=url, action="findvideos", show=name, thumbnail=img))
+for url, name, img in matches:
+itemlist.append(item.clone(title=name, url=url, action="findvideos", thumbnail=img))
paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)" ><i class="glyphicon '
'glyphicon-chevron-right" aria-hidden="true"></i>')
if paginacion:
-itemlist.append(channel=item.channel, action="sub_search", title="Next page >>" , url=paginacion)
+itemlist.append(Item(channel=item.channel, action="sub_search", title="Next page >>", url=paginacion))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"'
-matches = re.compile(patron,re.DOTALL).findall(data)
+patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?under-title">(.*?)<.*?src="(.*?)"'
+matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedyear, scrapedtitle, scrapedthumbnail in matches:
+url = scrapedurl
+title = scrapedtitle
+year = scrapertools.find_single_match(scrapedyear, r'.*?\((\d{4})\)')
+thumbnail = scrapedthumbnail
+new_item =Item (channel = item.channel, action="findvideos", title=title, contentTitle=title, url=url,
+thumbnail=thumbnail, infoLabels = {'year':year})
+if year:
+tmdb.set_infoLabels_item(new_item)
-itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, fulltitle = scrapedtitle, url=scrapedurl,
-thumbnail=scrapedthumbnail, infoLabels={'year': year}))
+itemlist.append(new_item)
-next_page_url = scrapertools.find_single_match(data,'<link rel="next" href="(.*?)"\/>')
-if next_page_url!="":
-next_page_url = urlparse.urljoin(item.url,next_page_url)
+tmdb.set_infoLabels(itemlist, True)
+next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
+if next_page_url != "":
+next_page_url = item.url + next_page_url
itemlist.append(item.clone(action="peliculas", title="Siguiente >>", text_color="yellow",
-url=next_page_url))
+url=next_page_url))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'data-src="([^"]+)".*?'
patron += 'data-toggle="tab">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, language in matches:
url = url.replace("&amp;", "&")
response = httptools.downloadpage(url, follow_redirects=False, add_referer=True)
if response.data:
url = scrapertools.find_single_match(response.data, 'src="([^"]+)"')
else:
url = response.headers.get("location", "")
url = url.replace("&quot","")
titulo = "Ver en %s (" + language + ")"
itemlist.append(item.clone(
action = "play",
title = titulo,
url = url,
language = language))
tmdb.set_infoLabels(itemlist, True)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
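A note on the title convention used in findvideos above: each link's title keeps a "%s" placeholder that servertools.get_servers_itemlist later fills with the detected server name. A minimal sketch of that mechanism, with a hypothetical stand-in class rather than the real core.item.Item:

class FakeItem(object):  # stand-in for core.item.Item, illustration only
    def __init__(self, title, server):
        self.title = title
        self.server = server

def fill_titles(itemlist, title_fn):
    # mimics the title-formatting step of servertools.get_servers_itemlist
    for it in itemlist:
        it.title = title_fn(it)
    return itemlist

items = [FakeItem("Ver en %s (Latino)", "powvideo")]
items = fill_titles(items, lambda i: i.title % i.server.capitalize())
print(items[0].title)  # Ver en Powvideo (Latino)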

View File

@@ -634,7 +634,7 @@ def findvideos(item):
title_label = bbcode_kodi2html(" ( [COLOR green][B]Tráiler[/B][/COLOR] )")
itemlist.append(
Item(channel=item.channel, action="trailer", title=title_label, fulltitle=title_label, url=url_targets,
Item(channel=item.channel, action="buscartrailer", title=title_label, fulltitle=title_label, url=url_targets,
thumbnail=item.thumbnail, show=item.show))
itemlist.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
@@ -676,93 +676,39 @@ def findvideos(item):
matches.append([match["lang"], match["quality"], url, embed])
enlaces = []
for idioma, calidad, url, embed in matches:
-servername = scrapertools.find_single_match(url, "(?:http:|https:)//(?:www.|)([^.]+).")
-if servername == "streamin": servername = "streaminto"
-if servername == "waaw": servername = "netutv"
-if servername == "uploaded" or servername == "ul": servername = "uploadedto"
-mostrar_server = True
-if config.get_setting("hidepremium") == True:
-mostrar_server = servertools.is_server_enabled(servername)
-if mostrar_server:
-option = "Ver"
-if re.search(r'return ([\'"]{2,}|\})', embed):
-option = "Descargar"
-calidad = unicode(calidad, "utf8").upper().encode("utf8")
-servername_c = unicode(servername, "utf8").capitalize().encode("utf8")
-title = option + ": " + servername_c + " (" + calidad + ")" + " (" + idioma + ")"
-thumbnail = item.thumbnail
-plot = item.title + "\n\n" + scrapertools.find_single_match(data,
-'<meta property="og:description" content="([^"]+)"')
-plot = scrapertools.htmlclean(plot)
-fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
-if account:
-url += "###" + id + ";" + type
+option = "Ver"
+if re.search(r'return ([\'"]{2,}|\})', embed):
+option = "Descargar"
+calidad = unicode(calidad, "utf8").upper().encode("utf8")
+title = option + ": %s (" + calidad + ")" + " (" + idioma + ")"
+thumbnail = item.thumbnail
+plot = item.title + "\n\n" + scrapertools.find_single_match(data,
+'<meta property="og:description" content="([^"]+)"')
+plot = scrapertools.htmlclean(plot)
+fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
+if account:
+url += "###" + id + ";" + type
-enlaces.append(
-Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
-plot=plot, fanart=fanart, show=item.show, folder=True, server=servername, infoLabels=infolabels,
-contentTitle=item.contentTitle, contentType=item.contentType, tipo=option))
+itemlist.append(
+Item(channel=item.channel, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail,
+plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
+contentTitle=item.contentTitle, contentType=item.contentType, tipo=option))
-enlaces.sort(key=lambda it: it.tipo, reverse=True)
-itemlist.extend(enlaces)
+itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+itemlist.sort(key=lambda it: it.title, reverse=True)
-## 2 = movie
-if type == "2" and item.category != "Cine":
-## STRM for every available server link
-## If the movie's STRM file does not exist, show the ">> Añadir a la videoteca..." item
-try:
-itemlist.extend(file_cine_library(item, url_targets))
-except:
-pass
+if config.get_videolibrary_support():
+itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
+action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail,
+fulltitle = item.contentTitle
+))
return itemlist
-def trailer(item):
-import youtube
-itemlist = []
-item.url = "https://www.googleapis.com/youtube/v3/search" + \
-"?q=" + item.show.replace(" ", "+") + "+trailer+HD+Español" \
-"&regionCode=ES" + \
-"&part=snippet" + \
-"&hl=es_ES" + \
-"&key=AIzaSyAd-YEOqZz9nXVzGtn3KWzYLbLaajhqIDA" + \
-"&type=video" + \
-"&maxResults=50" + \
-"&pageToken="
-itemlist.extend(youtube.fichas(item))
-# itemlist.pop(-1)
-return itemlist
-def file_cine_library(item, url_targets):
-import os
-from core import filetools
-videolibrarypath = os.path.join(config.get_videolibrary_path(), "CINE")
-archivo = item.show.strip()
-strmfile = archivo + ".strm"
-strmfilepath = filetools.join(videolibrarypath, strmfile)
-if not os.path.exists(strmfilepath):
-itemlist = []
-itemlist.append(Item(channel=item.channel, title=">> Añadir a la videoteca...", url=url_targets,
-action="add_file_cine_library", extra="episodios", show=archivo))
-return itemlist
-def add_file_cine_library(item):
-from core import videolibrarytools
-new_item = item.clone(title=item.show, action="play_from_library")
-videolibrarytools.save_movie(new_item)
-itemlist = []
-itemlist.append(Item(title='El vídeo ' + item.show + ' se ha añadido a la videoteca'))
-# xbmctools.renderItems(itemlist, "", "", "")
-platformtools.render_items(itemlist, "")
-return
def play(item):
if "###" in item.url:
@@ -780,13 +726,11 @@ def play(item):
if devuelve:
item.url = devuelve[0][1]
item.server = devuelve[0][2]
-item.thumbnail = item.contentThumbnail
-item.fulltitle = item.contentTitle
return [item]
## --------------------------------------------------------------------------------
## --------------------------------------------------------------------------------
def agrupa_datos(data):
## Groups the data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|<!--.*?-->', '', data)
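That re.sub is the whole trick behind agrupa_datos: flattening the page so the single-line scraping regexes above can match anywhere. A self-contained sketch:

import re

def agrupa_datos(data):
    # strip newlines, tabs, &nbsp;, <br> and HTML comments before scraping
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|<!--.*?-->', '', data)

html = "<div>\n\t<!-- banner -->Hola&nbsp;mundo<br></div>"
print(agrupa_datos(html))  # <div>Holamundo</div>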

View File

@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import logger, config
@@ -16,117 +16,54 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, action="peliculas", title="Recientes", url=host))
itemlist.append(Item(channel=item.channel, action="PorFecha", title="Año de Lanzamiento", url=host))
itemlist.append(Item(channel=item.channel, action="Idiomas", title="Idiomas", url=host))
itemlist.append(Item(channel=item.channel, action="calidades", title="Por calidad", url=host))
itemlist.append(Item(channel=item.channel, action="generos", title="Por género", url=host))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Año de Lanzamiento",
category = "lanzamiento"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Idiomas",
category = "idioma"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Por calidad",
category = "calidades"
))
itemlist.append(Item(channel = item.channel,
action = "filtro",
title = "Por género",
category = "generos"
))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url=host))
return itemlist
-def PorFecha(item):
-logger.info()
-# Download the page
-data = httptools.downloadpage(item.url).data
-data = scrapertools.find_single_match(data, '<section class="lanzamiento">(.*?)</section>')
-# Extract the entries (folders)
-patron = '<a href="([^"]+).*?title="([^"]+)'
-matches = re.compile(patron, re.DOTALL).findall(data)
+def filtro(item):
+logger.info(item.category)
+itemlist = []
+patron1 = '<section class="%s">(.*?)</section>' %item.category
+patron2 = '<a href="([^"]+).*?title="([^"]+)'
+data = httptools.downloadpage(host).data
+data = scrapertools.find_single_match(data, patron1)
+matches = scrapertools.find_multiple_matches(data, patron2)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, viewmode="movie"))
return itemlist
-def Idiomas(item):
-logger.info()
-# Download the page
-data = httptools.downloadpage(item.url).data
-data = scrapertools.find_single_match(data, '<section class="idioma">(.*?)</section>')
-# Extract the entries (folders)
-patron = '<a href="([^"]+).*?title="([^"]+)'
-matches = re.compile(patron, re.DOTALL).findall(data)
-itemlist = []
-for scrapedurl, scrapedtitle in matches:
-title = scrapedtitle.strip()
-thumbnail = ""
-plot = ""
-url = urlparse.urljoin(item.url, scrapedurl)
-itemlist.append(
-Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
-fulltitle=title, viewmode="movie"))
-return itemlist
-def calidades(item):
-logger.info()
-# Download the page
-data = httptools.downloadpage(item.url).data
-data = scrapertools.find_single_match(data, '<section class="calidades">(.*?)</section>')
-# Extract the entries (folders)
-patron = '<a href="([^"]+).*?title="([^"]+)'
-matches = re.compile(patron, re.DOTALL).findall(data)
-itemlist = []
-for scrapedurl, scrapedtitle in matches:
-title = scrapedtitle.strip()
-thumbnail = ""
-plot = ""
-url = urlparse.urljoin(item.url, scrapedurl)
-itemlist.append(
-Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
-fulltitle=title, viewmode="movie"))
-return itemlist
def generos(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="generos">(.*?)</section>')
patron = '<a href="([^"]+).*?title="([^"]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
thumbnail = ""
plot = ""
url = urlparse.urljoin(item.url, scrapedurl)
if "Adulto" in title and config.get_setting("adult_mode") == 0:
if "Adulto" in scrapedtitle and config.get_setting("adult_mode") == 0:
continue
itemlist.append(
-Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
-fulltitle=title, viewmode="movie"))
+Item(channel=item.channel, action="peliculas", title=scrapedtitle.strip(), url=scrapedurl,
+viewmode="movie"))
return itemlist
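The refactor above folds four near-identical scrapers (PorFecha, Idiomas, calidades, generos) into one filtro() keyed by the <section class="..."> name. A standalone sketch of the idea, using a made-up HTML snippet and plain re instead of the Alfa helpers:

import re

def filtro(html, category):
    # pull the matching <section> and scrape (url, title) pairs from it
    section = re.search(r'<section class="%s">(.*?)</section>' % category, html, re.DOTALL)
    if not section:
        return []
    return re.findall(r'<a href="([^"]+).*?title="([^"]+)', section.group(1))

html = '<section class="idioma"><a href="/tag/latino" title="Latino"></a></section>'
print(filtro(html, "idioma"))  # [('/tag/latino', 'Latino')]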
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "?s=" + texto
try:
# return buscar(item)
return peliculas(item)
# Catch the exception so the global search is not interrupted when one channel fails
except:
@@ -138,21 +75,20 @@ def search(item, texto):
def peliculas(item):
logger.info()
+itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-tabla_pelis = scrapertools.find_single_match(data,
-'class="section col-17 col-main grid-125 overflow clearfix">(.*?)</div></section>')
patron = '<img src="([^"]+)" alt="([^"]+).*?href="([^"]+)'
-matches = re.compile(patron, re.DOTALL).findall(tabla_pelis)
-itemlist = []
+matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
year = scrapertools.find_single_match(scrapedtitle, "[0-9]{4}")
fulltitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle, '\([0-9]+\)' ), "")
-itemlist.append(Item(channel = item.channel,
+item.infoLabels['year'] = year
+itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
title = scrapedtitle,
url = scrapedurl,
@@ -160,7 +96,7 @@ def peliculas(item):
plot = "",
fulltitle = fulltitle
))
tmdb.set_infoLabels(itemlist, True)
next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)')
if next_page != "":
itemlist.append(
@@ -172,31 +108,30 @@ def peliculas(item):
def findvideos(item):
logger.info()
-data = httptools.downloadpage(item.url).data
-patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<'
-matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
encontrados = []
itemtemp = []
+data = httptools.downloadpage(item.url).data
+patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<'
+matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, server_name, language, quality in matches:
if scrapedurl in encontrados:
continue
encontrados.append(scrapedurl)
language = language.strip()
quality = quality.strip()
-itemlist.append(Item(channel=item.channel,
+mq = "(" + quality + ")"
+if "http" in quality:
+quality = mq = ""
+titulo = "%s (" + language + ") " + mq
+itemlist.append(item.clone(channel=item.channel,
action = "play",
extra = "",
fulltitle = item.fulltitle,
-title = "%s (" + language + ") (" + quality + ")",
-thumbnail = item.thumbnail,
+title = titulo,
url = scrapedurl,
folder = False,
language = language,
quality = quality
))
tmdb.set_infoLabels(itemlist, True)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel=item.channel))
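The encontrados list in findvideos above is a simple seen-URL guard so repeated mirror links are listed only once. The idiom, isolated:

def sin_repetidos(urls):
    # keep the first occurrence of each URL, preserving order
    encontrados = []
    for url in urls:
        if url in encontrados:
            continue
        encontrados.append(url)
    return encontrados

print(sin_repetidos(["http://a/1", "http://a/1", "http://b/2"]))  # ['http://a/1', 'http://b/2']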

View File

@@ -5,6 +5,7 @@ import re
from core import httptools
from core import scrapertools
from core import servertools
+from core import tmdb
from core.item import Item
from platformcode import config, logger
@@ -58,7 +59,7 @@ def porGenero(item):
patron = 'cat-item.*?href="([^"]+).*?>(.*?)<.*?span>([^<]+)'
-matches = re.compile(patron, re.DOTALL).findall(data)
+matches = scrapertools.find_multiple_matches(data, patron)
for urlgen, genero, cantidad in matches:
cantidad = cantidad.replace(".", "")
@@ -103,7 +104,6 @@ def agregadas(item):
title = info[3]
plot = info[4]
year = info[5].strip()
itemlist.append(Item(channel=item.channel,
action='findvideos',
contentType = "movie",
@@ -116,15 +116,11 @@ def agregadas(item):
contentTitle = title,
url=url
))
# Pagination
-try:
-next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) ><i')
-itemlist.append(Item(channel=item.channel, action="agregadas", title='Pagina Siguiente >>',
-url=next_page.strip(),
-viewmode="movie_with_plot"))
-except:
-pass
+tmdb.set_infoLabels_itemlist(itemlist, True)
+next_page = scrapertools.find_single_match(data,'tima>.*?href=(.*?) ><i')
+itemlist.append(Item(channel=item.channel, action="agregadas", title='Pagina Siguiente >>',
+url=next_page.strip(),
+viewmode="movie_with_plot"))
return itemlist
@@ -135,11 +131,9 @@ def listaBuscar(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n", " ", data)
logger.info("data=" + data)
patron = 'class="row"> <a.*?="([^"]+).*?src="([^"]+).*?title="([^"]+).*?class="text-list">(.*?)</p>'
-matches = re.compile(patron, re.DOTALL).findall(data)
+matches = scrapertools.find_multiple_matches(data, patron)
for url, thumbnail, title, sinopsis in matches:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title + " ", fulltitle=title, url=url,
@@ -157,7 +151,7 @@ def findvideos(item):
# Download the page
data = httptools.downloadpage(item.url).data
patron = 'cursor: hand" rel="(.*?)".*?class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span'
-matches = re.compile(patron, re.DOTALL).findall(data)
+matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedidioma, scrapedcalidad in matches:
idioma = ""
@@ -168,16 +162,17 @@ def findvideos(item):
language = scrapedidioma
if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl):
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=plot, show=item.show, quality= quality, language=language, extra = item.thumbnail))
item.clone(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
plot=plot, quality= quality, language=language, extra = item.thumbnail))
tmdb.set_infoLabels_itemlist(itemlist, True)
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# "Add this movie to the KODI library" option
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
-filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
-infoLabels={'title': item.fulltitle}, fulltitle=item.title,
-extra="library"))
+action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
+fulltitle=item.title
+))
return itemlist
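Several hunks in this PR swap bare Item(...) constructions for item.clone(...). The point of clone is inheritance: the child starts as a copy of the parent (thumbnail, infoLabels, show, ...) and only overrides what is passed. A toy model of that behaviour, not the real core.item.Item:

import copy

class Item(object):  # hypothetical stand-in for core.item.Item
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def clone(self, **kwargs):
        new = copy.deepcopy(self)      # copy every field of the parent item
        new.__dict__.update(kwargs)    # then apply the overrides
        return new

movie = Item(title="Movie", thumbnail="cover.jpg", infoLabels={'year': '2017'})
link = movie.clone(action="play", url="http://example.com/v")
print(link.thumbnail, link.infoLabels)  # inherited from the parent item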

View File

@@ -1,7 +1,7 @@
{
"id": "pordede",
"name": "Pordede",
"active": true,
"active": false,
"adult": false,
"language": ["cast"],
"thumbnail": "pordede.png",
@@ -105,4 +105,4 @@
]
}
]
}
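Flipping "active" to false is how a broken channel (Pordede here) is hidden without deleting it: the channel list simply skips inactive entries. A sketch with made-up channel dicts:

channels = [{"id": "pordede", "active": False},
            {"id": "gnula", "active": True}]

print([c["id"] for c in channels if c["active"]])  # ['gnula'] - pordede stays installed but hidden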

View File

@@ -202,7 +202,7 @@ def filterchannels(category, view="thumb_"):
def get_thumb(thumb_name, view="thumb_"):
-icon_pack_name = config.get_setting('icon_set')
+icon_pack_name = config.get_setting('icon_set', default="default")
if icon_pack_name == "default":
resource_path = os.path.join(config.get_runtime_path(), "resources", "media", "themes")
else:

View File

@@ -5,7 +5,7 @@ from threading import Timer
import xbmc
import xbmcaddon
import xbmcgui
-from core import filetools
+from channelselector import get_thumb
from platformcode import config
@@ -82,27 +82,13 @@ def set_key():
MAIN_MENU = {
"news": {"label": "Novedades",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_news.png"), "order": 0},
"channels": {"label": "Canales",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_channels.png"), "order": 1},
"search": {"label": "Buscador",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_search.png"), "order": 2},
"favorites": {"label": "Favoritos",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_favorites.png"), "order": 3},
"videolibrary": {"label": "Videoteca",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_videolibrary.png"), "order": 4},
"downloads": {"label": "Descargas",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_downloads.png"), "order": 5},
"settings": {"label": "Configuración",
"icon": filetools.join(config.get_runtime_path(), "resources", "media", "general", "default",
"thumb_setting_0.png"), "order": 6},
"news": {"label": "Novedades", "icon": get_thumb("news.png"), "order": 0},
"channels": {"label": "Canales", "icon": get_thumb("channels.png"), "order": 1},
"search": {"label": "Buscador", "icon": get_thumb("search.png"), "order": 2},
"favorites": {"label": "Favoritos", "icon": get_thumb("favorites.png"), "order": 3},
"videolibrary": {"label": "Videoteca", "icon": get_thumb("videolibrary.png"), "order": 4},
"downloads": {"label": "Descargas", "icon": get_thumb("downloads.png"), "order": 5},
"settings": {"label": "Configuración", "icon": get_thumb("setting_0.png"), "order": 6}
}
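The rewrite above leans on two things: get_setting('icon_set', default="default") falling back when the setting is unset, and get_thumb resolving a bare icon name inside the active icon pack. A rough model of both, with a hypothetical in-memory settings store and illustrative paths:

import os

_settings = {}  # stand-in for the addon's settings.xml

def get_setting(name, default=None):
    return _settings.get(name, default)

def get_thumb(thumb_name):
    # resolve an icon name inside the selected pack (paths are illustrative)
    pack = get_setting('icon_set', default="default")
    return os.path.join("resources", "media", "themes", pack, "thumb_" + thumb_name)

print(get_thumb("news.png"))  # e.g. resources/media/themes/default/thumb_news.png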

View File

@@ -10,7 +10,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "https://www.bitporno.com/e/([A-z0-9]+)",
"pattern": "https://www.bitporno.com/(?:e|embed)/([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
},
{

View File

@@ -0,0 +1,49 @@
{
"active": true,
"changes": [
{
"date": "09/10/2017",
"description": "Versión inicial"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "cloudsany.com/i/([A-z0-9]+)",
"url": "https://cloudsany.com/i/\\1"
}
]
},
"free": true,
"id": "cloudsany",
"name": "cloudsany",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/6wixo35myn/cloudsany1.png",
"version": 1
}
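How these "find_videos" entries are consumed, in outline: each "pattern" regex is matched against page data and the capture is substituted into "url". A minimal re-implementation of that contract (not Alfa's actual servertools code):

import re

pattern = {"pattern": r"cloudsany.com/i/([A-z0-9]+)",
           "url": r"https://cloudsany.com/i/\1"}

data = '<iframe src="http://cloudsany.com/i/Ab3dE9f"></iframe>'
for found in re.finditer(pattern["pattern"], data):
    print(re.sub(pattern["pattern"], pattern["url"], found.group(0)))
    # -> https://cloudsany.com/i/Ab3dE9f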

View File

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Conector para cloudsany
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "no longer exists" in data:
return False, "[Cloudsany] El fichero ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
data = scrapertools.find_single_match(data, 'p,a,c,k,e.*?</script>')
unpack = jsunpack.unpack(data)
logger.info("Intel11 %s" %unpack)
video_urls = []
videourl = scrapertools.find_single_match(unpack, 'config={file:"([^"]+)')
video_urls.append([".MP4 [Cloudsany]", videourl])
return video_urls
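The connector's flow: download the page, cut out the packed "p,a,c,k,e,d" script, unpack it with lib.jsunpack, then read config={file:"..."} from the result. A sketch of the extraction step using a hypothetical already-unpacked sample standing in for jsunpack.unpack's output:

import re

unpacked = 'jwplayer("vplayer").setup(config={file:"https://cloudsany.com/i/abc.mp4"});'  # hypothetical
videourl = re.search(r'config={file:"([^"]+)', unpacked).group(1)

video_urls = [[".MP4 [Cloudsany]", videourl]]  # the [label, url] pairs get_video_url returns
print(video_urls)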

View File

@@ -48,5 +48,6 @@
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/9e6doboo2n/cloudy1.png",
"version": 1
}

View File

@@ -7,12 +7,12 @@ from core import scrapertools
from lib import jsunpack
from platformcode import logger
-headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'}
+headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'}
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
-data = httptools.downloadpage(page_url, headers=headers).data
+data = httptools.downloadpage(page_url, add_referer = True).data
if "File was deleted" in data or "Not Found" in data or "File was locked by administrator" in data:
return False, "[Gamovideo] El archivo no existe o ha sido borrado"
@@ -24,7 +24,7 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
-data = httptools.downloadpage(page_url, headers=headers).data
+data = httptools.downloadpage(page_url, add_referer = True).data
packer = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval.function.p,a,c,k,e,d..*?)</script>")

View File

@@ -10,7 +10,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "(rapidgator.net/file/.*?(?:\\.html))",
"pattern": "(rapidgator.net/file/\\w+(?:\\.html|))",
"url": "http://\\1"
}
]
@@ -50,4 +50,4 @@
],
"thumbnail": "server_rapidgator.png",
"version": 1
}
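The loosened rapidgator pattern above makes the .html suffix optional, so bare /file/<id> links are recognised too:

import re

pattern = r"(rapidgator.net/file/\w+(?:\.html|))"
for link in ["rapidgator.net/file/abc123.html", "rapidgator.net/file/abc123"]:
    print(re.search(pattern, link).group(1))
# rapidgator.net/file/abc123.html
# rapidgator.net/file/abc123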

View File

@@ -68,5 +68,6 @@
"visible": false
}
],
"thumbnail": "https://s1.postimg.org/912d5vxmv3/streamplay1.png",
"version": 1
}

View File

@@ -58,5 +58,6 @@
"visible": false
}
],
"thumbnail" : "https://s1.postimg.org/4wje61el4f/yourupload1.png",
"version": 1
}