Delete non-working Icarus channels

This commit is contained in:
mac12m99
2019-04-17 20:17:44 +02:00
parent ea7c89c9ac
commit be47983fc2
36 changed files with 0 additions and 4472 deletions

View File

@@ -1,36 +0,0 @@
{
"id": "animevision",
"name": "Animevision",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "http:\/\/animevision.it\/images\/logo.png",
"bannermenu": "http:\/\/animevision.it\/images\/logo.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,145 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per animevision
# ----------------------------------------------------------
import re
from core import httptools, scrapertools, tmdb
from platformcode import logger
from core.item import Item
host = "https://www.animevision.it"
def mainlist(item):
logger.info("kod.animevision mainlist")
itemlist = [Item(channel=item.channel,
action="lista_anime",
title="[COLOR azure]Anime [/COLOR]- [COLOR orange]Lista Completa[/COLOR]",
url=host + "/elenco.php",
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart),
Item(channel=item.channel,
action="search",
title="[COLOR yellow]Cerca...[/COLOR]",
url=host + "/?s=",
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart)]
return itemlist
def search(item, texto):
logger.info("kod.animevision search")
item.url = host + "/?search=" + texto
try:
return lista_anime_src(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def lista_anime_src(item):
logger.info("kod.animevision lista_anime_src")
itemlist = []
data = httptools.downloadpage(item.url).data
patron = r"<a class=\'false[Ll]ink\'\s*href=\'([^\']+)\'[^>]+>[^>]+>[^<]+<img\s*style=\'[^\']+\'\s*class=\'[^\']+\'\s*src=\'[^\']+\'\s*data-src=\'([^\']+)\'\s*alt=\'([^\']+)\'[^>]*>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedimg, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedimg = host + "/" + scrapedimg
scrapedurl = host + "/" + scrapedurl
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
url=scrapedurl,
fulltitle=scrapedtitle,
show=scrapedtitle,
thumbnail=scrapedimg,
fanart=scrapedimg,
viewmode="movie"))
return itemlist
def lista_anime(item):
logger.info("kod.animevision lista_anime")
itemlist = []
data = httptools.downloadpage(item.url).data
patron = "<div class='epContainer' ><a class='falseLink' href='(.*?)'><div[^=]+=[^=]+=[^=]+=[^=]+='(.*?)'[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^>]+><b>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedimg, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedimg = host + "/" + scrapedimg
scrapedurl = host + "/" + scrapedurl
itemlist.append(
Item(channel=item.channel,
action="episodi",
contentType="tvshow",
title=scrapedtitle,
text_color="azure",
url=scrapedurl,
fulltitle=scrapedtitle,
show=scrapedtitle,
thumbnail=scrapedimg,
fanart=scrapedimg,
viewmode="movie"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodi(item):
logger.info("kod.animevision episodi")
itemlist = []
data = httptools.downloadpage(item.url).data
patron = "<a class='nodecoration text-white' href='(.*?)'>(.+?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.split(';')[1]
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedurl = host + "/" + scrapedurl
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
url=scrapedurl,
fulltitle=scrapedtitle,
show=scrapedtitle,
thumbnail=item.thumbnail,
fanart=item.fanart))
return itemlist
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"

View File

@@ -1,30 +0,0 @@
{
"id": "cineblog01blog",
"name": "Cineblog01Blog",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "https://www.cineblog01.cloud/templates/cineblog01/images/logo.png",
"banner": "https://www.cineblog01.cloud/templates/cineblog01/images/logo.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,207 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per cineblog01blog
# ------------------------------------------------------------
import re
from platformcode import logger, config
from core import httptools, scrapertools, servertools
from core.item import Item
from core import tmdb
from channels import support
host = "https://www.cineblog01.cloud"
# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
logger.info()
itemlist = [Item(channel=item.channel,
action="peliculas",
title=support.color("Nuovi film", "azure"),
url="%s/new-film-streaming/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="categorie",
title=support.color("Categorie", "azure"),
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="filmperanno",
title=support.color("Film per anno", "azure"),
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title=support.color("Cerca ..." , "yellow"),
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == "film":
item.url = "%s/new-film-streaming" % host
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info()
item.url = host + "/xfsearch/" + texto
try:
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
blocco = scrapertools.find_single_match(data, r'<ul>\s*<li class="drop">(.*?)</ul>')
patron = r'<li><a href="([^"]+)">([^"]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title=scrapedtitle,
url="".join([host, scrapedurl]),
folder=True))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def filmperanno(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
blocco = scrapertools.find_single_match(data, r'<li class="drop"><a.*?class="link1"><b>Film per anno</b></a>(.*?)</ul>')
patron = r'<li><a href="([^"]+)">([^"]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title=scrapedtitle,
url=scrapedurl,
folder=True))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def peliculas(item):
logger.info()
itemlist = []
while True:
data = httptools.downloadpage(item.url).data
patron = r'<div class="short-story">\s*<a href="([^"]+)".*?>\s*'
patron += r'<img.*?style="background:url\(([^\)]+)\).*?">'
patron += r'\s*<div class="custom-title">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
scrapedtitle = scrapedtitle.replace(year, support.color(year, "red"))
# Bypass fake links
html = httptools.downloadpage(scrapedurl).data
patron = '<div class="video-player-plugin">([\s\S]*)<div class="wrapper-plugin-video">'
matches = re.compile(patron, re.DOTALL).findall(html)
for url in matches:
if "scrolling" not in url: continue
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
extra="movie",
thumbnail=scrapedthumbnail,
folder=True))
# Pagine
patronvideos = r'<a href="([^"]+)">Avanti</a>'
next_page = scrapertools.find_single_match(data, patronvideos)
if not next_page:
break
else:
item.url = next_page
if itemlist:
itemlist.append(
Item(
channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=item.url,
thumbnail= "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
break
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
videoitem.title = "".join(["[%s] " % color(server, 'orange'), item.title])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -1,71 +0,0 @@
{
"id": "cinemasubito",
"name": "Cinemasubito",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https://www.cinemasubito.biz/uploads/custom-logo.png",
"banner": "https://www.cinemasubito.biz/uploads/custom-logo.png",
"categories": ["tvshow", "movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "2", "5", "10", "15" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,324 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per cinemasubito
# ------------------------------------------------------------
import binascii, re, urlparse
from channels import autoplay, filtertools
from core import httptools, scrapertools, servertools, tmdb
from core.item import Item
from lib import jscrypto
from platformcode import config, logger
host = "http://www.cinemasubito.org"
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'youtube']
list_quality = ['HD', 'SD']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cinemasubito')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cinemasubito')
headers = [
['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0'],
['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
['Accept-Encoding', 'gzip, deflate'],
['Accept-Language', 'en-US,en;q=0.5'],
['Host', host.replace("http://", "")],
['DNT', '1'],
['Upgrade-Insecure-Requests', '1'],
['Connection', 'keep-alive'],
['Referer', host],
['Cache-Control', 'max-age=0']
]
def mainlist(item):
logger.info("kod.cinemasubito mainlist")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Film[/COLOR]",
action="peliculas",
url="%s/film/pagina/1" % host,
extra="movie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Film Per Categoria[/COLOR]",
action="categorias",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
Item(channel=item.channel,
title="[COLOR azure]Serie TV[/COLOR]",
action="peliculas_tv",
url="%s/serie" % host,
extra="tvshow",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
action="search",
extra="tvshow",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info("kod.cinemasubito " + item.url + " search " + texto)
item.url = host + "/cerca/" + texto
try:
if item.extra == "movie":
return peliculas(item)
if item.extra == "tvshow":
return peliculas_tv(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
bloque = scrapertools.find_single_match(data, '<h4>Genere</h4>(.*?)<li class="genre">')
# Estrae i contenuti
patron = r'<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace("Film genere ", "")
itemlist.append(
Item(
channel=item.channel,
action="peliculas",
title=scrapedtitle,
url=scrapedurl,
thumbnail=
"https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png",
folder=True))
return itemlist
def peliculas(item):
logger.info("kod.cinemasubito peliculas")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# Estrae i contenuti
patron = r'<a href="([^"]+)" title="([^"]+)">\s*<div class="wrt">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
# qualità e linguaggio possono essere inseriti nell' item in modo che siano mostrati nei titoli intelligenti
quality = ''
scrapedplot = ""
scrapedthumbnail = ""
quality = scrapertools.find_single_match(scrapedtitle, r'\[(.*?)\]')
year = scrapertools.find_single_match(scrapedtitle, r'\((.*?)\)')
title = scrapertools.find_single_match(scrapedtitle, r'(.*?)(?:\(|\[)')
title = '%s [%s] (%s)' % (title, quality, year)
# Il contentTitle deve essere semplice senza nessun altro dettaglio come anno,qualità etc.
# deve esserci solo un tipo di content, o contentTitle o contentSerieName
contentTitle = scrapertools.find_single_match(scrapedtitle, r'(.*?)(?:\(|\[)')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentTitle=contentTitle,
quality=quality,
title="[COLOR azure]" + scrapedtitle + "[/COLOR] ",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
infoLabels={'year':year}))
# Con questo si ricavano le informazioni da tmdb per tutti elementi di itemlist
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')
# Paginazione
patronvideos = r'<a href="[^"]+"[^d]+data-ci-pagination-page[^>]+>[^<]+<\/a><\/span>[^=]+="([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def peliculas_tv(item):
logger.info("kod.cinemasubito peliculas")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# Estrae i contenuti
patron = r'<a href="([^"]+)" title="([^"]+)">\s*<div class="wrt">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
quality = ''
scrapedplot = ''
scrapedthumbnail = ''
quality = scrapertools.find_single_match(scrapedtitle, r'\[(.*?)\]')
year = scrapertools.find_single_match(scrapedtitle, r'\((.*?)\)')
title = scrapertools.find_single_match(scrapedtitle, r'(.*?)(?:\(|\[)')
title = '%s [%s] (%s)' % (title, quality, year)
# Il contentTitle deve essere semplice senza nessun altro dettaglio come anno,qualità etc.
# deve esserci solo un tipo di content, o contentTitle o contentSerieName
contentSerieName = scrapedtitle
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentSerieName=contentSerieName,
quality=quality,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
show=scrapedtitle,
extra=item.extra))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginazione
patronvideos = r'<a href="[^"]+"[^d]+data-ci-pagination-page[^>]+>[^<]+<\/a><\/span>[^=]+="([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="peliculas_tv",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def episodios(item):
logger.info("kod.channels.cinemasubito episodios")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'href="([^"]+)"><span class="glyphicon glyphicon-triangle-right"></span>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
if host not in scrapedurl:
scrapedurl = host + scrapedurl
else:
scrapedurl = scrapedurl
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
# Comandi di servizio
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
def findvideos(item):
logger.info("kod.cinemasubito findvideos_tv")
links = set()
data = httptools.downloadpage(item.url, headers=headers).data
p = scrapertools.find_single_match(data, r'var decrypted = CryptoJS\.AES\.decrypt\(vlinkCrypted, "([^"]+)",')
urls = scrapertools.find_multiple_matches(data,
r"<li><a rel=[^t]+target=[^c]+class=[^=]+=[^:]+:'(.*?)'[^:]+:'(.*?)'[^:]+:'(.*?)'")
for url, iv, salt in urls:
salt = binascii.unhexlify(salt)
iv = binascii.unhexlify(iv)
url = jscrypto.decode(url, p, iv=iv, salt=salt)
url = url.replace(r'\/', '/')
links.add(url)
itemlist = servertools.find_video_items(data=str(links) + data)
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
videoitem.language = IDIOMAS['Italiano']
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist

View File

@@ -1,20 +0,0 @@
{
"id": "downloadme",
"name": "DownloadMe",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https://www.downloadme.gratis/wp-content/uploads/2018/07/downloadme-retina-cropped-alternativo.png",
"bannermenu": "https://www.downloadme.gratis/wp-content/uploads/2018/07/downloadme-retina-cropped-alternativo.png",
"categories": ["movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,169 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale downloadme
# Version: 201804162230
# ------------------------------------------------------------
import re
from core import httptools, scrapertools
from core import servertools
from core.item import Item
from core import tmdb
from lib.unshortenit import unshorten
from platformcode import logger, config
from lib import unshortenit
host = "https://www.downloadme.gratis"
headers = [['Referer', host]]
def mainlist(item):
logger.info("[downloadme.py] mainlist")
# Main options
itemlist = [Item(channel=item.channel,
action="peliculas",
title="[COLOR azure]Film[/COLOR]",
url="%s/category/film/" % host,
extra="movie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
#Item(channel=item.channel,
# action="peliculas",
# title="Serie TV",
# text_color="azure",
# url="%s/category/serie-tv/" % host,
# extra="tv",
# thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
#Item(channel=item.channel,
# action="peliculas",
# title="Anime",
# text_color="azure",
# url="%s/category/anime/" % host,
# extra="tv",
# thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
action="categorie",
title="[COLOR azure]Categorie[/COLOR]",
url="%s/" % host,
extra="movie",
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png")]
return itemlist
def categorie(item):
logger.info("[downloadme.py] peliculas")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, '<ul id="menu-categorie" class="menu">(.*?)</ul>')
patron = '<a href="(.*?)">(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel,
action="peliculas",
text_color="azure",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url="%s/%s" % (host, scrapedurl),
extra=item.extra,
viewmode="movie_with_plot",
Folder=True))
return itemlist
def peliculas(item):
logger.info("[downloadme.py] peliculas")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
#blocco = scrapertools.find_single_match(data, '</p></div><div class="row">(.*?)<span class="sep">')
patron = r'<a href="(.*?)" title="(.*?)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
#scrapedtitle = scrapedtitle.split("&#8211;")[0]
#scrapedtitle = scrapedtitle.split(" Download")[0]
scrapedthumbnail = ""
itemlist.append(
Item(channel=item.channel,
action="findvideos" if 'movie' in item.extra else 'episodes',
text_color="azure",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url="%s/%s" % (host, scrapedurl),
viewmode="movie_with_plot",
thumbnail=scrapedthumbnail))
nextpage_regex = '<a class="next page-numbers" href="([^"]+)">'
next_page = scrapertools.find_single_match(data, nextpage_regex)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url="%s%s" % (host, next_page),
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
def episodes(item):
logger.info("[downloadme.py] tv_series")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<a href="([^"]+)"[^>]*>([^<]+)</a>(?:<br>|</p>)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if not scrapertools.find_single_match(scrapedtitle, r'\d+'): continue
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
text_color="azure",
contentType="episode",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
thumbnail=item.thumbnail,
url=scrapedurl,
viewmode="movie_with_plot"))
return itemlist
def findvideos(item):
logger.info("kod.downloadme findvideos")
itemlist = []
if 'movie' in item.extra:
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<a\s*href="([^"]+)" target="_blank" rel="noopener">.*?link[^<]+</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
url, c = unshorten(scrapedurl)
data += url + '\n'
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
return itemlist

View File

@@ -1,20 +0,0 @@
{
"id": "dragonballforever",
"name": "Dragonball Forever",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "https://www.dragonballforever.it/wp-content/uploads/2017/02/header_dbf-1.jpg",
"banner": "https://www.dragonballforever.it/wp-content/uploads/2017/02/header_dbf-1.jpg",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per dragonballforever
# ------------------------------------------------------------
import re
from platformcode import logger
from core import httptools
from core import scrapertools
from core.item import Item
from channels import support
host = "https://www.dragonballforever.it"
# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
logger.info()
itemlist = [Item(channel=item.channel,
action="episodi",
title=support.color("Dragon Ball Kai", "azure"),
url="%s/dragon-ball-kai-episodi/" % host,
extra="Kai",
show="Dragon Ball Kai",
thumbnail="https://www.dragonballforever.it/wp-content/uploads/2016/11/dragonball_kai_cover.jpg"),
Item(channel=item.channel,
title=support.color("Dragon Ball Super", "azure"),
action="episodi",
url="%s/dragon-ball-super/" % host,
extra="Super",
show="Dragon Ball Super",
thumbnail="https://www.dragonballforever.it/wp-content/uploads/2016/11/dbsuper-locandina.jpg")]
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodi(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = r'<a href="([^"]+)"[^>]+><strong>(Dragon Ball %s [^<]+)</strong></a>' % item.extra
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = color(scrapertools.decodeHtmlentities(scrapedtitle).replace('Dragon Ball %s episodio Streaming ' % item.extra, '').replace('#', '').strip(), 'azure')
epnumber = scrapertools.find_single_match(scrapedtitle, r'\d+')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=re.sub(r'\d+', 'Episodio: %s' % support.color(epnumber, 'red'), scrapedtitle),
fulltitle="Dragon Ball %s Episodio: %s" % (item.extra, scrapedtitle),
url=scrapedurl,
extra=item.extra,
show=item.show,
thumbnail=item.thumbnail,
folder=True))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if 'Super' in item.extra:
item.url = host + "/strm/dbsuper/%s" % scrapertools.find_single_match(data, r'file:\s*"\.\./([^"]+)"')
elif 'Kai' in item.extra:
item.url = scrapertools.find_single_match(data, r'flashvars=[\'|\"]+(?:file=|)([^&]+)&')
itemlist.append(
Item(channel=item.channel,
action="play",
title="%s [.%s]" % (support.color(item.show, 'azure'), support.color(item.url.split('.')[-1], 'orange')),
fulltitle=support.color(item.fulltitle, 'orange') if 'Super' in item.extra else support.color(item.fulltitle, 'deepskyblue'),
url=item.url,
show=item.show,
extra=item.extra,
thumbnail=item.thumbnail))
return itemlist

View File

@@ -1,62 +0,0 @@
{
"id": "filmgratis",
"name": "Filmgratis",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "https://www.filmgratis.video/templates/itafilm/images/logo.png",
"banner": "https://www.filmgratis.video/templates/itafilm/images/logo.png",
"categories": ["movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "2", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,297 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per filmgratis
# ------------------------------------------------------------
import re, urlparse
from platformcode import logger,config
from core import scrapertools, httptools, servertools, tmdb
from core.item import Item
from channels import autoplay
from channels import filtertools
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'vidoza', 'youtube']
list_quality = ['HD', 'SD']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'filmgratis')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'filmgratis')
host = "https://www.filmgratis.one"
# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
    """Build the channel root menu.

    Fix: ``support`` is never imported in this module (see the import block),
    so the original ``support.color`` calls raised NameError; the equivalent
    Kodi ``[COLOR ...]`` markup — already used elsewhere in this file — is
    inlined instead. The repeated thumbnail URL is hoisted into a local.
    """
    logger.info("kod.filmgratis mainlist")
    autoplay.init(item.channel, list_servers, list_quality)
    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    itemlist = [Item(channel=item.channel,
                     action="peliculas",
                     title="[COLOR orange]Home[/COLOR]",
                     url=host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="annoattuale",
                     title="[COLOR azure]Film di quest'anno[/COLOR]",
                     url=host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="categorie",
                     title="[COLOR azure]Categorie[/COLOR]",
                     url=host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="peranno",
                     title="[COLOR azure]Per anno[/COLOR]",
                     url=host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="perpaese",
                     title="[COLOR azure]Per paese[/COLOR]",
                     url=host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="search",
                     title="[COLOR yellow]Cerca ...[/COLOR]",
                     extra="movie",
                     thumbnail=thumb)]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
    """Run a site search for *texto* and list the results as movies."""
    logger.info("filmgratis.py Search ===> " + texto)
    item.url = "%s/index.php?story=%s&do=search&subaction=search" % (host, texto)
    try:
        return peliculas(item)
    except:
        # Log and swallow so a scraper failure does not abort a global search.
        import sys
        for trace_part in sys.exc_info():
            logger.error("%s" % trace_part)
        return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
    """Return the newest entries for the 'film' category (global 'Novità'
    menus); any other category yields an empty list."""
    logger.info("filmgratis " + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)
            # Drop the trailing pagination entry, if present.
            if itemlist[-1].action == "peliculas":
                itemlist.pop()
    except:
        # Log and swallow: the aggregated "newest" view must not crash.
        import sys
        for trace_part in sys.exc_info():
            logger.error("{0}".format(trace_part))
        return []
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def annoattuale(item):
    """Jump straight to the site's 'Film <current year>' listing.

    Scrapes the left menu for the 'Film YYYY' link and delegates to
    ``peliculas`` on that URL. Fix: removed the unused ``itemlist`` local.
    """
    logger.info()
    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<div class="left-menu-main">(.*?)</div>')
    patron = r'<a href="([^"]+)">Film\s*\d{4}</a>'
    # urljoin tolerates both absolute and site-relative menu links.
    item.url = urlparse.urljoin(host, scrapertools.find_single_match(blocco, patron))
    return peliculas(item)
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
    """List the genre categories, skipping the adult 'film erotici' entry."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<div class="menu-janr-content">(.*?)</div>')
    for link, label in re.compile(r'<a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(blocco):
        label = scrapertools.decodeHtmlentities(label)
        if 'film erotici' in label.lower():
            continue
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=label,
                             text_color="azure",
                             url=urlparse.urljoin(host, link),
                             folder=True))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def peranno(item):
    """List the 'Anno di pubblicazione' (publication year) filter links."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<div class="sort-menu-title">\s*Anno di pubblicazione:\s*</div>(.*?)</div>')
    for link, label in re.compile(r'<a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(blocco):
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=scrapertools.decodeHtmlentities(label),
                             text_color="azure",
                             url=urlparse.urljoin(host, link),
                             folder=True))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def perpaese(item):
    """List the 'Paesi di produzione' (production country) filter links."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<div class="sort-menu-title">\s*Paesi di produzione:\s*</div>(.*?)</div>')
    for link, label in re.compile(r'<a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(blocco):
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=scrapertools.decodeHtmlentities(label),
                             text_color="azure",
                             url=urlparse.urljoin(host, link),
                             folder=True))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def peliculas(item):
    """Scrape a listing page into movie Items.

    Each result's detail page is fetched and kept only when it embeds a
    playable iframe (the "scrolling" marker). Review fixes:
    * removed the no-op ``scrapedurl = scrapedurl`` and the duplicated
      year extraction;
    * the year is colorized only when one was actually found —
      ``str.replace('', ...)`` would otherwise inject markup between
      every character of the title;
    * ``support`` is not imported in this module, so the colored year is
      built with inline [COLOR] markup instead of ``support.color``.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = r'<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)".*?/></a>'
    player_patron = r'<div class="video-player-plugin">([\s\S]*)<div class="wrapper-plugin-video">'
    for scrapedurl, scrapedthumbnail, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        html = httptools.downloadpage(scrapedurl).data
        for player_block in re.compile(player_patron, re.DOTALL).findall(html):
            # Only pages with an embedded player ("scrolling" iframe) are playable.
            if "scrolling" not in player_block:
                continue
            cleantitle = scrapedtitle
            title = scrapedtitle
            year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
            infolabels = {}
            if year:
                cleantitle = cleantitle.replace("(%s)" % year, '').strip()
                infolabels['year'] = year
                title = scrapedtitle.replace(year, "[COLOR red]%s[/COLOR]" % year)
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="movie",
                     title=title,
                     fulltitle=cleantitle,
                     text_color="azure",
                     url=scrapedurl,
                     extra="movie",
                     show=cleantitle,
                     thumbnail=scrapedthumbnail,
                     infoLabels=infolabels,
                     folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination: the ">" arrow link, when present.
    next_page = scrapertools.find_single_match(data, r'<a href="([^"]+)">>')
    if next_page:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, next_page),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 extra=item.extra,
                 folder=True))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """Extract hoster links from the movie page and decorate/filter them.

    Fix: the original called a bare ``color`` helper that is never imported
    in this module (NameError at runtime); the equivalent inline [COLOR]
    markup is used instead, matching the rest of the file.
    """
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Server name = the detected title stripped of brackets/dashes/spaces.
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = "".join(["[[COLOR orange]%s[/COLOR]] " % server.capitalize(), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']
    # Optionally verify that the links are alive (user setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required by FilterTools.
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay.
    autoplay.start(itemlist, item)
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist

View File

@@ -1,23 +0,0 @@
{
"id": "filmontv",
"name": "Filmontv",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": null,
"banner": null,
"categories": [
null
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,85 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale filmontv
# ------------------------------------------------------------
import re
import urllib
from core import httptools
from platformcode import logger
from core import scrapertools
from core.item import Item
from core import tmdb
host = "https://www.comingsoon.it"
TIMEOUT_TOTAL = 60
def mainlist(item):
    """Root menu: comingsoon.it TV-schedule time slots."""
    logger.info(" mainlist")
    slots = [
        ("[COLOR red]IN ONDA ADESSO[/COLOR]", "in-onda",
         "http://a2.mzstatic.com/eu/r30/Purple/v4/3d/63/6b/3d636b8d-0001-dc5c-a0b0-42bdf738b1b4/icon_256.png"),
        ("[COLOR azure]Mattina[/COLOR]", "mattina",
         "http://icons.iconarchive.com/icons/icons-land/weather/256/Sunrise-icon.png"),
        ("[COLOR azure]Pomeriggio[/COLOR]", "pomeriggio",
         "http://icons.iconarchive.com/icons/custom-icon-design/weather/256/Sunny-icon.png"),
        ("[COLOR azure]Sera[/COLOR]", "sera",
         "http://icons.iconarchive.com/icons/icons-land/vista-people/256/Occupations-Pizza-Deliveryman-Male-Light-icon.png"),
        ("[COLOR azure]Notte[/COLOR]", "notte",
         "http://icons.iconarchive.com/icons/oxygen-icons.org/oxygen/256/Status-weather-clear-night-icon.png"),
    ]
    return [Item(channel=item.channel,
                 title=title,
                 action="tvoggi",
                 url="%s/filmtv/oggi/%s/" % (host, slug),
                 thumbnail=thumb)
            for title, slug, thumb in slots]
def tvoggi(item):
    """List the films scheduled in the requested TV time slot.

    Bug fix: the original passed ``tipo="movie"`` as a second argument to
    ``list.append`` — a TypeError at runtime (append takes exactly one
    argument). The media type already travels in ``extra`` (the
    '{}movie' suffix consumed by ``do_search``), so the stray keyword is
    simply dropped.
    """
    logger.info(" tvoggi")
    itemlist = []
    # Load the schedule page.
    data = httptools.downloadpage(item.url).data
    # Extract thumbnail, title, air time and TV channel for each entry.
    patron = '<div class="col-xs-12 col-sm-6 box-contenitore filmintv">.*?src="([^"]+)[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<.*?titolo">([^<]+)<.*?ore <span>([^<]+)<\/span><br \/>([^<]+)<\/div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedtitle, time, scrapedtv in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="do_search",
                 extra=urllib.quote_plus(scrapedtitle) + '{}' + 'movie',
                 title="[COLOR red]" + time + "[/COLOR] - [COLOR azure]" + scrapedtitle + "[/COLOR] [COLOR yellow][" + scrapedtv + "][/COLOR]" ,
                 fulltitle=scrapedtitle,
                 url="",
                 thumbnail=scrapedthumbnail,
                 folder=True))
    return itemlist
# Esta es la función que realmente realiza la búsqueda
def do_search(item):
    """Delegate to the global search channel (the actual lookup happens there)."""
    from channels import search as global_search
    return global_search.do_search(item)

View File

@@ -1,36 +0,0 @@
{
"id": "filmperevolvere",
"name": "FilmPerEvolvere",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/filmperevolvere.it\/wp-content\/uploads\/2017\/06\/cropped-coversito.jpg",
"bannermenu": "https:\/\/filmperevolvere.it\/wp-content\/uploads\/2017\/06\/cropped-coversito.jpg",
"categories": ["vosi","movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,213 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per filmperevolvere
# ----------------------------------------------------------
import re
import urlparse
import lib.pyaes as aes
from core import httptools
from platformcode import logger, config
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
host = "https://filmperevolvere.it"
headers = [
['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0'],
['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
['Accept-Encoding', 'gzip, deflate'],
['Accept-Language', 'en-US,en;q=0.5'],
['Referer', host],
['DNT', '1'],
['Upgrade-Insecure-Requests', '1'],
['Cache-Control', 'max-age=0']
]
def mainlist(item):
    """Root menu for the filmperevolvere channel."""
    logger.info("kod.filmperevolvere mainlist")
    movie_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    return [Item(channel=item.channel,
                 title="[COLOR azure]Ultimi Film Inseriti[/COLOR]",
                 action="peliculas",
                 url=host,
                 thumbnail=movie_thumb),
            Item(channel=item.channel,
                 title="[COLOR azure]Categorie[/COLOR]",
                 action="categorie",
                 url=host,
                 thumbnail=movie_thumb),
            Item(channel=item.channel,
                 title="[COLOR yellow]Cerca...[/COLOR]",
                 action="search",
                 extra="movie",
                 thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
def newest(categoria):
    """Newest entries for the 'film' category; anything else yields []."""
    logger.info("[filmperevolvere.py] newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)
            # Drop the trailing pagination entry, if present.
            if itemlist[-1].action == "peliculas":
                itemlist.pop()
    except:
        # Log and swallow: the aggregated "newest" view must not crash.
        import sys
        for trace_part in sys.exc_info():
            logger.error("{0}".format(trace_part))
        return []
    return itemlist
def search(item, texto):
    """Run a site search for *texto* and parse the results as movies."""
    logger.info("[filmperevolvere.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    except:
        # Log and swallow so a scraper failure does not abort a global search.
        import sys
        for trace_part in sys.exc_info():
            logger.error("%s" % trace_part)
        return []
def categorie(item):
    """List the GENERI sub-menu entries.

    Fix: the original appended the Cookie pair to the module-level
    ``headers`` list, so every call grew the shared list with duplicate
    Cookie headers; a per-call copy is used instead. The three prefix
    checks are merged into one tuple-based ``startswith``.
    """
    itemlist = []
    request_headers = list(headers)
    c = get_test_cookie(item.url)
    if c:
        request_headers.append(['Cookie', c])
    # Load the page with the (possibly cookie-augmented) headers.
    data = httptools.downloadpage(item.url, headers=request_headers).data
    bloque = scrapertools.find_single_match(data,
                                            'GENERI<span class="mega-indicator">(.*?)<\/ul>')
    # Extract the genre links from the mega-menu block.
    patron = '<a class="mega-menu-link" href="(.*?)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedurl, scrapedtitle in matches:
        # Skip navigation entries that are not genres.
        if scrapedtitle.startswith(("HOME", "SERIE TV", "GENERI")):
            continue
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title=scrapedtitle,
                 url='c|%s' % scrapedurl,
                 thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                 folder=True))
    for i in itemlist:
        logger.info(i)
    return itemlist
def peliculas(item):
    """List movies from a listing/search page.

    URLs prefixed with ``c|`` come from ``categorie`` and use a different
    title pattern. Fix: the Cookie pair is added to a per-call copy of
    ``headers`` instead of mutating the shared module-level list (which
    previously accumulated duplicate Cookie entries across calls).
    """
    logger.info("kod.filmperevolvere peliculas")
    itemlist = []
    request_headers = list(headers)
    c = get_test_cookie(item.url)
    if c:
        request_headers.append(['Cookie', c])
    if item.url[1] == "|":
        # Category listing: different markup, strip the 'c|' prefix.
        patron = 'class="ei-item-title"><a\s*href="([^"]*)">([^<]*)'
        item.url = item.url[2:]
    else:
        patron = '<div class="post-thumbnail">\s*<a href="([^"]+)" title="([^"]+)">\s*<img width="520"'
    data = httptools.downloadpage(item.url, headers=request_headers).data
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.title()
        # TV series are handled elsewhere; skip them here.
        if "Serie Tv" in scrapedtitle:
            continue
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="",
                 plot="",
                 folder=True))
    # Pagination
    patronvideos = '<span class=\'current\'>[^<]+</span><a class=[^=]+=[^=]+="(.*?)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Extract hoster links from a movie page.

    Fix: the Cookie pair is appended to a per-call copy of ``headers``
    instead of mutating the shared module-level list, which previously
    grew (with duplicate Cookie entries) on every call.
    """
    logger.info("kod.filmperevolvere findvideos")
    request_headers = list(headers)
    c = get_test_cookie(item.url)
    if c:
        request_headers.append(['Cookie', c])
    # Load the page and let the generic detector find the embeds.
    data = httptools.downloadpage(item.url, headers=request_headers).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = "".join([item.title, '[COLOR green][B]', videoitem.title, '[/B][/COLOR]'])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
def get_test_cookie(url):
    """Solve the site's AES anti-bot challenge and return the '__test' cookie.

    The landing page embeds three hex strings (a = key, b = IV,
    c = ciphertext); decrypting c with AES-CBC yields the cookie value
    the site expects on subsequent requests. Returns '' when the page
    contains no challenge.
    """
    data = httptools.downloadpage(url, headers=headers).data
    a = scrapertools.find_single_match(data, 'a=toNumbers\("([^"]+)"\)')
    if a:
        b = scrapertools.find_single_match(data, 'b=toNumbers\("([^"]+)"\)')
        if b:
            c = scrapertools.find_single_match(data, 'c=toNumbers\("([^"]+)"\)')
            if c:
                # Python 2 str.decode('hex') -> raw bytes; pyaes CBC decrypt.
                cookie = aes.AESModeOfOperationCBC(a.decode('hex'), iv=b.decode('hex')).decrypt(c.decode('hex'))
                return '__test=%s' % cookie.encode('hex')
    return ''

View File

@@ -1,70 +0,0 @@
{
"id": "filmzstreaming",
"name": "Filmzstreaming",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/filmzstreaming.pw\/wp-content\/uploads\/2017\/10\/FilmZStreaming-2.png",
"bannermenu": "https:\/\/filmzstreaming.pw\/wp-content\/uploads\/2017\/10\/FilmZStreaming-2.png",
"categories": ["movie", "tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,376 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per filmzstreaming
# ----------------------------------------------------------
import re, urlparse, urllib
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
from core import scrapertools, servertools, httptools
from core.item import Item
from core import tmdb
host = "https://filmzstreaming.blue"
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'youtube']
list_quality = ['default']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'filmzstreaming')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'filmzstreaming')
headers = [['Referer', host]]
def mainlist(item):
    """Root menu for the filmzstreaming channel."""
    logger.info("kod.filmzstreaming mainlist")
    autoplay.init(item.channel, list_servers, list_quality)
    movie_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    search_thumb = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
    itemlist = [Item(channel=item.channel,
                     title="[COLOR azure]Ultimi film inseriti[/COLOR]",
                     action="peliculas",
                     extra="movie",
                     url="%s/film/" % host,
                     thumbnail=movie_thumb),
                Item(channel=item.channel,
                     title="[COLOR azure]Categorie film[/COLOR]",
                     action="categorias",
                     url=host,
                     thumbnail=movie_thumb),
                Item(channel=item.channel,
                     title="[COLOR azure]Serie TV[/COLOR]",
                     action="peliculas_tv",
                     extra="tvshow",
                     url="%s/serietv/" % host,
                     thumbnail=movie_thumb),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca...[/COLOR]",
                     action="search",
                     extra="movie",
                     thumbnail=search_thumb),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
                     action="search",
                     extra="tvshow",
                     thumbnail=search_thumb)]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def peliculas(item):
    """List movies from a /film/ listing page.

    Fix: removed the unused ``scrapedplot`` local.
    """
    logger.info("kod.filmzstreaming peliculas")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Restrict matching to the results column, excluding the sidebar.
    blocco = scrapertools.find_single_match(data, '</h1>(.*?)<div class="sidebar scrolling">')
    patron = r'<h3><a href="([^"]+)">(.*?)</a></h3>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for scrapedurl, scrapedtitle in matches:
        scrapedthumbnail = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("Streaming ", "")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail))
    # Pagination
    patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def peliculas_tv(item):
    """List TV series from the /serietv/ listing page.

    Fixes: removed the unused ``scrapedplot`` local and corrected the log
    tag (it said 'peliculas').
    """
    logger.info("kod.filmzstreaming peliculas_tv")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Restrict matching to the results column, excluding the sidebar.
    blocco = scrapertools.find_single_match(data, '</h1>(.*?)<div class="sidebar scrolling">')
    patron = r'<h3><a href="([^"]+)">(.*?)</a></h3>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for scrapedurl, scrapedtitle in matches:
        scrapedthumbnail = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace(" Streaming", "")
        scrapedtitle = scrapedtitle.title()
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 contentType="tv",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail))
    # Pagination
    patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def episodios(item):
    """List the episodes of a series page.

    Fixes: removed the unused ``scrapedplot`` local and corrected the log
    tag (it said 'peliculas_tv').
    """
    logger.info("kod.filmzstreaming episodios")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Each episode row: numbering block + titled link.
    patron = '<div class="numerando">(.*?)</div><div class="episodiotitle"> <a href="([^"]+)">(.*?)</a> '
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scraped_1, scrapedurl, scraped_2 in matches:
        scrapedthumbnail = ""
        scrapedtitle = scraped_1 + " " + scraped_2
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 extra=item.extra,
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail))
    # Videolibrary support entry
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def peliculas_src_tv(item):
    """Parse TV-series entries from a search-results page.

    Fixes: removed the unused ``scrapedplot`` local and corrected the log
    tag (it said 'peliculas').
    """
    logger.info("kod.filmzstreaming peliculas_src_tv")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Search results use a different title markup than the listings.
    patron = '<div class="title">\s*<a href="([^"]+)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedthumbnail = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("Streaming ", "")
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 contentType="tv",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail))
    # Pagination
    patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_src_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def peliculas_src(item):
    """Parse movie entries from a search-results page.

    Fixes: removed the unused ``scrapedplot`` local and corrected the log
    tag (it said 'peliculas').
    """
    logger.info("kod.filmzstreaming peliculas_src")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Search results use a different title markup than the listings.
    patron = '<div class="title">\s*<a href="([^"]+)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedthumbnail = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("Streaming ", "")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail))
    # Pagination
    patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_src",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def categorias(item):
    """List the movie genres found in the site's sub-menu."""
    logger.info("kod.filmzstreaming categorias")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Only the sub-menu block holds the genre links.
    bloque = scrapertools.find_single_match(data, '<ul class="sub-menu">(.*?)</ul>')
    for link, label in re.compile('<a href="([^"]+)">(.*?)</a></li>', re.DOTALL).findall(bloque):
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + label + "[/COLOR]",
                 url=link,
                 thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                 folder=True))
    return itemlist
def search(item, texto):
    """Search the site; ``item.extra`` selects movie vs TV-show result parsing."""
    logger.info("[filmzstreaming.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        if item.extra == "movie":
            return peliculas_src(item)
        if item.extra == "tvshow":
            return peliculas_src_tv(item)
    except:
        # Log and swallow so a scraper failure does not abort a global search.
        import sys
        for trace_part in sys.exc_info():
            logger.error("%s" % trace_part)
        return []
def findvideos(item):
    """Resolve the dooplay AJAX players into playable server Items.

    Both episode and movie pages expose player options as (post id, nume)
    pairs that must be POSTed to admin-ajax.php; each response is appended
    to the page data so the generic server detector can pick up the
    embeds. Review fixes: the two duplicated branches (identical except
    for the option regex) were merged, and a commented-out ``requests``
    experiment was removed.
    """
    logger.info("[filmzstreaming.py] findvideos")
    data = httptools.downloadpage(item.url).data
    if item.contentType == 'episode':
        patron = '<li id=[^=]+="dooplay_player_option[^=]+="([^"]+)" data-nume="([^"]+)"'
    else:
        patron = '<span class="loader"></span></li><li id=[^=]+="dooplay_player_[^=]+="([^"]+)" data-nume="([^"]+)">'
    uri = "%s/wp-admin/admin-ajax.php" % host
    for posts, numes in re.compile(patron, re.DOTALL).findall(data):
        payload = urllib.urlencode({'action': 'doo_player_ajax', 'post': posts, 'nume': numes})
        data += httptools.downloadpage(uri, post=payload).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']
    # Optionally verify that the links are alive (user setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required by FilterTools.
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay.
    autoplay.start(itemlist, item)
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist

View File

@@ -1,44 +0,0 @@
{
"id": "marapcana",
"name": "Marapcana",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "http://marapcana.site/maraplogo7.png",
"banner": "http://marapcana.site/maraplogo7.png",
"categories": ["movie", "tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,258 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per marapcana
# ------------------------------------------------------------
import re
from core import scrapertools, httptools, servertools
from core.item import Item
from core import tmdb
from lib import unshortenit
from platformcode import logger, config
host = "http://marapcana.live"
# in caso di oscuramento verificare l'indirizzo http://marapcana.online/
headers = [['Referer', host]]
PERPAGE = 12
def mainlist(item):
    """Build the channel root menu: films, categories, search, TV series."""
    logger.info(" mainlist")
    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    # (action, title, url, extra) for every root entry; search entries
    # deliberately carry no url (it is built later from the query text).
    entries = [
        ("peliculas", "[COLOR azure]Film[/COLOR]", "%s/film-categoria/dvdrip-bdrip/" % host, "movie"),
        ("categorie", "[COLOR azure]Categorie[/COLOR]", "%s/elenchi-film/" % host, "movie"),
        ("search", "[COLOR yellow]Cerca...[/COLOR]", None, "movie"),
        ("peliculas_tv", "[COLOR azure]Serie TV[/COLOR]", "%s/lista-serie-tv/" % host, "tvshow"),
        ("search", "[COLOR yellow]Cerca SerieTV...[/COLOR]", None, "tvshow"),
    ]
    itemlist = []
    for action, title, url, extra in entries:
        kwargs = dict(channel=item.channel,
                      action=action,
                      title=title,
                      extra=extra,
                      thumbnail=thumb)
        if url is not None:
            kwargs["url"] = url
        itemlist.append(Item(**kwargs))
    return itemlist
def peliculas(item):
    """List the movies found on a listing page, plus a next-page entry.

    Scrapes teaser anchors, emits one findvideos Item per movie, appends a
    pagination Item when a "nextpostslink" anchor is present, and enriches
    everything with TMDB metadata.
    """
    logger.info(" peliculas")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # One anchor per movie teaser: href + title attribute.
    patron = '<a href="([^"]+)" title="([^"]+)" class="teaser-thumb">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        # No thumbnail is scraped here; TMDB lookup below may fill artwork.
        scrapedthumbnail = ""
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot",
                 Folder=True))
    # WordPress pagination link ("nextpostslink"); empty string when absent.
    nextpage_regex = '<a class="nextpostslink".*?href="([^"]+)".*?<\/a>'
    next_page = scrapertools.find_single_match(data, nextpage_regex)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url="%s" % next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def categorie(item):
    """List the genre categories offered by the site's genre <select> box."""
    itemlist = []
    if item.url == "":
        item.url = host
    data = httptools.downloadpage(item.url, headers=headers).data
    # Narrow to the genre dropdown before extracting its options.
    bloque = scrapertools.find_single_match(data, 'Genere(.*?)</select>')
    patron = '<option value="([^"]+)">(.*?)</option>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedurl, scrapedtitle in matches:
        # Skip the placeholder option (e.g. "scegli adesso") that is not a genre.
        if "adesso" in scrapedtitle:
            continue
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 fulltitle=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 Folder=True))
    return itemlist
def peliculas_tv(item):
    """List TV series with client-side pagination of PERPAGE items.

    The full series index is one page on the site; the current page number
    is smuggled through ``item.url`` after a ``{}`` separator so that each
    listing call slices a different PERPAGE window of the matches.
    """
    itemlist = []
    if item.url == "":
        item.url = host
    p = 1
    # "url{}N" means: same page, show slice number N.
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    data = httptools.downloadpage(item.url, headers=headers).data
    bloque = scrapertools.find_single_match(data, 'Lista Serie Tv</h2>(.*?)</section>')
    patron = '<a href=\'(/serie/[^\']+)\'>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Keep only entries in the window [(p-1)*PERPAGE, p*PERPAGE).
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))
    # More matches remain: add a pager Item pointing at the next slice.
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def episodios(item):
    """List the episode rows of a series page (first <table> on the page).

    Each episode Item keeps the series URL; the ``episodio`` action later
    re-downloads the page and isolates that episode's row by its title.
    """
    itemlist = []
    if item.url == "":
        item.url = host
    # Series links are site-relative; prefix the host when missing.
    if host not in item.url:
        item.url = '%s%s' % (host, item.url)
    data = httptools.downloadpage(item.url, headers=headers).data
    bloque = scrapertools.find_single_match(data, '<table>(.*?)</table>')
    patron = '<tr><td>([^<]+)</td>.*?</tr>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="episodio",
                 fulltitle=scrapedtitle,
                 title=scrapedtitle,
                 url=item.url,  # series page; the row is re-located in episodio()
                 viewmode="movie_with_plot",
                 Folder=True))
    return itemlist
def episodio(item):
    """Resolve the hoster links of a single episode row.

    Re-downloads the series page, isolates the <tr> whose first cell equals
    ``item.title``, and scans just that fragment for video links.  Each link
    title is prefixed with the quality label taken from the tab the player
    iframe lives in, when one can be found.
    """
    if item.url == "":
        item.url = host
    data = httptools.downloadpage(item.url, headers=headers).data
    # BUG FIX: item.title is scraped text and may contain regex
    # metacharacters ("(ITA)", "[HD]", "?"...); escape it so the row lookup
    # matches literally instead of failing or matching the wrong row.
    patron = '<tr><td>' + re.escape(item.title) + '</td>.*?</tr>'
    data = scrapertools.find_single_match(data, patron)
    itemlist = servertools.find_video_items(data=data)
    for i in itemlist:
        # Find the tab <div> that embeds this hoster URL (scheme stripped so
        # both http and https embeds match); escape the URL for the same
        # metacharacter reason as above.
        tab = re.compile(
            '<div\s*id="(tab[^"]+)"[^>]+>[^>]+>[^>]+src="http[s]*:%s[^"]+"' % re.escape(
                i.url.replace('http:', '').replace('https:', '')), re.DOTALL).findall(data)
        qual = ''
        if tab:
            # The tab's anchor text is the human-readable quality label.
            qual = re.compile('<a\s*href="#%s">([^<]+)<' % tab[0], re.DOTALL).findall(data)[0].replace("'", "")
            qual = "[COLOR orange]%s[/COLOR] - " % qual
        i.title = '%s[COLOR green][B]%s[/B][/COLOR] - %s' % (qual, i.title[2:], item.title)
        i.channel = item.channel
        i.fulltitle = item.title
    return itemlist
def search(item, texto):
    """Search entry point: dispatch the query to the movie or TV lister.

    Always returns a list; scraping errors are logged and yield an empty
    list so the global search aggregator is not interrupted.
    """
    logger.info("[marapcana.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        if item.extra == "movie":
            return peliculas(item)
        if item.extra == "tvshow":
            return peliculas_tv(item)
        # BUG FIX: the original implicitly returned None for any other
        # ``extra`` value; callers expect a list.
        return []
    # Keep searching on error; Exception (not bare except) so
    # SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def findvideos(item):
    """Resolve the hoster links of a movie page.

    Unshortens every protected outbound link found on the page, appends the
    resolved URLs to the HTML buffer, and lets servertools scan the whole
    thing.  Each result is prefixed with the quality label of the player tab
    it belongs to, when one can be located.
    """
    logger.info(" findvideos")
    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data
    # Outbound (link-protector) anchors; each must be unshortened first.
    urls = re.findall(r'href="([^"]+)" target="_blank" rel="noopener noreferrer">', data, re.DOTALL)
    if urls:
        for url in urls:
            url, c = unshortenit.unshorten(url)
            # Append the resolved URL so find_video_items can pick it up.
            data += url + '\n'
    itemlist = servertools.find_video_items(data=data)
    for i in itemlist:
        # Locate the tab <div> embedding this hoster URL (scheme stripped so
        # both http and https embeds match).
        tab = re.compile(
            '<div\s*id="(tab[^"]+)"[^>]+>[^>]+>[^>]+src="http[s]*:%s[^"]+"' % i.url.replace('http:', '').replace(
                'https:', ''), re.DOTALL).findall(data)
        qual = ''
        if tab:
            # The tab's anchor text is the human-readable quality label.
            qual = re.compile('<a\s*href="#%s">([^<]+)<' % tab[0], re.DOTALL).findall(data)[0].replace("'", "")
            qual = "[COLOR orange]%s[/COLOR] - " % qual
        i.title = '%s[COLOR green][B]%s[/B][/COLOR] - %s' % (qual, i.title[1:], item.title)
        i.channel = item.channel
        i.fulltitle = item.title
    return itemlist

View File

@@ -1,36 +0,0 @@
{
"id": "mmaiptv",
"name": "MmaIptv",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/imageshack.com\/a\/img924\/5981\/XN1yc1.png",
"bannermenu": "https:\/\/imageshack.com\/a\/img924\/5981\/XN1yc1.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,107 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per mmaiptv
# ----------------------------------------------------------
import re
import urlparse
from core import httptools
from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
host = "http://mmaiptv.it"
headers = [['Referer', host]]
def mainlist(item):
    """Channel root menu: the full catalogue and a search entry."""
    logger.info("[mmaiptv.py] mainlist")
    all_titles = Item(channel=item.channel,
                      action="list_titles",
                      title="[COLOR azure]Tutti[/COLOR]",
                      url="%s/b.php" % host,
                      extra="anime",
                      thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png")
    search_entry = Item(channel=item.channel,
                        action="search",
                        title="[COLOR yellow]Cerca[/COLOR]",
                        url="%s/b.php" % host,
                        extra="search",
                        thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
    return [all_titles, search_entry]
def list_titles(item):
    """List all anime titles (or search hits) found on the catalogue page.

    Search results return site-relative URLs, so the host is prepended in
    that case; catalogue listings already carry absolute URLs.
    """
    logger.info("[mmaiptv.py] list_titles")
    itemlist = []
    if item.url == "":
        item.url = host
    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data
    # Each card: title text, detail link, cover image.
    patronvideos = '<div class="tab-pane active".*?<font color="#000000">([^<]+)<\/font>.*?<a href="([^"]+)"><img src="([^"]+)".*?<\/div>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="episodes",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 # Search pages emit relative links; prepend the host there.
                 url=scrapedurl if not 'search' in item.extra else (host + "/"+scrapedurl),
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot",
                 Folder=True))
    return itemlist
def episodes(item):
    """List the episodes of a title, newest-last (hence the final reversal).

    Uses the same card markup as list_titles, but each entry goes straight
    to findvideos.
    """
    logger.info("[mmaiptv.py] serietv")
    itemlist = []
    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data
    patronvideos = '<div class="tab-pane active".*?<font color="#000000">([^<]+)<\/font>.*?<a href="([^"]+)"><img src="([^"]+)".*?<\/div>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot"))
    # The site lists newest first; present oldest first instead.
    return list(reversed(itemlist))
def search(item, texto):
    """Point the item at the site's search endpoint and reuse list_titles."""
    logger.info("[mmaiptv.py] search")
    item.url = "%s/d.php?search=%s" % (host, texto)
    return list_titles(item)
def findvideos(item):
    """Extract the direct stream URLs (jwplayer ``file:`` entries) of a page.

    Returns one playable Item per stream found.
    """
    logger.info("[mmaiptv.py] findvideos")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = "file: \"([^\"]+)\""
    matches = re.compile(patron, re.DOTALL).findall(data)
    # BUG FIX: the original did ``headers.append(['Referer', item.url])``
    # here, mutating the module-level ``headers`` list on every call.  That
    # leaked a duplicate Referer entry into every subsequent request made by
    # this module, and the appended value was never used in this function,
    # so the mutation is simply dropped.
    for video in matches:
        itemlist.append(Item(channel=item.channel, action="play", title="[.mp4] [COLOR azure]%s[/COLOR]" % item.title,url=video, folder=False))
    return itemlist

View File

@@ -1,36 +0,0 @@
{
"id": "mondolunatico_new",
"name": "Mondo Lunatico New",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "http://mondolunatico.org/stream/wp-content/uploads/2016/10/dooplay1-2.png",
"banner": "http://mondolunatico.org/stream/wp-content/uploads/2016/10/dooplay1-2.png",
"categories": ["movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,183 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale mondolunatico_new
# ------------------------------------------------------------
import re
import urlparse
from core import httptools, scrapertools, servertools
from core.item import Item
from core import tmdb
from platformcode import logger, config
from channels import support
host = "http://mondolunatico.org"
def mainlist(item):
    """Channel root menu: latest movies, genre browser, and search."""
    logger.info("kod.istreaming mainlist")
    movies_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    latest = Item(channel=item.channel,
                  title="[COLOR azure]Ultimi Film Inseriti[/COLOR]",
                  action="peliculas",
                  url="%s/stream/movies/" % host,
                  extra="movie",
                  thumbnail=movies_thumb)
    by_genre = Item(channel=item.channel,
                    title="[COLOR azure]Film Per Categoria[/COLOR]",
                    action="categorias",
                    url="%s/stream/" % host,
                    thumbnail=movies_thumb)
    search_entry = Item(channel=item.channel,
                        title="[COLOR yellow]Cerca...[/COLOR]",
                        action="search",
                        extra="movie",
                        thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
    return [latest, by_genre, search_entry]
def categorias(item):
    """List the movie genres from the "Film Per Genere" sidebar widget."""
    itemlist = []
    # Load the page
    data = httptools.downloadpage(item.url).data
    # Narrow to the genre list before extracting its anchors.
    bloque = scrapertools.find_single_match(data, '<h2>Film Per Genere</h2><ul class="genres scrolling">(.*?)</ul>')
    # Extract the contents
    patron = '<li[^>]+><a href="([^"]+)"[^>]+>([^<]+)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                 folder=True))
    return itemlist
def search(item, texto):
    """Search entry point for the movie catalogue.

    Always returns a list; scraping errors are logged and yield an empty
    list so the global search aggregator keeps running.
    """
    logger.info("[mondolunatico.py] " + item.url + " search " + texto)
    item.url = host + "/stream?s=" + texto
    try:
        if item.extra == "movie":
            return pelis_movie_src(item)
        # BUG FIX: the original implicitly returned None for any other
        # ``extra`` value; callers expect a list.
        return []
    # Keep searching on error; Exception (not bare except) so
    # SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def pelis_movie_src(item):
    """List the movies on a search-results page, TMDB-enriched."""
    logger.info("kod.mondolunatico_new peliculas")
    itemlist = []
    # Load the page
    data = httptools.downloadpage(item.url).data
    # Extract the contents: result card with link, cover and title.
    patron = '<div class="thumbnail animation-2">\s*<a href="([^"]+)">\s*<img src="([^"]+)" alt="(.*?)" />'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapedplot = ""
    for scrapedurl, scrapedthumbnail, scrapedtitle, in matches:
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def peliculas(item):
    """List the movies on a catalogue page, plus a pagination entry.

    Emits one findvideos Item per title anchor, follows the site's own
    pagination links, and enriches the list with TMDB metadata.
    """
    logger.info("kod.mondolunatico_new peliculas")
    itemlist = []
    # Load the page
    data = httptools.downloadpage(item.url).data
    # Extract the contents: one anchor per movie title.
    patron = '</span><a href="([^"]+)">(.*?)</a></h3>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        # No plot/thumbnail scraped here; TMDB lookup below may fill them.
        scrapedplot = ""
        scrapedthumbnail = ""
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))
    # Pagination: the anchor right after the "current" page marker.
    patronvideos = '<span class="current">[^<]+</span><a href=\'(.*?)\''
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Resolve the embedded players of a movie page into playable Items.

    Every iframe source is fetched and its content appended to the HTML
    buffer ("dir?" links are redirectors, so only their final URL is kept);
    servertools then scans the whole buffer for hoster links.
    """
    logger.info("kod.mondolunatico findvideos")
    # Load the page
    data = httptools.downloadpage(item.url).data
    # Extract the contents: every embedded iframe source.
    patron = 'src="([^"]+)" frameborder="0"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        if "dir?" in scrapedurl:
            # Redirector: keep only the URL it resolves to.
            data += httptools.downloadpage(scrapedurl).url + '\n'
        else:
            data += httptools.downloadpage(scrapedurl).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Derive a clean server name from the scanner's "[name]" title.
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(["[%s] " % support.color(server, 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
    return itemlist

View File

@@ -1,12 +0,0 @@
{
"id": "occhiodelwrestling",
"name": "Occhio Del Wrestling",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "http://www.wrestlingrevolution.it/images/new/slideshow/occhiodelwrestling.jpg",
"banner": "http://www.wrestlingrevolution.it/images/new/slideshow/occhiodelwrestling.jpg",
"categories": ["documentary"],
"settings": []
}

View File

@@ -1,135 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per http://www.occhiodelwrestling.netsons.org/
# Ringraziamo Icarus crew
# ------------------------------------------------------------
import re
from core import httptools, scrapertools, servertools
from core.item import Item
from lib import unshortenit
from platformcode import logger
host = "http://www.occhiodelwrestling.netsons.org"
# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
    """Channel root menu: a single entry listing the site's categories."""
    logger.info()
    root = Item(channel=item.channel,
                action="categorie",
                title="Lista categorie",
                text_color="azure",
                url=host,
                thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png")
    return [root]
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
    """List the site's categories from the navigation menu block."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Narrow to the menu container before extracting its entries.
    blocco = scrapertools.find_single_match(data, '<div class="menu-container">(.*?)</div>')
    patron = r'<li[^>]+><a title="[^"]+" href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="loaditems",
                 title=scrapedtitle,
                 text_color="azure",
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 folder=True))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def loaditems(item):
    """List the articles of a category page (link, title and cover image)."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"[^>]*>\s*<img[^s]+src="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedimg in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=scrapedtitle,
                 text_color="azure",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedimg,
                 folder=True))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """List the mirror links found on an article page, one playable Item each.

    Each entry is numbered ("Link 1:", "Link 2:", ...) in page order.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = r'<li><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # enumerate(..., 1) replaces the hand-maintained ``index`` counter of
    # the original; same numbering, idiomatic form.
    for index, (scrapedurl, scrapedtitle) in enumerate(matches, 1):
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title="[COLOR orange][B]Link %s:[/B][/COLOR] %s" % (index, scrapedtitle),
                 text_color="azure",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def play(item):
    """Unshorten the protected link and hand it to the server resolvers."""
    logger.info()
    resolved_url, _status = unshortenit.unshorten(item.url)
    itemlist = servertools.find_video_items(data=resolved_url)
    for video in itemlist:
        video.title = item.show
        video.fulltitle = item.fulltitle
        video.show = item.show
        video.thumbnail = item.thumbnail
        video.channel = item.channel
    return itemlist
# ================================================================================================================

View File

@@ -1,36 +0,0 @@
{
"id": "serietvhd",
"name": "SerieTvHd",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https:\/\/serietvhd.stream\/wp-content\/uploads\/2017\/12\/logo-nuovo.png",
"bannermenu": "https:\/\/serietvhd.stream\/wp-content\/uploads\/2017\/12\/logo-nuovo.png",
"categories": ["tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,293 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per serietvhd
# ----------------------------------------------------------
import re
import urlparse
from core import httptools
from platformcode import logger, config
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
host = "https://serietvhd.stream"
headers = [['Referer', host]]
def mainlist(item):
    """Channel root menu: listings, rankings, year/genre filters and search."""
    logger.info("[serietvhd.py] mainlist")
    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    # (action, title, url, extra) for each root entry.
    entries = [
        ("lista_serie", "[COLOR azure]Serie Tv[/COLOR]",
         "%s/serietv/" % host, "serietv"),
        ("lista_serie", "[COLOR azure]Piu Popolari[/COLOR]",
         "%s/piu-popolari/" % host, "piu-popolari"),
        ("lista_serie", "[COLOR azure]Piu Votati[/COLOR]",
         "%s/piu-votati/" % host, "piu-votati"),
        ("by_anno_or_by_genere", "[COLOR azure]Genere[/COLOR]",
         "%s/serietv/" % host, "by_genere"),
        ("by_anno_or_by_genere", "[COLOR azure]Anno di Rilascio[/COLOR]",
         "%s/serietv/" % host, "by_anno"),
        ("search", "[COLOR yellow]Cerca...[/COLOR]",
         "%s/serietv/" % host, "tvshow"),
    ]
    itemlist = []
    for action, title, url, extra in entries:
        itemlist.append(Item(channel=item.channel,
                             action=action,
                             title=title,
                             url=url,
                             extra=extra,
                             thumbnail=thumb))
    return itemlist
def lista_serie(item):
    """List the series of a listing page, plus a next-page entry.

    The "serietv" listing restricts scraping to the archive block; the
    popularity/rating listings use the whole page.  Pagination is detected
    on the untrimmed page, so the original HTML is kept aside.
    """
    logger.info("[serietvhd.py] lista_serie ")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Keep the original value for later use (pagination lookup below).
    dataoriginale=data
    if item.extra=="serietv":
        data = scrapertools.find_single_match(data, '<div id="archive-content" class="animation-2 items">.*?</article></div>')
    # Each <article>: cover image, alt title, detail link.
    patronvideos = '<article id.*?src="([^"]+)" alt="([^"]+)".*?href="([^"]+)">.*?</article>'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    for match in matches:
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(1))
        scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
        scrapedtitle = scrapertools.unescape(match.group(2)).replace("[", "").replace("]", "")
        scrapedurl = urlparse.urljoin(item.url, match.group(3))
        itemlist.append(
            Item(channel=item.channel,
                 action="serietv",
                 contentType="serietv",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot"))
    # Pagination link, searched on the full (untrimmed) page.
    next_page = scrapertools.find_single_match(dataoriginale, '<div class="pagination">.*?href="([^"]+)".*?</div>')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url="%s" % next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def serietv(item):
    """List every episode of a series page, season by season.

    Extracts each season <div> and, inside it, each episode row (thumbnail,
    season/episode number, link, title, air date).  Optionally appends the
    "add to video library" entry.
    """
    logger.info("[serietvhd.py] serietv")
    itemlist = []
    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data
    dataoriginale=data
    # Extract the season <div> blocks into a list.
    matches = scrapertools.find_multiple_matches(data, '<div class="se-c">.*?</div></div>')
    for match in matches:
        """#per ogni stagione estraggo il numero di stagione
        stagione = scrapertools.find_single_match(match, '<div class="se-q">.*?"title">([^<]+).*?</div>')
        itemlist.append(
            Item(channel=item.channel,
                action="",
                contentType="serietv",
                fulltitle=stagione,
                show=stagione,
                title="[COLOR yellow]%s[/COLOR]" % stagione,
                viewmode="movie_with_plot"))"""
        # Extract the episodes of this single season.
        patronvideos = '<li>.*?src="([^"]+)".*?"numerando">([^<]+).*?href="([^"]+)">([^<]+).*?"date">([^<]+).*?</li>'
        matches2 = re.compile(patronvideos, re.DOTALL).finditer(match)
        for match2 in matches2:
            scrapedthumbnail = urlparse.urljoin(item.url, match2.group(1))
            scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
            episodio = scrapertools.unescape(match2.group(2))
            scrapedurl = urlparse.urljoin(item.url, match2.group(3))
            scrapedtitle = scrapertools.unescape(match2.group(4))
            # NOTE(review): this rebinds ``data`` (the page HTML) to the air
            # date string; harmless here because the page is no longer read,
            # but a confusing name reuse.
            data = scrapertools.unescape(match2.group(5))
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="movie",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title="["+episodio +"] "+scrapedtitle + " ["+data+"]",
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     extra=item.extra,
                     viewmode="movie_with_plot") )
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="serietv",
                 show=item.show))
    return itemlist
def by_anno_or_by_genere(item):
    """List filter links, by release year or by genre depending on item.extra.

    ``extra`` must be "by_anno" or "by_genere"; any other value now yields
    an empty list instead of crashing.
    """
    logger.info("[serietvhd.py] genere")
    itemlist = []
    if item.url == "":
        item.url = host
    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data
    if item.extra=="by_anno":
        patronvideos = '<li><a href="([^"]+)">([^"]+)</a></li>'
    elif item.extra=="by_genere":
        patronvideos = '<li id="menu-item.*?genres.*?<a href="([^"]+)">([^<]+)</a></li>'
    else:
        # BUG FIX: the original left ``patronvideos`` unassigned for any
        # other extra value and raised NameError below; fail soft instead.
        return itemlist
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    for match in matches:
        scrapedurl = urlparse.urljoin(item.url, match.group(1))
        scrapedtitle = match.group(2)
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                 folder=True))
    return itemlist
def topimdb(item):
    """Scrape the Top-IMDB ranking page (entry currently disabled in mainlist).

    Pattern groups: 1=detail url, 2=thumbnail, 3=rank position, 4=rating,
    5=title.
    """
    logger.info("[serietvhd.py] topimdb")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Keep the original value for possible future use.
    dataoriginale=data
    patronvideos = '<div class="top-imdb-item".*?href="([^"]+)".*?src="([^"]+)".*?"puesto">([^<]+)<.*?"rating">([^<]+)<.*?>([^<]+)</a></div></div>'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    for match in matches:
        scrapedurl = urlparse.urljoin(item.url, match.group(1))
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(2))
        scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
        posizione = scrapertools.unescape(match.group(3))  # rank, currently unused
        voto = scrapertools.unescape(match.group(4))  # rating, currently unused
        # BUG FIX: the original read group(5) into scrapedurl (clobbering
        # the detail URL) and group(6) for the title; the pattern only has
        # five groups, so every match raised IndexError.  The title is
        # group(5) and the URL from group(1) is kept.
        scrapedtitle = scrapertools.unescape(match.group(5))
        itemlist.append(
            Item(channel=item.channel,
                 action="serietv",
                 contentType="serietv",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot"))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def search(item, texto):
    """Query the site search and return the matches as menu entries."""
    try:
        item.url = host + "/?s=" + texto
        itemlist = []
        # Fetch the results page
        page = httptools.downloadpage(item.url, headers=headers).data
        patronvideos = '<div class="result-item">.*?href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)".*?</div>'
        for raw_url, raw_thumb, raw_title in re.findall(patronvideos, page, re.DOTALL):
            show_url = urlparse.urljoin(item.url, raw_url)
            show_thumb = urlparse.urljoin(item.url, raw_thumb).replace(" ", "%20")
            show_title = scrapertools.unescape(raw_title)
            itemlist.append(
                Item(channel=item.channel,
                     action="serietv",
                     contentType="movie",
                     fulltitle=show_title,
                     show=show_title,
                     title=show_title.replace("[", "").replace("]", ""),
                     url=show_url,
                     thumbnail=show_thumb,
                     extra=item.extra,
                     viewmode="movie_with_plot"))
        tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
        return itemlist
    except:
        # Keep the global search alive on any scraping error
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
def findvideos(item):
    """Locate playable video links embedded in the item's page."""
    logger.info("[serietvhd.py] findvideos")
    # Fetch the page and let servertools detect the embedded players
    page = httptools.downloadpage(item.url, headers=headers).data
    videoitems = servertools.find_video_items(data=page)
    for vi in videoitems:
        vi.title = item.title + '[COLOR green][B]' + vi.title + '[/B][/COLOR]'
        vi.fulltitle = item.fulltitle
        vi.show = item.show
        vi.thumbnail = item.thumbnail
        vi.channel = item.channel
    return videoitems

View File

@@ -1,37 +0,0 @@
{
"id": "serietvu",
"name": "SerieTVU",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "http://www.serietvu.com/wp-content/themes/gurarjbar/images/logo.png",
"banner": "http://www.serietvu.com/wp-content/themes/gurarjbar/images/logo.png",
"categories": [ "tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,294 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per http://www.serietvu.com/
# ------------------------------------------------------------
import re
from core import httptools, scrapertools, servertools
from core.item import Item
from core import tmdb
from platformcode import logger, config
host = "https://www.serietvu.club"
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
headers = [['Referer', host]]
# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
    """Top-level menu for the SerieTVU channel.

    NOTE(review): the original called support.color(), but `support` is
    never imported in this module and raised NameError; the equivalent
    [COLOR] markup (the convention used by the sibling channels) is
    inlined instead — confirm rendering matches the old helper.
    """
    logger.info("[SerieTVU.py]==> mainlist")
    # Shared placeholder artwork for the browse entries
    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    itemlist = [Item(channel=item.channel,
                     action="lista_serie",
                     title="[COLOR orange]Nuove serie TV[/COLOR]",
                     url="%s/category/serie-tv" % host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="latestep",
                     title="[COLOR azure]Nuovi Episodi[/COLOR]",
                     url="%s/ultimi-episodi" % host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="lista_serie",
                     title="[COLOR azure]Serie TV Aggiornate[/COLOR]",
                     url="%s/ultimi-episodi" % host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="categorie",
                     title="[COLOR azure]Categorie[/COLOR]",
                     url=host,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     action="search",
                     title="[COLOR yellow]Cerca ...[/COLOR]",
                     extra="tvshow",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
    """Feed the global "newest" menus with the latest episodes."""
    logger.info("[SerieTVU.py]==> newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = host + "/ultimi-episodi"
            item.action = "latestep"
            itemlist = latestep(item)
            # Drop the trailing pagination entry, if latestep added one
            if itemlist[-1].action == "latestep":
                itemlist.pop()
    except:
        # Keep the aggregated news view alive on scraping errors
        import sys
        for exc_part in sys.exc_info():
            logger.error("{0}".format(exc_part))
        return []
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
    """Run a site search and present the results as a series list."""
    logger.info("[SerieTVU.py]==> search")
    item.url = "%s/?s=%s" % (host, texto)
    try:
        results = lista_serie(item)
    except:
        # Never let one failing channel break the global search
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        results = []
    return results
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
    """List the browse-by-category entries from the "Sfoglia" sidebar."""
    logger.info("[SerieTVU.py]==> categorie")
    html = httptools.downloadpage(item.url, headers=headers).data
    # Only scan the <ul> that follows the "Sfoglia" heading
    section = scrapertools.find_single_match(html, r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>')
    links = re.findall(r'<li><a href="([^"]+)">([^<]+)</a></li>', section, re.DOTALL)
    return [Item(channel=item.channel,
                 action="lista_serie",
                 title=name,
                 contentType="tv",
                 url="%s%s" % (host, path),
                 thumbnail=item.thumbnail,
                 folder=True)
            for path, name in links]
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def latestep(item):
    """Scrape the latest-episode grid; each entry jumps straight to its video."""
    logger.info("[SerieTVU.py]==> latestep")
    itemlist = []
    html = httptools.downloadpage(item.url, headers=headers).data
    patron = (r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
              r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<small>([^<]+)<')
    for ep_url, ep_img, raw_title, ep_info in re.findall(patron, html, re.DOTALL):
        name = scrapertools.decodeHtmlentities(raw_title.strip())
        # (season, episode) pairs parsed from labels like "2x05"
        episodio = re.findall(r'(\d+)x(\d+)', ep_info, re.DOTALL)
        label = "%s %s" % (name, ep_info)
        itemlist.append(
            Item(channel=item.channel,
                 action="findepisodevideo",
                 title=label,
                 fulltitle=name,
                 url=ep_url,
                 extra=episodio,
                 thumbnail=ep_img,
                 show=label,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
    """List TV shows from a grid page, appending a next-page entry when present."""
    logger.info("[SerieTVU.py]==> lista_serie")
    itemlist = []
    html = httptools.downloadpage(item.url, headers=headers).data
    patron = (r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
              r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<')
    for show_url, show_img, raw_title in re.findall(patron, html, re.DOTALL):
        name = scrapertools.decodeHtmlentities(raw_title.strip())
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=name,
                 fulltitle=name,
                 url=show_url,
                 thumbnail=show_img,
                 show=name,
                 folder=True))
    # Pagination link, when the page offers one
    next_page = scrapertools.find_single_match(html, '<a href="([^"]+)"[^>]+>Pagina')
    if next_page:
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
    """Build one entry per episode, grouped by season.

    The page stores one <option value="N"> per season and, for each, a
    <div class="list" data-id="N"> block holding the episode anchors.
    The raw anchor tag is kept in item.extra so findvideos() can scan it
    for the embedded player link.
    """
    logger.info("[SerieTVU.py]==> episodios")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # One match per season number in the season <select>
    patron = r'<option value="(\d+)"[\sselected]*>.*?</option>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for value in matches:
        # Season block for this season id
        patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % value
        blocco = scrapertools.find_single_match(data, patron)
        # NOTE: `matches` is deliberately rebound below; safe because the
        # outer for-loop already holds an iterator over the season list.
        patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">)[^>]+>[^>]+>([^<]+)</div>'
        matches = re.compile(patron, re.DOTALL).findall(blocco)
        for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches:
            # Episode number stripped from "Episodio N", zero-padded for "SxEE"
            number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip()
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=value + "x" + number.zfill(2),
                     fulltitle=scrapedtitle,
                     contentType="episode",
                     url=scrapedurl,
                     thumbnail=scrapedimg,
                     extra=scrapedextra,
                     folder=True))
    # Offer "add to video library" when supported and episodes were found
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    """Resolve the server links carried in item.extra into playable items.

    NOTE(review): the original formatted the server name with color(),
    which is never defined or imported in this module and raised
    NameError; the orange [COLOR] markup is now inlined — confirm it
    matches the old helper's output.
    """
    logger.info("[SerieTVU.py]==> findvideos")
    itemlist = servertools.find_video_items(data=item.extra)
    for videoitem in itemlist:
        # Normalize the detected server name for display
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "[[COLOR orange]%s[/COLOR]] %s" % (server, item.title)
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findepisodevideo(item):
    """Jump straight to the video links of the episode stored in item.extra.

    item.extra holds [(season, episode)] as scraped by latestep().

    Fixes: the original called the undefined color() helper (NameError)
    and indexed matches[0] unconditionally (IndexError when the episode
    markup is missing); both handled now.
    """
    logger.info("[SerieTVU.py]==> findepisodevideo")
    # Download the show page
    data = httptools.downloadpage(item.url, headers=headers).data
    # Isolate the block of the requested season
    patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
    blocco = scrapertools.find_single_match(data, patron)
    # Extract the requested episode anchor (leading zeros stripped to match data-id)
    patron = r'<a data-id="%s[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">' % item.extra[0][1].lstrip("0")
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    if not matches:
        # Episode not found on the page: return no servers instead of crashing
        return []
    itemlist = servertools.find_video_items(data=matches[0][0])
    for videoitem in itemlist:
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "[[COLOR orange]%s[/COLOR]] %s" % (server, item.title)
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
    return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
\r
# ================================================================================================================

View File

@@ -1,45 +0,0 @@
{
"id": "streaminghd",
"name": "Streaming HD*",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": "https://www.visions.tn/wp-content/uploads/2015/11/large_news_HD-STREAMING.jpg",
"banner": "https://www.visions.tn/wp-content/uploads/2015/11/large_news_HD-STREAMING.jpg",
"categories": ["tvshow","movie","vosi"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,251 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale streaminghd
# ------------------------------------------------------------
from core import httptools, scrapertools, servertools, listtools
from core.item import Item
from platformcode import logger
from core import tmdb
import re
__channel__ = "streaminghd"
listtools.__channel__ = __channel__
host = "https://streaminghd.blog"
headers = [['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0']] ## <-- user agent per poter accedere
def mainlist(item):
    """Root menu of the streaminghd channel."""
    logger.info("[streaminghd.py] mainlist")
    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    search_thumb = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
    menu = []
    # Browse sections: (title, url, extracheck marker)
    for title, url, check in (
            ("[COLOR azure]Film[/COLOR]", "%s/film/" % host, "film"),
            ("[COLOR azure]Piu' Votati[/COLOR]", "%s/piu-votati/" % host, "piuvotati"),
            ("[COLOR azure]Piu' Visti[/COLOR]", "%s/piu-visti//" % host, "piuvisti"),
            ("[COLOR azure]Serie TV[/COLOR]", "%s/serietv/serie/" % host, "serietv")):
        menu.append(Item(channel=item.channel, action="peliculas", title=title,
                         url=url, extracheck=check, thumbnail=thumb))
    # Genre / year indexes share one handler
    for title, check in (("[COLOR azure]Genere[/COLOR]", "by_genere"),
                         ("[COLOR azure]Elenco Per Anno[/COLOR]", "by_anno")):
        menu.append(Item(channel=item.channel, action="by_anno_or_by_genere",
                         title=title, url=host, extracheck=check, thumbnail=thumb))
    # Search entries
    for title, extra in (("[COLOR yellow]Cerca Film[/COLOR]", "movie"),
                         ("[COLOR yellow]Cerca Serie[/COLOR]", "tvshow")):
        menu.append(Item(channel=item.channel, action="search", title=title,
                         extra=extra, thumbnail=search_thumb))
    return menu
def peliculas(item):
    """List movies or shows from a grid page.

    item.extracheck selects both the HTML block to scan and the regex
    layout ("film", "piuvotati"/"piuvisti", "serietv", search pages);
    Item construction is delegated to listtools.list_titles_info with
    the substitution templates built in `itemp`.
    """
    logger.info("[streaminghd.py] peliculas")
    patron = ''
    if item.url == "":
        item.url = host
    # Fetch the page
    data = httptools.downloadpage(item.url, headers=headers).data
    logger.info("[streaminghd.py] peliculas")
    datat = data
    ## Generic substitution parameters for most movie layouts
    itemp = {'title': '\\2 (\\5) [Rate: [COLOR yellow]\\3[/COLOR]]',
             'url': '\\4',
             'thumbnail': '\\1',
             'extracheck': item.extracheck}
    tipos = 'movie'
    if 'serie' in item.extracheck:
        tipos = 'tv'
        itemp['content'] = 'tvshow'
        itemp['action'] = 'list_seasons'
    ## Per-section overrides of block and pattern
    if item.extracheck == "film":
        datat = scrapertools.find_single_match(data, '<div id="archive-content".*?<\/article><\/div>')
    elif "search" in item.extracheck: ## <-- NOT FIXED yet (untested branch)
        datat = scrapertools.find_single_match(data, '<div class="search-page">.*?<\/div><\/div><\/div>')
        itemp['title'] = '\\3 (\\4)'
        itemp['url'] = '\\2'
        patron = 'article.*?src="([^"\s]+)\s*".*?href="([^"\s]+)\s*"\s*>([^<]+).*?year">([^<]+).*?<\/article>'
    elif "piu" in item.extracheck: ## <-- most viewed / top rated layout
        datat = scrapertools.find_single_match(data, '<article.*?class="item movies">.*?<\/div><\/div><\/div>')
        logger.info("[streaminghd.py] blocco"+datat)
        itemp['title'] = '\\2 (\\3)'
        itemp['url'] = '\\1'
        patron = '<article.*?href="([^"]+)".*?alt="([^"]+)".*?wdate">([^"]+)<\/span>'
    elif 'serie' in item.extracheck: ## <-- series layout
        datat = scrapertools.find_single_match(data, '<article.*?class="item tvshows">.*?<\/article><\/div>')
    # Default pattern for layouts that did not set their own above
    if not patron: patron = '<article.*?src="([^"\s]+)\s*"\s*alt="([^"]+)".*?\/span> s*([^<]*).*?href="([^"\s]+)\s*".*?span>([^<]+).*?<\/article>'
    itemlist = listtools.list_titles_info(regx=patron, data=datat, itemp=itemp, tipos=tipos)
    # Pagination entry, preserving the section marker for the next call
    i = listtools.next_page(data, '<div.*?pagination.*?href="([^"\s]+)\s*"', 'peliculas')
    if i:
        i.extracheck = item.extracheck
        itemlist.append(i)
    return itemlist
def list_seasons(item):
    """Delegate season listing to listtools with this site's markup delimiters."""
    logger.info("[streaminghd.py] list_seasons")
    return listtools.list_seasons(item=item, sdel='<span class="title".*?Stagion.*?<\/span>',
                                  enddel='<\/div><\/div><\/div><\/div>',
                                  epdel={
                                      'regx': '<div\s*class="numerando".*?>([^<]+).*?episodiotitle.*?href="([^"\s]+)\s*">([^<]+)',
                                      'title': '\\1 \\3', 'url': '\\2'})
def episodios(item):
    """Delegate episode listing to listtools; item.url already holds the HTML block."""
    logger.info("[streaminghd.py] episodios")
    return listtools.list_episodes(item=item, data=item.url,
                                   epre={
                                       'regx': '<div\s*class="numerando".*?>([^<]+).*?episodiotitle.*?href="([^"\s]+)\s*">([^<]+)',
                                       'title': '\\1 \\3', 'url': '\\2'})
def findvideos(item):
    """Detect embedded video servers in the page referenced by item.url."""
    logger.info("[streaminghd.py] findvideos")
    # Fetch the page and let servertools find the players
    page = httptools.downloadpage(item.url, headers=headers).data
    found = servertools.find_video_items(data=page)
    for video in found:
        video.title = item.title + '[COLOR green][B]' + video.title + '[/B][/COLOR]'
        video.channel = item.channel
    return found
def search(item, texto):
    """Dispatch a search to the movie or TV-show result parser."""
    logger.info("[streaminghd.py] " + item.url + " search " + texto)
    try:
        if item.extra == "movie":
            item.url = host + "/?s=" + texto
            return peliculas_src(item)
        if item.extra == "tvshow":
            item.url = host + "/serietv/?s=" + texto
            return peliculas_tv_src(item)
    except:
        # Keep the global search alive on any scraping error
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
def peliculas_src(item):
    """Parse movie search results into playable menu entries.

    Cleanup: the original re-assigned scrapedplot/scrapedthumbnail to
    empty strings on every iteration and never used the plot; the dead
    local is removed and the (always empty) thumbnail passed directly.
    """
    logger.info("kod.streaminghd peliculas")
    itemlist = []
    # Fetch the results page
    data = httptools.downloadpage(item.url, headers=headers).data
    # Extract result anchors: (url, title)
    patron = r'<article><div class="image"><div class="thumbnail animation-2"><a href="([^"]+)">[^=]+=[^=]+="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 extra="movie",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=""))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def peliculas_tv_src(item):
    """Parse TV-show search results into episode-list entries.

    Cleanup: the original re-assigned scrapedplot/scrapedthumbnail to
    empty strings on every iteration and never used the plot; the dead
    local is removed and the (always empty) thumbnail passed directly.
    """
    logger.info("kod.streaminghd peliculas")
    itemlist = []
    # Fetch the results page
    data = httptools.downloadpage(item.url, headers=headers).data
    # Extract result anchors: (url, title)
    patron = r'<article><div class="image"><div class="thumbnail animation-2"><a href="([^"]+)">[^=]+=[^=]+="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 contentType="episode",
                 extra="tvshow",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=""))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def by_anno_or_by_genere(item):
    """List the year or genre indexes depending on item.extracheck.

    Fix: an unexpected extracheck previously left `patronvideos` unbound
    and raised UnboundLocalError; an empty list is returned instead.
    """
    logger.info("[streaminghd.py] genere")
    if item.url == "": item.url = host
    # Fetch the page
    data = httptools.downloadpage(item.url, headers=headers).data
    if item.extracheck == "by_anno":
        patronvideos = '<li><a href="([^"]+)">([^"]+)<\/a><\/li>'
    elif item.extracheck == "by_genere":
        patronvideos = '<li class="cat-item\s*cat-item.*?href="([^"\s]+)\s*">([^<]+)<\/a>.*?<\/li>'
    else:
        # Unknown selector: nothing to scrape
        return []
    itemlist = listtools.list_titles(regx=patronvideos, data=data,
                                     itemp={'title': '\\2', 'url': '\\1', 'action': 'peliculas', 'content': 'list'})
    return itemlist

View File

@@ -1,36 +0,0 @@
{
"id": "toonitalia",
"name": "ToonItalia",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https://toonitalia.org/wp-content/themes/seos-video/images/header.png",
"bannermenu": "https://toonitalia.org/wp-content/themes/seos-video/images/header.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,196 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per toonitalia
# ----------------------------------------------------------
import re
import urlparse
from platformcode import logger, config
from core import servertools, httptools, scrapertools
from core.item import Item
from core import tmdb
host = "https://toonitalia.org"
def mainlist(item):
    """Main menu of the toonitalia channel."""
    logger.info("[toonitalia.py] Mainlist")
    anime_thumb = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
    # (title, path, extra, thumbnail) for the browse sections
    sections = (
        ("Lista Anime", "/lista-anime-2/", "tv", anime_thumb),
        ("Anime Sub Ita", "/lista-anime-sub-ita/", "tv", anime_thumb),
        ("Film Animazione", "/lista-film-animazione/", "movie", anime_thumb),
        ("Serie TV", "/lista-serie-tv/", "tv",
         "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
    )
    itemlist = []
    for title, path, extra, thumb in sections:
        itemlist.append(Item(channel=item.channel,
                             action="lista_anime",
                             title=title,
                             text_color="azure",
                             url=host + path,
                             extra=extra,
                             thumbnail=thumb))
    itemlist.append(Item(channel=item.channel,
                         title="Cerca ...",
                         text_color="yellow",
                         action="search",
                         extra="anime",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))
    return itemlist
def search(item, texto):
    """Search the site and list the matching titles."""
    logger.info("[toonitalia.py] Search")
    item.url = host + "/?s=" + texto
    try:
        results = src_list(item)
    except:
        # Keep the global search alive on any scraping error
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        results = []
    return results
def src_list(item):
    """Parse the search-result <article> blocks into menu items.

    Fix: the original indexed url_title[0] unconditionally, raising
    IndexError on any result block without a title link; such malformed
    blocks are now skipped.
    """
    logger.info("[toonitalia.py] src_list")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    blocchi = re.findall(r'<article id="post(.*?)</article>', data, re.DOTALL)
    for blocco in blocchi:
        url_title = re.findall(r'<h2 class="entry-title"><a href="([^"]+)"[^>]+>([^<]+)</a></h2>', blocco, re.DOTALL)
        if not url_title:
            # No title anchor in this block: skip it
            continue
        scrapedurl, rawtitle = url_title[0]
        scrapedtitle = scrapertools.decodeHtmlentities(rawtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="links",
                 text_color="azure",
                 contentType="tv",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 show=scrapedtitle,
                 extra=item.extra,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def lista_anime(item):
    """List every title from a category index page."""
    logger.info("[toonitalia.py] Lista_anime")
    itemlist = []
    # Fetch the page and isolate the category list block
    page = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(page, '<ul class="lcp_catlist"[^>]+>(.*?)</ul>')
    for anime_url, raw_title in re.findall('<a href="([^"]+)".*?>([^"]+)</a>', blocco, re.DOTALL):
        anime_title = scrapertools.unescape(raw_title)
        itemlist.append(
            Item(channel=item.channel,
                 action="links",
                 text_color="azure",
                 contentType="tv",
                 title=anime_title,
                 fulltitle=anime_title,
                 url=anime_url,
                 show=anime_title,
                 extra=item.extra,
                 folder=True))
    return itemlist
def links(item):
    """List the hoster sections ("Links <hoster>") of a title's page.

    Two markup generations are supported: a colored <span> header and,
    as a fallback, an <a name="Links ..."> anchor.  The raw HTML of each
    section is carried in item.url for episodi() to parse.
    """
    logger.info("[toonitalia.py] Links")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Newer markup: "Links <hoster>" inside a colored span
    patron = r'<span style="color:#[^;]+;">[Ll]inks?\s*'
    patron += r'([^<]+)<\/span>(?:<\/p>\s*|<br\s*\/>)(.*?)(?:<\/p>|\s*<a name)'
    blocchi = scrapertools.find_multiple_matches(data, patron)
    if not len(blocchi) > 0:
        # Older markup: named anchor per hoster section
        patron = r'<a name="Links?\s*([^"]+)"><\/a>(.*?)<\/p>'
        blocchi = scrapertools.find_multiple_matches(data, patron)
    for scrapedtitle, blocco in blocchi:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 text_color="orange",
                 title="Guarda con %s" % scrapedtitle,
                 url=blocco,
                 extra=scrapedtitle,
                 thumbnail=item.thumbnail,
                 folder=True))
    return itemlist
def episodi(item):
    """Extract per-episode links from the HTML fragment carried in item.url."""
    logger.info("[toonitalia.py] Episodi")
    itemlist = []
    # Openload anchors wrap the label in extra markup, so use a looser pattern
    if 'openload' in item.extra.lower():
        patron = r'<a href="([^"]+)"[^>]+>(?:[^>]+>[^>]+>[^>]+>\s*<b>|)([^<]+)(?:</b>|</a>)'
    else:
        patron = r'<a href="([^"]+)"[^>]+>([^<]+)</a>'
    for ep_url, raw_title in re.findall(patron, item.url, re.DOTALL):
        # Wikipedia anchors are reference links, not episodes
        if 'wikipedia' in ep_url:
            continue
        ep_title = scrapertools.decodeHtmlentities(raw_title).replace("×", "x")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="tv",
                 title=ep_title,
                 text_color="azure",
                 fulltitle=ep_title,
                 url=ep_url,
                 extra="tv",
                 show=item.show,
                 thumbnail=item.thumbnail,
                 folder=True))
    return itemlist
def findvideos(item):
    """Resolve the raw links carried in item.url into playable server items.

    NOTE(review): the original used color(), which is not defined or
    imported in this module and raised NameError at runtime; the orange
    [COLOR] markup is now inlined — confirm rendering matches the old
    helper.
    """
    logger.info("[toonitalia.py] Findvideos")
    itemlist = servertools.find_video_items(data=item.url)
    for videoitem in itemlist:
        # Normalize the detected server name for display
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = "[[COLOR orange]%s[/COLOR]] %s" % (server.capitalize(), item.title)
        videoitem.text_color = "azure"
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
    return itemlist
\r

View File

@@ -1,36 +0,0 @@
{
"id": "umsfunsub",
"name": "UMSFunSub",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "http:\/\/www.hiumi.it\/public\/forum\/styles\/art_deluxe\/imageset\/logo.png",
"bannermenu": "http:\/\/www.hiumi.it\/public\/forum\/styles\/art_deluxe\/imageset\/logo.png",
"categories": ["anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,164 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per umsfunsub
# ----------------------------------------------------------
import re
import urllib
from core import httptools
from platformcode import logger
from core import scrapertools
from core.item import Item
from core import tmdb
host = "http://trackerums.altervista.org"
headers = [['Referer', host]]
def mainlist(item):
    """Main menu of the UMSFunSub channel.

    NOTE(review): the original used support.color(), but `support` is
    never imported in this module (NameError); the equivalent [COLOR]
    markup is inlined — confirm rendering matches the old helper.
    """
    logger.info("[UMSFunSub.py]==> mainlist")
    itemlist = [Item(channel=item.channel,
                     title="[COLOR azure]Progetti[/COLOR]",
                     action="progetti",
                     plot="- In corso\n- Conclusi",
                     url=makeurl("progetti-fansub-anime-giapponesi-attivi-shoujo-shounen-manga.php"),
                     thumbnail="http://www.hiumi.it/public/forum/styles/art_deluxe/imageset/logo.png"),
                Item(channel=item.channel,
                     title="[COLOR azure]Lista Completa[/COLOR]",
                     action="lista_anime",
                     url=makeurl("streaming-fansub-gratuiti.php?categoria=In_corso&cat=Conclusi"),
                     thumbnail="http://www.hiumi.it/public/forum/styles/art_deluxe/imageset/logo.png"),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca ...[/COLOR]",
                     action="search",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
                ]
    return itemlist
def progetti(item):
    """List the first two project categories (ongoing / completed).

    NOTE(review): the original used color(), which is not defined or
    imported in this module (NameError); the [COLOR] markup is inlined.
    """
    logger.info("[UMSFunSub.py]==> progetti")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, '<div id="pf_imageMenu1" class="imageMenu">(.*?)</div>')
    patron = '<a href="[^=]+=([\w]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    for scrapedcategory, scrapedtitle in matches:
        scrapedurl = "streaming-fansub-gratuiti.php?categoria=" + scrapedcategory
        # Only the first two menu entries are real categories
        if len(itemlist) < 2:
            itemlist.append(
                Item(channel=item.channel,
                     action="lista_anime",
                     title="[COLOR azure]%s[/COLOR]" % scrapedtitle,
                     url=makeurl(scrapedurl),
                     thumbnail=item.thumbnail,
                     folder=True))
    return itemlist
def search(item, texto):
    """Search the tracker and reuse lista_anime to render the results."""
    logger.info("[UMSFunSub.py]==> search")
    item.url = makeurl("risultato_ricerca.php?ricerca=" + texto)
    try:
        results = lista_anime(item)
    except:
        # Keep the global search alive on any scraping error
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        results = []
    return results
def lista_anime(item):
    """List anime with their details from an index or search page.

    NOTE(review): color()/support.color() were undefined in this module
    (NameError); the [COLOR] markup is inlined.  The "[COLOR red] | "
    separator is kept exactly, since episodi() strips the title suffix
    on that marker.
    """
    logger.info("[UMSFunSub.py]==> lista_anime")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<img src="([^"]+)"[^<]+<[^<]+<[^<]+>[^>]+<[^<]+<[^<]+<[^<]+<[^>]+>([^<]+)<[^<]+<[^<]+<[^<]+<a href="([^&]+)&amp;titolo=([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapeddetails, scrapedurl, scrapedtitle in matches:
        # Rebase the relative url on the current page location
        scrapedurl = item.url.replace(item.url.split("/")[-1], scrapedurl)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 title="[COLOR azure]%s[/COLOR] [COLOR red] | [/COLOR] [COLOR deepskyblue]%s[/COLOR]" % (
                     scrapedtitle, scrapeddetails),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=makeurl(scrapedthumbnail),
                 folder=True))
    return itemlist
def episodi(item):
    """List the episodes of the selected anime.

    NOTE(review): color() was undefined in this module (NameError); the
    [COLOR] markup is inlined with the same colors.
    """
    logger.info("[UMSFunSub.py]==> episodi")
    itemlist = []
    item.url = item.url.replace("dettagli_sub.php", "lista-ep-streaming.php") + "&titolo=" + urllib.quote(
        item.fulltitle)
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<div id="listaepvera">([\d+|\w+]+)\.?([^<]+)\s+?[^<]+<[^<]+<a href="[^\d]+(\d+)&'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapednumber, scrapedtitle, scrapedid in matches:
        # Strip the red "details" suffix added by lista_anime from the title
        animetitle = item.title.replace("[COLOR red] |" + item.title.split("|")[-1], "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title="[COLOR gold]%s[/COLOR] | [COLOR azure]%s[/COLOR] - [COLOR deepskyblue]%s[/COLOR]" % (
                     scrapednumber, animetitle, scrapedtitle),
                 fulltitle="[COLOR red]%s[/COLOR] | [COLOR deepskyblue]%s[/COLOR]" % (animetitle, scrapedtitle),
                 show=item.show,
                 url=makeurl("dettagli-stream.php?id=" + scrapedid, item.title),
                 thumbnail=item.thumbnail,
                 folder=True))
    return itemlist
def findvideos(item):
    """Extract the direct stream URL from the flash player parameters.

    NOTE(review): support.color() was undefined in this module
    (NameError); the [COLOR] markup is inlined.
    """
    logger.info("[UMSFunSub.py]==> findvideos")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # The stream url is passed to the player via flashvars="file=..."
    patronvideo = 'flashvars="file=([^&]+)&'
    urlvideo = scrapertools.find_single_match(data, patronvideo)
    estensionevideo = urlvideo.split(".")[-1]
    itemlist.append(Item(channel=item.channel,
                         action="play",
                         title="[[COLOR orange].%s[/COLOR]] %s" % (estensionevideo, item.title),
                         fulltitle=item.fulltitle,
                         show=item.show,
                         url=urlvideo,
                         thumbnail=item.thumbnail))
    return itemlist
def makeurl(text, title=""):
    """Build an absolute site URL, optionally appending a quoted &titolo= parameter."""
    url = host + "/" + text
    if title:
        url += "&titolo=" + urllib.quote(title)
    return url