add italian channels
This commit is contained in:
61
plugin.video.alfa/channels/altadefinizione01.json
Normal file
61
plugin.video.alfa/channels/altadefinizione01.json
Normal file
@@ -0,0 +1,61 @@
|
||||
{
|
||||
"id": "altadefinizione01",
|
||||
"name": "Altadefinizione01",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",
|
||||
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/altadefinizione01.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"top channels"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
375
plugin.video.alfa/channels/altadefinizione01.py
Normal file
375
plugin.video.alfa/channels/altadefinizione01.py
Normal file
@@ -0,0 +1,375 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# *AddonNamer* - XBMC Plugin
|
||||
# Canale per altadefinizione01
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools, tmdb, scrapertoolsV2
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
#URL che reindirizza sempre al dominio corrente
|
||||
host = "https://altadefinizione01.to"
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango', 'rapidvideo', 'streamcherry', 'megadrive']
|
||||
list_quality = ['default']
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'altadefinizione01')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'altadefinizione01')
|
||||
|
||||
headers = None
|
||||
blacklist_categorie = ['Altadefinizione01', 'Altadefinizione.to']
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the altadefinizione01 channel: static entries plus autoplay."""
    logger.info("kod.altadefinizione01 mainlist")

    autoplay.init(item.channel, list_servers, list_quality)

    popcorn = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    lens = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"

    # (title, action, url, extra, thumbnail) for every root entry; empty url
    # or extra values are simply not passed to Item.
    entries = [
        ("[COLOR azure]In sala[/COLOR]", "sala", "%s/page/1/" % host, "", popcorn),
        ("[COLOR azure]Ultimi film inseriti[/COLOR]", "peliculas", "%s/page/1/" % host, "", popcorn),
        ("[COLOR azure]Sub ITA[/COLOR]", "subIta", "%s/sub-ita/" % host, "", popcorn),
        ("[COLOR azure]Categorie film[/COLOR]", "categorias", host, "", popcorn),
        ("[COLOR yellow]Cerca...[/COLOR]", "search", "", "movie", lens),
    ]

    itemlist = []
    for title, action, url, extra, thumb in entries:
        kwargs = dict(channel=item.channel, title=title, action=action, thumbnail=thumb)
        if url:
            kwargs["url"] = url
        if extra:
            kwargs["extra"] = extra
        itemlist.append(Item(**kwargs))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    # Entry point used by the global "novità" menu: returns the newest items
    # for the given category ("film" is the only one this channel supports;
    # any other value yields an empty list).
    logger.info("kod.altadefinizione01 newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)

            # Drop the trailing "next page" pseudo-item so only movies remain.
            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Best-effort by design: log the failure and return an empty list so the
    # global "newest" scan can continue with the other channels.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def createItem(data, item, itemlist, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText):
    """Build a movie Item from one scraped card and append it to itemlist.

    Looks up year / duration / plot for the card whose detail URL equals
    scrapedurl among the extra info blocks present in the same page.

    Args:
        data: full HTML of the listing page (searched for the info blocks).
        item: parent channel Item (only its channel name is used).
        itemlist: list the new Item is appended to (mutated in place).
        scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality: card fields.
        subDiv, subText: truthy marker + label when the movie is subtitled.
    """
    info = scrapertoolsV2.find_multiple_matches(data, '<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">')

    # Bug fix: the original left `duration`/`scrapedplot` undefined (NameError)
    # when `info` was empty, and kept the year of the LAST non-matching tuple
    # when no URL matched. Info is now only taken from the matching block.
    infoLabels = {}
    duration = ''
    scrapedplot = ''
    for year, dur, plot, checkUrl in info:
        if checkUrl == scrapedurl:
            infoLabels['year'] = year
            duration = dur
            scrapedplot = plot
            break

    if duration:
        # Duration is scraped as "NN min"; Kodi wants seconds.
        infoLabels['duration'] = int(duration.replace(' min', '')) * 60

    scrapedthumbnail = host + scrapedthumbnail
    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
    fulltitle = scrapedtitle
    if subDiv:
        fulltitle += ' (' + subText + ')'
    fulltitle += ' [' + scrapedquality.strip() + ']'

    itemlist.append(
        Item(channel=item.channel,
             action="findvideos",
             text_color="azure",
             contentType="movie",
             contentTitle=scrapedtitle,
             contentQuality=scrapedquality.strip(),
             plot=scrapedplot,
             title=fulltitle,
             url=scrapedurl,
             infoLabels=infoLabels,
             thumbnail=scrapedthumbnail))
|
||||
|
||||
|
||||
def sala(item):
    # Lists the movies currently in theaters ("In sala" section).
    # NOTE(review): the log tag says "peliculas" — copy/paste leftover.
    logger.info("kod.altadefinizione01 peliculas")
    itemlist = []

    # Download the listing page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # One tuple per movie card: url, thumbnail, title, quality label and an
    # optional "sub ita" badge (subDiv/subText); `empty` absorbs the regex's
    # empty alternative group.
    patron = '<div class="ml-mask">.*?<div class="cover_kapsul".*?<a href="(.*?)">.*?<img .*?src="(.*?)".*?alt="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedquality, subDiv, subText, empty in matches:
        createItem(data, item, itemlist, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText)

    # Enrich the items with TMDB metadata (poster, plot, rating).
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
|
||||
|
||||
|
||||
def subIta(item):
    """List only subtitled (Sub-ITA) titles by delegating to peliculas
    with the sub flag enabled."""
    logger.info("kod.altadefinizione01 subita")
    itemlist = peliculas(item, sub=True)
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item, sub=False):
    # Lists the movies found at item.url. When sub=True every match is kept
    # (used by subIta, whose pages contain only subtitled titles); otherwise
    # cards carrying the "sub ita" badge are skipped.
    logger.info("kod.altadefinizione01 peliculas")
    itemlist = []

    # Download the listing page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # One tuple per movie card: url, title, thumbnail, quality and the
    # optional "sub ita" badge; `empty` absorbs the regex's empty
    # alternative group.
    patron = '<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
    matches = scrapertoolsV2.find_multiple_matches(data, patron)

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
        if sub or not subDiv:
            createItem(data, item, itemlist, scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText)

    # Enrich the items with TMDB metadata.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination: find the "next page" link following the current marker.
    patronvideos = '<span>[^<]+</span>[^<]+<a href="(.*?)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        # Keep the same listing mode (all vs sub-only) on the next page.
        action = "peliculas" if not sub else "subIta"
        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """Menu of movie categories scraped from the site's sidebar list.

    Idiom fix: `not scrapedtitle in blacklist_categorie` replaced with the
    canonical `scrapedtitle not in blacklist_categorie`.
    """
    logger.info("kod.altadefinizione01 categorias")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    # Narrow the parse to the category <ul> only.
    bloque = scrapertools.get_match(data, '<ul class="kategori_list">(.*?)</ul>')

    # The categories are the entries of that list.
    patron = '<li><a href="([^"]+)">(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, scrapedtitle in matches:
        # Skip self-referential site entries that are not real categories.
        if scrapedtitle not in blacklist_categorie:
            scrapedurl = host + scrapedurl
            itemlist.append(
                Item(channel=item.channel,
                     # NOTE(review): "subIta" lists everything on the page,
                     # including subtitled entries; confirm it is the
                     # intended action for category pages.
                     action="subIta",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                     folder=True))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    # Free-text search entry point.
    # NOTE(review): `texto` is inserted into the query URL without URL
    # encoding — confirm the site tolerates raw multi-word queries.
    logger.info("[altadefinizione01.py] " + item.url + " search " + texto)
    item.url = "%s/index.php?do=search&story=%s&subaction=search" % (
        host, texto)
    try:
        if item.extra == "movie":
            return subIta(item)
        if item.extra == "tvshow":
            # NOTE(review): peliculas_tv is commented out at the bottom of
            # this file, so this branch raises NameError and falls into the
            # except below, returning [].
            return peliculas_tv(item)
    # Best-effort: log and return an empty list so a global search over all
    # channels keeps going after an error here.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def findvideos(item):
    # Resolves the playable video links for a movie, or for an episode whose
    # Item already carries the raw embed HTML in item.url.
    logger.info("[altadefinizione01.py] findvideos")

    # For episodes the "url" field already contains the data to scan.
    if item.contentType == "episode":
        data = item.url
    else:
        data = httptools.downloadpage(item.url, headers=headers).data

    # Let servertools detect every known hoster link inside the page.
    itemlist = servertools.find_video_items(data=data)

    # Propagate the movie metadata onto each server link.
    for videoitem in itemlist:
        videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Optionally verify that the first N links are alive (user setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required for FilterTools (language filtering).
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay.
    autoplay.start(itemlist, item)

    # Movies (not episodes) also get the "add to video library" entry.
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
|
||||
""" def peliculas_tv(item):
|
||||
logger.info("kod.altadefinizionezone peliculas")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
patron = '<section class="main">(.*?)</section>'
|
||||
data = scrapertools.find_single_match(data, patron)
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<h2 class="titleFilm"><a href="([^"]+)">(.*?)</a></h2>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="seasons",
|
||||
fulltitle=scrapedtitle,
|
||||
contentType='tv',
|
||||
contentTitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels(itemlist)
|
||||
|
||||
# Paginazione
|
||||
patronvideos = '<span>.*?</span>.*?href="(.*?)">'
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
|
||||
if matches:
|
||||
scrapedurl = urlparse.urljoin(item.url, matches[0])
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="peliculas_tv",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def seasons(item):
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
patron = '<li><a href="([^"]+)" data-toggle="tab">(.*?)</a></li>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedseason in matches:
|
||||
scrapedurl = item.url + scrapedurl
|
||||
scrapedtitle = item.title
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="episodios",
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
title="[COLOR azure]" + scrapedtitle + "[/COLOR]" + " " + "Stagione " + scrapedseason,
|
||||
url=scrapedurl,
|
||||
thumbnail=item.scrapedthumbnail,
|
||||
plot=item.scrapedplot,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
patron = 'class="tab-pane fade" id="%s">(.*?)class="tab-pane fade"' % item.url.split('#')[1]
|
||||
bloque = scrapertools.find_single_match(data, patron)
|
||||
patron = 'class="text-muted">.*?<[^>]+>(.*?)<[^>]+>[^>]+>[^>][^>]+>[^<]+<a href="#" class="slink" id="megadrive-(.*?)" data-link="(.*?)"'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque)
|
||||
|
||||
for scrapedtitle, scrapedepi, scrapedurl in matches:
|
||||
scrapedthumbnail = ""
|
||||
scrapedplot = ""
|
||||
scrapedepi = scrapedepi.split('_')[0] + "x" + scrapedepi.split('_')[1].zfill(2)
|
||||
scrapedtitle = scrapedepi + scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="episode",
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
# Comandi di servizio
|
||||
if config.get_videolibrary_support() and len(itemlist) != 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios",
|
||||
show=item.show))
|
||||
|
||||
return itemlist """
|
||||
62
plugin.video.alfa/channels/altadefinizioneclick.json
Normal file
62
plugin.video.alfa/channels/altadefinizioneclick.json
Normal file
@@ -0,0 +1,62 @@
|
||||
{
|
||||
"id": "altadefinizioneclick",
|
||||
"name": "AltadefinizioneClick",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneclick.png",
|
||||
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/altadefinizioneciclk.png",
|
||||
"categories": [
|
||||
"tvshow",
|
||||
"movie",
|
||||
"vos"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
310
plugin.video.alfa/channels/altadefinizioneclick.py
Normal file
310
plugin.video.alfa/channels/altadefinizioneclick.py
Normal file
@@ -0,0 +1,310 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per altadefinizioneclick
|
||||
# ----------------------------------------------------------
|
||||
import base64
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
host = "https://altadefinizione.center" ### <- cambio Host da .fm a .center
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango', "vidoza", "thevideo", "okru", 'youtube']
|
||||
list_quality = ['default']
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'altadefinizioneclick')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'altadefinizioneclick')
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
def mainlist(item):
    """Root menu of the altadefinizioneclick channel: static entries plus autoplay."""
    logger.info("kod.altadefinizione.pink mainlist")

    autoplay.init(item.channel, list_servers, list_quality)

    popcorn = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"

    # (title, action, url, extra, thumbnail) for each root entry; empty url
    # or extra values are simply not passed to Item.
    entries = [
        ("[COLOR azure]Novita'[/COLOR]", "fichas", host + "/nuove-uscite/", "", popcorn),
        ("[COLOR azure]Film per Genere[/COLOR]", "genere", host, "", popcorn),
        ("[COLOR azure]Film per Anno[/COLOR]", "anno", host, "", popcorn),
        ("[COLOR azure]Film Sub-Ita[/COLOR]", "fichas", host + "/sub-ita/", "", "http://i.imgur.com/qUENzxl.png"),
        ("[COLOR orange]Cerca...[/COLOR]", "search", "", "movie", "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
    ]

    itemlist = []
    for title, action, url, extra, thumb in entries:
        kwargs = dict(channel=item.channel, title=title, action=action, thumbnail=thumb)
        if url:
            kwargs["url"] = url
        if extra:
            kwargs["extra"] = extra
        itemlist.append(Item(**kwargs))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Free-text search: builds the ?s= query URL and scrapes the results page."""
    logger.info("[altadefinizioneclick.py] " + item.url + " search " + texto)

    item.url = host + "/?s=" + texto

    try:
        return fichas_src(item)
    # Best-effort: log the failure and return an empty result set so a
    # global search across channels is not interrupted.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def genere(item):
    """Menu of movie genres scraped from the site's "Film" category list."""
    logger.info("[altadefinizioneclick.py] genere")

    page = httptools.downloadpage(item.url, headers=headers).data

    # Restrict the parse to the "Film" category <ul> only.
    block = scrapertools.find_single_match(page, '<ul class="listSubCat" id="Film">(.*?)</ul>')

    links = re.compile('<li><a href="(.*?)">(.*?)</a></li>', re.DOTALL).findall(block)
    scrapertools.printMatches(links)

    return [Item(channel=item.channel,
                 action="fichas",
                 title=name,
                 url=link,
                 folder=True)
            for link, name in links]
|
||||
|
||||
|
||||
def anno(item):
    """Menu of release years scraped from the site's "Anno" list."""
    # NOTE: the log tag "genere" is kept byte-identical to the original.
    logger.info("[altadefinizioneclick.py] genere")

    page = httptools.downloadpage(item.url, headers=headers).data

    # Restrict the parse to the "Anno" list block.
    block = scrapertools.find_single_match(page, '<ul class="listSubCat" id="Anno">(.*?)</div>')

    years = re.compile('<li><a href="([^"]+)">([^<]+)</a></li>', re.DOTALL).findall(block)
    scrapertools.printMatches(years)

    return [Item(channel=item.channel,
                 action="fichas",
                 title=label,
                 url=link,
                 folder=True)
            for link, label in years]
|
||||
|
||||
|
||||
def fichas(item):
    # Lists the movie cards found at item.url, appending the IMDB rating to
    # each title, plus a "next page" entry when the site paginates.
    logger.info("[altadefinizioneclick.py] fichas")

    itemlist = []

    # Download the listing page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # One tuple per card: thumbnail, detail URL, title and IMDB score.
    patron = '<img width[^s]+src="([^"]+)[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)<\/a>[^>]+>[^>]+>[^>]+>(?:[^>]+>|)[^I]+IMDB\:\s*([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedpuntuacion in matches:

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Rating-free title used for TMDB lookups.
        clean_title = title
        title += " (" + scrapedpuntuacion + ")"

        # Attach the referer headers the image host expects.
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 contentTitle=clean_title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title))

    # Enrich the items with TMDB metadata.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination.
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
def fichas_src(item):
    # Lists the movie cards of a search-results page, whose markup differs
    # from the normal listings handled by fichas.
    # NOTE(review): the log tag says "fichas" — copy/paste leftover.
    logger.info("[altadefinizioneclick.py] fichas")

    itemlist = []

    # Download the results page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # One tuple per result: detail URL, thumbnail and title.
    patron = '<a href="([^"]+)">\s*<div[^=]+=[^=]+=[^=]+=[^=]+=[^=]+="(.*?)"[^>]+>[^<]+<[^>]+>\s*<h[^=]+="titleFilm">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Strip a trailing "(rating)" so TMDB lookups use the bare title.
        clean_title = re.sub(r'\(\d+\.?\d*\)', '', title).strip()

        # Attach the referer headers the image host expects.
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 contentTitle=clean_title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title))

    # Enrich the items with TMDB metadata.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination.
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas_src",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
def findvideos(item):
    # Resolves the playable links for a movie. The movie page embeds an
    # iframe; when it points to the "hdpass" gateway, every resolution and
    # mirror page is walked and each obfuscated embed URL is decoded via
    # url_decode before server detection.
    logger.info("[altadefinizioneclick.py] findvideos")

    itemlist = []

    # Download the movie page (newlines stripped to keep the regexes simple).
    data = httptools.downloadpage(item.url, headers=headers).data.replace('\n', '')
    patron = r'<iframe id="[^"]+" width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
    url = scrapertools.find_single_match(data, patron).replace("?alta", "")
    url = url.replace("&download=1", "")

    if 'hdpass' in url:
        data = httptools.downloadpage(url, headers=headers).data

        # Keep only the block between the resolution selector and the player.
        start = data.find('<div class="row mobileRes">')
        end = data.find('<div id="playerFront">', start)
        data = data[start:end]

        patron_res = '<div class="row mobileRes">(.*?)</div>'
        patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
        patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)"\s*/>'

        res = scrapertools.find_single_match(data, patron_res)

        urls = []
        # Walk every available resolution...
        for res_url, res_video in scrapertools.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):

            data = httptools.downloadpage(urlparse.urljoin(url, res_url), headers=headers).data.replace('\n', '')

            mir = scrapertools.find_single_match(data, patron_mir)

            # ...and every mirror offered for that resolution.
            # NOTE(review): the closing tag here is '</value>' rather than
            # '</option>' — confirm against the live site markup.
            for mir_url in scrapertools.find_multiple_matches(mir, '<option.*?value="([^"]+?)">[^<]+?</value>'):

                data = httptools.downloadpage(urlparse.urljoin(url, mir_url), headers=headers).data.replace('\n', '')

                # Each mirror page carries the obfuscated embed URL.
                for media_label, media_url in re.compile(patron_media).findall(data):
                    urls.append(url_decode(media_url))

        # Hand the decoded URLs to the generic hoster detector.
        itemlist = servertools.find_video_items(data='\n'.join(urls))
        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = item.channel
            videoitem.contentType = item.contentType
            videoitem.language = IDIOMAS['Italiano']

    # Optionally verify that the first N links are alive (user setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required for FilterTools (language filtering).
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay.
    autoplay.start(itemlist, item)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
|
||||
def url_decode(url_enc):
    """Decode an obfuscated media URL from the site's player pages.

    The site scrambles the base64 form of the real URL by swapping the two
    halves of the string and reversing it; odd-length payloads keep their
    final character aside and re-append it before decoding.

    Bug fixes vs. the original:
    - `url_enc[lenght - 1] = ' '` raised TypeError (Python strings are
      immutable); replaced with slicing.
    - `/` division produced a float index under Python 3; `//` works on
      both Python 2 and 3.

    Args:
        url_enc: the scrambled string scraped from the player page.

    Returns:
        The decoded URL (result of base64.b64decode).
    """
    length = len(url_enc)

    if length % 2 == 0:
        # Even payload: swap halves, reverse, then base64-decode.
        half = length // 2
        url_enc = url_enc[half:] + url_enc[:half]
        return base64.b64decode(url_enc[::-1])

    # Odd payload: keep the last character aside, unscramble the rest as in
    # the even case, then re-append it before decoding.
    last_car = url_enc[-1]
    url_enc = url_enc[:-1].strip()
    len1 = len(url_enc)
    half = len1 // 2
    url_enc = url_enc[half:] + url_enc[:half]
    reverse = url_enc[::-1] + last_car
    return base64.b64decode(reverse)
|
||||
33
plugin.video.alfa/channels/altadefinizionehd.json
Normal file
33
plugin.video.alfa/channels/altadefinizionehd.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"id": "altadefinizionehd",
|
||||
"name": "AltadefinizioneHD",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https://altadefinizione.wiki/logowiki.png",
|
||||
"bannermenu": "https://altadefinizione.wiki/logowiki.png",
|
||||
"categories": [
|
||||
"tvshow",
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
229
plugin.video.alfa/channels/altadefinizionehd.py
Normal file
229
plugin.video.alfa/channels/altadefinizionehd.py
Normal file
@@ -0,0 +1,229 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per filmissimi
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import httptools, scrapertools, servertools
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
|
||||
|
||||
host = "https://altadefinizione.wiki"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the AltadefinizioneHD channel.

    Bug fix: the original referenced NovitaThumbnail / GenereThumbnail /
    CercaThumbnail / FilmFanart, which are never defined in this module
    (leftovers from filmissimi.py), so opening the channel raised NameError.
    They are defined locally with the artwork used by the sibling channels.
    """
    # NOTE(review): log tag still says filmissimi.py (file was copied from it).
    logger.info("[filmissimi.py] mainlist")

    novita_thumbnail = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    genere_thumbnail = novita_thumbnail
    cerca_thumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
    film_fanart = ""

    itemlist = [Item(channel=item.channel,
                     action="elenco",
                     title="[COLOR yellow]Novita'[/COLOR]",
                     url=host,
                     thumbnail=novita_thumbnail,
                     fanart=film_fanart),
                Item(channel=item.channel,
                     action="elenco",
                     title="[COLOR azure]Film Sub-Ita[/COLOR]",
                     url=host + "/genere/sub-ita",
                     thumbnail=novita_thumbnail,
                     fanart=film_fanart),
                Item(channel=item.channel,
                     action="elenco",
                     title="[COLOR azure]Film HD[/COLOR]",
                     url=host + "/genere/film-in-hd",
                     thumbnail=novita_thumbnail,
                     fanart=film_fanart),
                Item(channel=item.channel,
                     action="genere",
                     title="[COLOR azure]Genere[/COLOR]",
                     url=host,
                     thumbnail=genere_thumbnail,
                     fanart=film_fanart),
                Item(channel=item.channel,
                     action="search",
                     extra="movie",
                     title="[COLOR orange]Cerca..[/COLOR]",
                     thumbnail=cerca_thumbnail,
                     fanart=film_fanart)]

    return itemlist
|
||||
|
||||
|
||||
|
||||
def newest(categoria):
    """Entry point used by the addon's global "newest" menu.

    Returns this channel's newest film items for *categoria*, or an empty
    list on any scraping error (errors are logged, never raised).
    """
    logger.info("[filmissimi.py] newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            # BUGFIX: was hard-coded to "http://www.filmissimi.net"
            # (copy/paste from another channel); this channel scrapes `host`.
            item.url = host
            item.action = "elenco"
            itemlist = elenco(item)

            # Drop the trailing "next page" pseudo-item, if present.
            if itemlist[-1].action == "elenco":
                itemlist.pop()

    # Keep the global "newest" scan alive on any error.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
|
||||
def genere(item):
    """List the genre categories scraped from the site's category menu."""
    logger.info("[filmissimi.py] genere")

    data = httptools.downloadpage(item.url, headers=headers).data
    # Restrict matching to the genre <ul> block.
    bloque = scrapertools.get_match(data, '<ul id="menu-categorie-1" class="ge">(.*?)</div>')

    patron = '<li id=[^>]+><a href="(.*?)">(.*?)</a></li>'

    itemlist = []
    for genre_url, genre_title in re.compile(patron, re.DOTALL).findall(bloque):
        itemlist.append(
            Item(channel=item.channel,
                 action="elenco",
                 title="[COLOR azure]" + genre_title + "[/COLOR]",
                 url=genre_url,
                 thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def elenco(item):
    """Scrape one listing page and return movie items plus a pager entry.

    Parses the block between <div class="estre"> and the pagination div,
    extracting (url, title, thumbnail) per movie, then appends either a
    "next page" item or a "back to main menu" item.
    """
    logger.info("[filmissimi.py] elenco")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    # Restrict matching to the listing block only.
    elemento = scrapertools.find_single_match(data, r'<div class="estre">(.*?)<div class="paginacion">')

    patron = r'<a href="([^"]+)" title="([^"]+)"[^>]*>[^>]+>\s*.*?img src="([^"]+)"[^>]*>'
    matches = re.compile(patron, re.DOTALL).findall(elemento)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        logger.info("title=[" + scrapedtitle + "] url=[" + scrapedurl + "] thumbnail=[" + scrapedthumbnail + "]")
        # infoIca enriches the item with TMDB metadata before listing it.
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail), tipo="movie"))

    # Pagination
    # ===========================================================================================================================
    matches = scrapedSingle(item.url, '<div class="paginacion">(.*?)</div>',
                            "current'>.*?<\/span><.*?href='(.*?)'>.*?</a>")
    if len(matches) > 0:
        paginaurl = matches[0]
        itemlist.append(
            Item(channel=item.channel, action="elenco", title=AvantiTxt, url=paginaurl, thumbnail=AvantiImg))
    else:
        # No next page found: offer a way back to the main menu instead.
        itemlist.append(Item(channel=item.channel, action="mainlist", title=ListTxt, folder=True))
    # ===========================================================================================================================

    return itemlist
|
||||
|
||||
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto* and return matching movie items."""
    logger.info("[filmissimi.py] init texto=[" + texto + "]")

    search_url = host + "/?s=" + texto
    data = httptools.downloadpage(search_url, headers=headers).data

    patron = 'src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)"[^>]*>([^<]+)<\/a>'

    itemlist = []
    for result_thumb, result_url, result_title in re.compile(patron, re.DOTALL).findall(data):
        result_title = scrapertools.decodeHtmlentities(result_title)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 title="[COLOR azure]" + result_title + "[/COLOR]",
                 fulltitle=result_title,
                 url=result_url,
                 thumbnail=result_thumb), tipo="movie"))

    # Pagination: reuse the shared helper on the search URL.
    # ===========================================================================================================================
    pages = scrapedSingle(search_url, '<div class="paginacion">(.*?)</div>', "current'>.*?<\/span><.*?href='(.*?)'>.*?</a>")

    if pages:
        itemlist.append(Item(channel=item.channel, action="elenco", title=AvantiTxt, url=pages[0], thumbnail=AvantiImg))
    else:
        itemlist.append(Item(channel=item.channel, action="mainlist", title=ListTxt, folder=True))
    # ===========================================================================================================================
    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve playable server links from the movie's detail page."""
    logger.info()

    page = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=page)

    for video in itemlist:
        # Derive a clean server name from the autodetected server title.
        server_name = re.sub(r'[-\[\]\s]+', '', video.title)
        video.title = "".join(["[COLOR azure][[COLOR orange]%s[/COLOR]][/COLOR] " % server_name.capitalize(), item.title])
        video.fulltitle = item.fulltitle
        video.show = item.show
        video.thumbnail = item.thumbnail
        video.channel = item.channel

    return itemlist
|
||||
|
||||
|
||||
|
||||
def scrapedAll(url="", patron=""):
    """Download *url* and return every DOTALL regex match of *patron*."""
    data = httptools.downloadpage(url, headers=headers).data
    matches = re.findall(patron, data, re.DOTALL)
    scrapertools.printMatches(matches)
    return matches
|
||||
|
||||
|
||||
|
||||
def scrapedSingle(url="", single="", patron=""):
    """Download *url*, cut out the *single* block, return *patron* matches."""
    data = httptools.downloadpage(url, headers=headers).data
    block = scrapertools.find_single_match(data, single)
    matches = re.findall(patron, block, re.DOTALL)
    scrapertools.printMatches(matches)
    return matches
|
||||
|
||||
|
||||
|
||||
# --- Static artwork / UI text used by the menu builders above --------------
NovitaThumbnail = "https://superrepo.org/static/images/icons/original/xplugin.video.moviereleases.png.pagespeed.ic.j4bhi0Vp3d.png"
GenereThumbnail = "https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png"
FilmFanart = "https://superrepo.org/static/images/fanart/original/script.artwork.downloader.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
# "Back to main menu" label (Italian) used when no next page exists.
ListTxt = "[COLOR orange]Torna a elenco principale [/COLOR]"
# Localized "next page" label (string 30992) and pager icon.
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
thumbnail = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
|
||||
30
plugin.video.alfa/channels/animeforce.json
Normal file
30
plugin.video.alfa/channels/animeforce.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"id": "animeforce",
|
||||
"name": "AnimeForce",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://www.animeforce.org/wp-content/uploads/2013/05/logo-animeforce.png",
|
||||
"banner": "http://www.animeforce.org/wp-content/uploads/2013/05/logo-animeforce.png",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
504
plugin.video.alfa/channels/animeforce.py
Normal file
504
plugin.video.alfa/channels/animeforce.py
Normal file
@@ -0,0 +1,504 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per http://animeinstreaming.net/
|
||||
# ------------------------------------------------------------
|
||||
import re, urllib, urlparse
|
||||
|
||||
from core import httptools, scrapertools, servertools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from servers.decrypters import adfly
|
||||
|
||||
|
||||
|
||||
host = "https://ww1.animeforce.org"
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
PERPAGE = 20
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def mainlist(item):
    """Root menu of the AnimeForce channel."""
    log("mainlist", "mainlist", item.channel)

    # (action, title, url) triples for the category entries.
    menu = [
        ("lista_anime", "[COLOR azure]Anime [/COLOR]- [COLOR lightsalmon]Lista Completa[/COLOR]", host + "/lista-anime/"),
        ("animeaggiornati", "[COLOR azure]Anime Aggiornati[/COLOR]", host),
        ("ultimiep", "[COLOR azure]Ultimi Episodi[/COLOR]", host),
    ]
    itemlist = [Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=CategoriaThumbnail,
                     fanart=CategoriaFanart)
                for action, title, url in menu]

    # Search entry has no URL; it prompts the user for a query.
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title="[COLOR yellow]Cerca ...[/COLOR]",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def newest(categoria):
    """Global "newest" hook: return the latest episodes for *categoria*."""
    log("newest", "newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "anime":
            item.url = host
            item.action = "ultimiep"
            itemlist = ultimiep(item)

            # Strip the trailing "next page" pseudo-item, if present.
            if itemlist[-1].action == "ultimiep":
                itemlist.pop()
        return itemlist
    # Keep the global "newest" scan alive on any scraping error.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def search(item, texto):
    """Site-wide search entry point; delegates parsing to search_anime.

    Returns [] on any scraping error so the addon's global search can
    continue with other channels.
    """
    log("search", "search", item.channel)
    item.url = host + "/?s=" + texto
    try:
        return search_anime(item)
    # Keep the global search alive on any error.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def search_anime(item):
    """Parse a search-results page into series/episode items.

    BUGFIX: the original filter read
        if "Sub Ita Download & Streaming" in scrapedtitle or "Sub Ita Streaming":
    whose second operand is a non-empty string literal, i.e. always truthy,
    so the condition never filtered anything. Both markers are now tested
    for membership in the title.
    """
    log("search_anime", "search_anime", item.channel)
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = r'<a href="([^"]+)"><img.*?src="([^"]+)".*?title="([^"]+)".*?/>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if "Sub Ita Download & Streaming" in scrapedtitle or "Sub Ita Streaming" in scrapedtitle:
            if 'episodio' in scrapedtitle.lower():
                # Single-episode result.
                itemlist.append(episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail))
            else:
                # Whole-series result: clean the title and build the series URL.
                scrapedtitle, eptype = clean_title(scrapedtitle, simpleClean=True)
                cleantitle, eptype = clean_title(scrapedtitle)

                scrapedurl, total_eps = create_url(scrapedurl, cleantitle)

                itemlist.append(
                    Item(channel=item.channel,
                         action="episodios",
                         text_color="azure",
                         contentType="tvshow",
                         title=scrapedtitle,
                         url=scrapedurl,
                         fulltitle=cleantitle,
                         show=cleantitle,
                         thumbnail=scrapedthumbnail))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Next Page
    next_page = scrapertools.find_single_match(data, r'<link rel="next" href="([^"]+)"[^/]+/>')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="search_anime",
                 text_bold=True,
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def animeaggiornati(item):
    """List recently-updated series scraped from the home page."""
    log("animeaggiornati", "animeaggiornati", item.channel)
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
    for thumb, url, raw_title in re.compile(patron, re.DOTALL).findall(data):
        raw_title = scrapertools.decodeHtmlentities(raw_title)
        if 'Streaming' not in raw_title:
            continue
        cleantitle, eptype = clean_title(raw_title)

        # Build the series page URL from the scraped link.
        url, total_eps = create_url(url, raw_title)

        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 text_color="azure",
                 contentType="tvshow",
                 title=cleantitle,
                 url=url,
                 fulltitle=cleantitle,
                 show=cleantitle,
                 thumbnail=thumb))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def ultimiep(item):
    """List the latest released episodes scraped from the home page."""
    log("ultimiep", "ultimiep", item.channel)
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="([^"]+)">([^<]+)</a>'
    for thumb, url, raw_title in re.compile(patron, re.DOTALL).findall(data):
        raw_title = scrapertools.decodeHtmlentities(raw_title)
        if 'Streaming' in raw_title:
            itemlist.append(episode_item(item, raw_title, url, thumb))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def lista_anime(item):
    """Full A-Z anime list, paginated client-side.

    The complete list is a single page on the site; PERPAGE entries are
    shown at a time. The current page number is smuggled inside the URL
    after a '{}' separator (e.g. ".../lista-anime/{}3").
    """
    log("lista_anime", "lista_anime", item.channel)

    itemlist = []

    p = 1
    if '{}' in item.url:
        # Split off the page number appended by the previous call.
        item.url, p = item.url.split('{}')
        p = int(p)

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract entries
    patron = r'<li>\s*<strong>\s*<a\s*href="([^"]+?)">([^<]+?)</a>\s*</strong>\s*</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Client-side pagination window: skip entries outside page p.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break

        # Title cleanup
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        cleantitle, eptype = clean_title(scrapedtitle, simpleClean=True)

        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 text_color="azure",
                 contentType="tvshow",
                 title=cleantitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=cleantitle,
                 show=cleantitle,
                 plot=scrapedplot,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # More entries remain: append a "next page" item with p+1 encoded in the URL.
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="lista_anime",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def episodios(item):
    """List an anime's episodes from its download/streaming table.

    Rows whose icons are 'nodownload'/'nostreaming' are skipped; VVVVID
    links are unsupported and replaced with a red notice item.
    """
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<td style="[^"]*?">\s*.*?<strong>(.*?)</strong>.*?\s*</td>\s*<td style="[^"]*?">\s*<a href="([^"]+?)"[^>]+>\s*<img.*?src="([^"]+?)".*?/>\s*</a>\s*</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    vvvvid_videos = False
    for scrapedtitle, scrapedurl, scrapedimg in matches:
        if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
            continue
        if 'vvvvid' in scrapedurl.lower():
            # Flag that at least one row is VVVVID (disables the
            # "add to library" entry below); add a notice per row.
            if not vvvvid_videos: vvvvid_videos = True
            itemlist.append(Item(title='I Video VVVVID Non sono supportati', text_color="red"))
            continue

        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # Strip any markup left inside the episode label.
        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
        scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapedtitle,
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.fanart,
                 thumbnail=item.thumbnail))

    # Service commands: offer "add series to library" only when the
    # videolibrary is enabled and no VVVVID-only rows were found.
    if config.get_videolibrary_support() and len(itemlist) != 0 and not vvvvid_videos:
        itemlist.append(
            Item(channel=item.channel,
                 title=config.get_localized_string(30161),
                 text_color="yellow",
                 text_bold=True,
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
# ==================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def findvideos(item):
    """Resolve an episode page (through adf.ly/bit.ly shorteners) into
    playable video items.

    BUGFIX: the original appended to the module-level ``headers`` list, so
    every call leaked extra Referer entries into all later requests made
    by this module; a per-call copy is used instead.
    """
    logger.info("kod.animeforce findvideos")

    itemlist = []

    if item.extra:
        data = httptools.downloadpage(item.url, headers=headers).data

        # item.extra holds a regex anchor locating this episode's table row.
        blocco = scrapertools.get_match(data, r'%s(.*?)</tr>' % item.extra)
        url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]*>')
        if 'vvvvid' in url.lower():
            itemlist = [Item(title='I Video VVVVID Non sono supportati', text_color="red")]
            return itemlist
        if 'http' not in url: url = "".join(['https:', url])
    else:
        url = item.url

    # Unwrap URL shorteners.
    if 'adf.ly' in url:
        url = adfly.get_long_url(url)
    elif 'bit.ly' in url:
        url = httptools.downloadpage(url, only_headers=True, follow_redirects=False).headers.get("location")

    if 'animeforce' in url:
        # Per-call header list: do NOT mutate the module-level `headers`.
        request_headers = headers + [['Referer', item.url]]
        data = httptools.downloadpage(url, headers=request_headers).data
        itemlist.extend(servertools.find_video_items(data=data))

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel
            videoitem.contentType = item.contentType

        # Direct mp4 sources hosted on animeforce itself.
        url = url.split('&')[0]
        data = httptools.downloadpage(url, headers=request_headers).data
        patron = """<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
        matches = re.compile(patron, re.DOTALL).findall(data)
        request_headers.append(['Referer', url])
        for video in matches:
            itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                                 url=video + '|' + urllib.urlencode(dict(request_headers)), folder=False))
    else:
        itemlist.extend(servertools.find_video_items(data=url))

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel
            videoitem.contentType = item.contentType

    return itemlist
|
||||
|
||||
|
||||
# ==================================================================
|
||||
|
||||
# =================================================================
|
||||
# Funzioni di servizio
|
||||
# -----------------------------------------------------------------
|
||||
def scrapedAll(url="", patron=""):
    """Download *url* and return all DOTALL regex matches of *patron*."""
    page = httptools.downloadpage(url).data
    matches = re.findall(patron, page, re.DOTALL)
    scrapertools.printMatches(matches)
    return matches
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def create_url(url, title, eptype=""):
    """Turn an episode URL into its series "download-streaming" URL.

    Returns (url, total_eps), where total_eps is the "(NN-Episodio)"
    total-episode count found in *title* (empty string when absent).
    """
    logger.info()

    if 'download' not in url:
        url = url.replace('-streaming', '-download-streaming')

    total_eps = ""
    if not eptype:
        # No episode marker: strip any "episodio-N(-M)(-oav)" slug parts.
        url = re.sub(r'episodio?-?\d+-?(?:\d+-|)[oav]*', '', url)
    else:  # Runs only for actual episodes
        # This number will be removed from the URL below.
        total_eps = scrapertools.find_single_match(title.lower(), r'\((\d+)-(?:episodio|sub-ita)\)')
        if total_eps: url = url.replace('%s-' % total_eps, '')
        url = re.sub(r'%s-?\d*-' % eptype.lower(), '', url)
        url = url.replace('-fine', '')

    return url, total_eps
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def clean_title(title, simpleClean=False):
    """Strip site boilerplate ("Streaming", "Download", "Sub Ita", HTML
    entity leftovers) from *title*.

    With simpleClean=False, also detects and removes the episode marker.
    Returns (cleantitle, eptype) where eptype is "Episodio"/"OAV" or "".
    """
    logger.info()

    title = title.replace("Streaming", "").replace("&", "")
    title = title.replace("Download", "")
    title = title.replace("Sub Ita", "")
    cleantitle = title.replace("#038;", "").replace("amp;", "").strip()

    if '(Fine)' in title:
        # Re-append the "(Fine)" (= series ended) tag at the very end.
        cleantitle = cleantitle.replace('(Fine)', '').strip() + " (Fine)"
    eptype = ""
    if not simpleClean:
        if "episodio" in title.lower():
            eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
            cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', title).strip()

        if 'episodio' not in eptype.lower():
            cleantitle = re.sub(r'Episodio?\s*\d+\s*(?:\(\d+\)|)\s*[\(OAV\)]*', '', cleantitle).strip()

        if '(Fine)' in title:
            cleantitle = cleantitle.replace('(Fine)', '')

    return cleantitle, eptype
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def episode_item(item, scrapedtitle, scrapedurl, scrapedthumbnail):
    """Build a findvideos Item for a single-episode scrape result.

    The returned Item's `extra` carries a regex that later locates this
    episode's table row on the series page (consumed by findvideos).
    """
    scrapedtitle, eptype = clean_title(scrapedtitle, simpleClean=True)
    cleantitle, eptype = clean_title(scrapedtitle)

    # Build the series URL from the episode link.
    scrapedurl, total_eps = create_url(scrapedurl, scrapedtitle, eptype)

    epnumber = ""
    if 'episodio' in eptype.lower():
        epnumber = scrapertools.find_single_match(scrapedtitle.lower(), r'episodio?\s*(\d+)')
        # Extend eptype into the row-matching regex fragment below.
        eptype += ":? %s%s" % (epnumber, (r" \(%s\):?" % total_eps) if total_eps else "")

    extra = "<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
    item = Item(channel=item.channel,
                action="findvideos",
                contentType="tvshow",
                title=scrapedtitle,
                text_color="azure",
                url=scrapedurl,
                fulltitle=cleantitle,
                extra=extra,
                show=cleantitle,
                thumbnail=scrapedthumbnail)
    return item
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def scrapedSingle(url="", single="", patron=""):
    """Download *url*, extract the *single* sub-block, return *patron* matches."""
    page = httptools.downloadpage(url).data
    block = scrapertools.find_single_match(page, single)
    matches = re.findall(patron, block, re.DOTALL)
    scrapertools.printMatches(matches)
    return matches
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def Crea_Url(pagina="1", azione="ricerca", categoria="", nome=""):
    """Build an AJAX query URL, e.g.
    <host>/chiamate.php?azione=ricerca&cat=&nome=&pag=1

    BUGFIX: `host` carries no trailing slash, so the original concatenation
    produced ".../animeforce.orgchiamate.php"; a "/" separator is inserted.
    """
    Stringa = host + "/chiamate.php?azione=" + azione + "&cat=" + categoria + "&nome=" + nome + "&pag=" + pagina
    log("crea_Url", Stringa)
    return Stringa
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def log(funzione="", stringa="", canale=""):
    """Uniform debug logging in the form "[channel].[function] message"."""
    logger.debug("[" + canale + "].[" + funzione + "] " + stringa)
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# =================================================================
|
||||
# riferimenti di servizio
|
||||
# -----------------------------------------------------------------
|
||||
# --- Static artwork / UI text shared by the menu builders -------------------
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
# Localized "next page" label (string 30992) and pager icon.
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
|
||||
58
plugin.video.alfa/channels/animeleggendari.json
Normal file
58
plugin.video.alfa/channels/animeleggendari.json
Normal file
@@ -0,0 +1,58 @@
|
||||
{
|
||||
"id": "animeleggendari",
|
||||
"name": "AnimeLeggendari",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https://animeleggendari.com/wp-content/uploads/2018/01/123header.jpg",
|
||||
"bannermenu": "https://animeleggendari.com/wp-content/uploads/2018/01/123header.jpg",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in novit\u00e0 - Anime",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "3", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": ["Non filtrare", "IT"]
|
||||
}
|
||||
]
|
||||
}
|
||||
224
plugin.video.alfa/channels/animeleggendari.py
Normal file
224
plugin.video.alfa/channels/animeleggendari.py
Normal file
@@ -0,0 +1,224 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per animeleggendari
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import servertools, httptools, scrapertools, tmdb
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
|
||||
host = "https://animeleggendari.com"

# Required by Autoplay: language map and supported servers/qualities.
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango']
list_quality = ['default']

# User settings: whether (and how many) links to existence-check.
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animeleggendari')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animeleggendari')
|
||||
|
||||
def mainlist(item):
    """Root menu of the AnimeLeggendari channel."""
    logger.info('[animeleggendari.py] mainlist')

    # Required by Autoplay
    autoplay.init(item.channel, list_servers, list_quality)

    folder_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    # (action, title, url) triples for the static category entries.
    menu = [
        ("lista_anime", "[B]Anime Leggendari[/B]", "%s/category/anime-leggendari/" % host),
        ("lista_anime", "Anime [B]ITA[/B]", "%s/category/anime-ita/" % host),
        ("lista_anime", "Anime [B]SUB ITA[/B]", "%s/category/anime-sub-ita/" % host),
        ("lista_anime", "Conclusi", "%s/category/serie-anime-concluse/" % host),
        ("lista_anime", "In Corso", "%s/category/anime-in-corso/" % host),
        ("generi", "Generi >", host),
    ]
    itemlist = [Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=folder_thumb)
                for action, title, url in menu]

    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title="[B]Cerca...[/B]",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    # Autoplay: show its configuration entry in the menu.
    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
def search(item, texto):
    """Global-search entry point: query the site and list the results.

    Builds the site search URL from *texto* and delegates to lista_anime.
    On any scraping error the exception info is logged and an empty list
    is returned so the global search of other channels can continue.
    """
    logger.info('[animeleggendari.py] search')

    item.url = host + "/?s=" + texto
    try:
        return lista_anime(item)
    except:
        # Keep the global search alive: log the failure and return nothing.
        import sys
        exc_parts = sys.exc_info()
        for part in exc_parts:
            logger.error("%s" % part)
        return []
|
||||
|
||||
def generi(item):
    """List the genre categories scraped from the site menu.

    Downloads item.url, isolates the <ul> that follows the "Generi" label
    and emits one "lista_anime" Item per genre link found in it.
    """
    logger.info('[animeleggendari.py] generi')
    itemlist = []

    # Flatten the page so the single-line regexes below can match across it.
    data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
    logger.info("[animeleggendari.py] generi= "+data)

    # Genre links live in the first <ul> after the "Generi" heading.
    blocco =scrapertools.find_single_match(data, r'Generi.*?<ul.*?>(.*?)<\/ul>')
    logger.info("[animeleggendari.py] blocco= "+blocco)
    patron = '<a href="([^"]+)">([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(blocco)
    logger.info("[animeleggendari.py] matches= "+str(matches))

    for scrapedurl, scrapedtitle in matches:
        # Drop the redundant "Anime " prefix from the menu labels.
        title = scrapedtitle.replace('Anime ','')
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_anime",
                 title=title,
                 url=scrapedurl))

    return itemlist
|
||||
|
||||
def lista_anime(item):
    """List the anime found on a category/search page.

    Emits one "episodios" Item per show (movie vs tvshow is inferred from
    the title), enriches them via TMDB, and appends a "next page" Item
    when the page links a following page.
    """
    logger.info('[animeleggendari.py] lista_anime')
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = r'<a class="[^"]+" href="([^"]+)" title="([^"]+)"><img[^s]+src="([^"]+)"[^>]+'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()).replace("streaming", "")
        # Skip the site's editorial "top 10" post, which is not a show.
        if 'top 10 anime da vedere' in scrapedtitle.lower(): continue

        # Language tag ("ITA" / "SUB ITA") is embedded in the title.
        lang = scrapertools.find_single_match(scrapedtitle, r"((?:SUB ITA|ITA))")
        cleantitle = scrapedtitle.replace(lang, "").replace('(Streaming & Download)', '')
        cleantitle = cleantitle.replace('OAV', '').replace('OVA', '').replace('MOVIE', '')
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 contentType="tvshow" if 'movie' not in scrapedtitle.lower() and 'ova' not in scrapedtitle.lower() else "movie",
                 text_color="azure",
                 title=scrapedtitle.replace('(Streaming & Download)', '').replace(lang, '[B][' + lang + '][/B]'),
                 fulltitle=cleantitle,
                 url=scrapedurl,
                 show=cleantitle,
                 thumbnail=scrapedthumbnail,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    patronvideos = r'<a class="next page-numbers" href="([^"]+)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        # BUG FIX: use the scraped "next page" link. The original reused the
        # stale loop variable `scrapedurl`, so paging pointed at the last show.
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_anime",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=matches[0],
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
def episodios(item):
    """Build the episode list for an anime page.

    The page itself is episode 1; further episodes come from the page's
    pagination block. Also appends the "add to library" entry when the
    video library is enabled.
    """
    logger.info('[animeleggendari.py] episodios')
    itemlist = []

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(.*?)</span></a></div>')

    # The first episode is the page itself
    itemlist.append(
        Item(channel=item.channel,
             action="findvideos",
             contentType=item.contentType,
             title="Episodio: 1",
             text_color="azure",
             fulltitle="%s %s %s " % (color(item.title, "deepskyblue"), color("|", "azure"), color("1", "orange")),
             url=item.url,
             thumbnail=item.thumbnail,
             folder=True))
    if blocco != "":
        patron = r'<a href="([^"]+)".*?><span class="pagelink">(\d+)</span></a>'
        # BUG FIX: scan the extracted pagination block rather than the whole
        # page; `blocco` was extracted for this but the regex ran on `data`,
        # so unrelated pagelink markup elsewhere on the page could leak in.
        matches = re.compile(patron, re.DOTALL).findall(blocco)
        for scrapedurl, scrapednumber in matches:
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title="Episodio: %s" % scrapednumber,
                     text_color="azure",
                     fulltitle="%s %s %s " % (color(item.title, "deepskyblue"), color("|", "azure"), color(scrapednumber, "orange")),
                     url=scrapedurl,
                     thumbnail=item.thumbnail,
                     folder=True))

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodi",
                 show=item.show))

    return itemlist
|
||||
|
||||
def findvideos(item):
    """Find playable server links on an episode page.

    Delegates link discovery to servertools, rebrands each found item with
    the server name and this item's metadata, then applies link checking,
    language filtering and autoplay.
    """
    logger.info('[animeleggendari.py] findvideos')

    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        # Derive a clean server name from the "[servername]"-style title.
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = "".join(["[%s] " % color(server.capitalize(), 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    # Required for "check whether links exist" channel setting
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Autoplay
    autoplay.start(itemlist, item)

    return itemlist
|
||||
|
||||
def color(text, color):
    """Return *text* wrapped in Kodi [COLOR] markup for the given color name."""
    tagged = "[COLOR %s]%s[/COLOR]" % (color, text)
    return tagged
|
||||
24
plugin.video.alfa/channels/animestream.json
Normal file
24
plugin.video.alfa/channels/animestream.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "animestream",
|
||||
"name": "Animestream",
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "http:\/\/i.imgur.com\/83bw6iB.jpg",
|
||||
"bannermenu": "http:\/\/i.imgur.com\/83bw6iB.jpg",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
265
plugin.video.alfa/channels/animestream.py
Normal file
265
plugin.video.alfa/channels/animestream.py
Normal file
@@ -0,0 +1,265 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per animestream
|
||||
# ----------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
|
||||
host = "http://www.animestream.it/"
|
||||
|
||||
hostcategoria = host + "/Ricerca-Tutti-pag1"
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the animestream channel.

    Offers the full anime listing, the category browser and the search
    entry. URLs are built via Crea_Url; artwork comes from the
    module-level thumbnail/fanart constants defined at the bottom of
    this file.
    """
    logger.info("kod.animestram mainlist")

    itemlist = [Item(channel=item.channel,
                     action="lista_anime",
                     title="[COLOR azure]Anime[/COLOR]",
                     url=Crea_Url(),
                     thumbnail=AnimeThumbnail,
                     fanart=AnimeFanart),
                Item(channel=item.channel,
                     action="categoria",
                     title="[COLOR azure]Categorie[/COLOR]",
                     url=hostcategoria,
                     thumbnail=CategoriaThumbnail,
                     fanart=CategoriaFanart),
                Item(channel=item.channel,
                     action="search",
                     title="[COLOR orange]Cerca...[/COLOR]",
                     extra="anime",
                     thumbnail=CercaThumbnail,
                     fanart=CercaFanart)]

    return itemlist
|
||||
|
||||
|
||||
def lista_anime(item):
    """List the anime shown on an animestream listing page.

    Scrapes url/thumbnail/title triplets, emits one "episodios" Item per
    show, and appends a pagination Item when the nav block links a next
    page. Thumbnails are made absolute against the site host.
    """
    logger.info("kod.animestram lista_anime")
    itemlist = []

    patron = 'class="anime"[^<]+<.*?window.location=\'(.*?)\'.*?url\((.*?)\);">[^=]+[^<]+[^>]+[^<]+<h4>(.*?)</h4>'

    for scrapedurl, scrapedthumbnail, scrapedtitle in scrapedAll(item.url, patron):
        logger.debug(
            "kod.animestram lista_anime scrapedurl: " + scrapedurl + " scrapedthumbnail:" + scrapedthumbnail + "scrapedtitle:" + scrapedtitle)
        # The thumbnail is captured from a CSS url(...) value; strip the paren.
        scrapedthumbnail = scrapedthumbnail.replace("(", "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=urlparse.urljoin(host, scrapedthumbnail),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 fanart=urlparse.urljoin(host, scrapedthumbnail)))

    # Pagination
    # ===========================================================
    pagina = scrapedSingle(item.url, '<div class="navc">.*?</div>', '<b.*?id="nav".*>.*?</b>[^<]+<.*?>(.*?)</a>')
    if len(pagina) > 0:
        paginaurl = Crea_Url(pagina[0], "ricerca")
        logger.debug("kod.animestram lista_anime Paginaurl: " + paginaurl)
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_anime",
                 title=AvantiTxt,
                 url=paginaurl,
                 thumbnail=AvantiImg,
                 folder=True))
    # ===========================================================

    return itemlist
|
||||
|
||||
|
||||
def lista_anime_categoria(item):
    """List the anime of one category page.

    NOTE(review): near-duplicate of lista_anime; the only differences are
    that pagination passes item.title as the category to Crea_Url and
    loops back to this action. Candidate for consolidation.
    """
    logger.info("kod.animestram lista_anime_categoria")
    itemlist = []

    patron = 'class="anime"[^<]+<.*?window.location=\'(.*?)\'.*?url\((.*?)\);">[^=]+[^<]+[^>]+[^<]+<h4>(.*?)</h4>'

    for scrapedurl, scrapedthumbnail, scrapedtitle in scrapedAll(item.url, patron):
        logger.debug(
            "kod.animestram lista_anime_categoria scrapedurl: " + scrapedurl + " scrapedthumbnail:" + scrapedthumbnail + "scrapedtitle:" + scrapedtitle)
        # The thumbnail comes from a CSS url(...) value; strip the paren.
        scrapedthumbnail = scrapedthumbnail.replace("(", "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=urlparse.urljoin(host, scrapedthumbnail),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 fanart=urlparse.urljoin(host, scrapedthumbnail)))

    # Pagination
    # ===========================================================
    pagina = scrapedSingle(item.url, '<div class="navc">.*?</div>', '<b.*?id="nav".*>.*?</b>[^<]+<.*?>(.*?)</a>')
    if len(pagina) > 0:
        # item.title carries the category name for the next-page URL.
        paginaurl = Crea_Url(pagina[0], "ricerca", item.title)
        logger.debug("kod.animestram Paginaurl: " + paginaurl)
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_anime_categoria",
                 title=AvantiTxt,
                 url=paginaurl,
                 thumbnail=AvantiImg,
                 folder=True))
    # ===========================================================
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and list matching anime.

    Builds the AJAX search URL via Crea_Url and scrapes the results the
    same way lista_anime does. No pagination Item is appended here.
    """
    logger.info("kod.animestram search " + texto)
    itemlist = []

    url = Crea_Url("1", "ricerca", "", texto)
    patron = 'class="anime"[^<]+<.*?window.location=\'(.*?)\'.*?url\((.*?)\);">[^=]+[^<]+[^>]+[^<]+<h4>(.*?)</h4>'

    for scrapedurl, scrapedthumbnail, scrapedtitle in scrapedAll(url, patron):
        logger.debug(
            "scrapedurl: " + scrapedurl + " scrapedthumbnail:" + scrapedthumbnail + "scrapedtitle:" + scrapedtitle)
        # The thumbnail comes from a CSS url(...) value; strip the paren.
        scrapedthumbnail = scrapedthumbnail.replace("(", "")
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=urlparse.urljoin(host, scrapedthumbnail),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 fanart=urlparse.urljoin(host, scrapedthumbnail)))

    return itemlist
|
||||
|
||||
|
||||
def categoria(item):
    """List the site's categories, scraped from the page's <option> values.

    Each non-empty option becomes a "lista_anime_categoria" Item whose URL
    is the AJAX search endpoint filtered on that category.
    """
    logger.info("kod.animestram categoria")
    itemlist = []
    patron = '<option value="(.*?)">.*?</option>'

    for raw_option in scrapedAll(item.url, patron):
        category_name = scrapertools.decodeHtmlentities(raw_option)
        # URL-encode spaces by hand for the AJAX query string.
        cat = Crea_Url("", "ricerca", category_name.replace(' ', "%20"))
        if len(category_name) > 0:
            new_item = Item(channel=item.channel,
                            action="lista_anime_categoria",
                            title=category_name,
                            url=cat,
                            thumbnail="",
                            fulltitle=category_name,
                            show=category_name,
                            fanart=AnimeFanart)
            itemlist.append(new_item)

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """List every episode of a show, following the page's ">" nav link.

    Walks the paginated episode listing until no next-page link is found,
    then appends the "add to library" entry when the video library is
    enabled.
    """
    logger.info("kod.animestram episodios")
    itemlist = []

    patron = 'class="episodio">\s*<.*?href=([^>]+)><img.*?src=(.*?)width[^<]+<[^<]+<[^<]+<[^<]+<.*?>(.*?)</a>'
    patronvideos = '<a id="nav" href="([^"]+)">></a>'
    url = urlparse.urljoin(host, item.url)

    while True:
        # BUG FIX: fetch the *current* page once and use it both for the
        # episode scrape and for the next-page link. The original fetched
        # each page twice and always re-scraped the nav link from item.url
        # (the first page), so the loop could never terminate correctly.
        data = httptools.downloadpage(url).data

        for scrapedurl, scrapedthumbnail, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="episode",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=urlparse.urljoin(host, scrapedthumbnail),
                     fulltitle=item.show + ' | ' + scrapedtitle,
                     show=item.show,
                     fanart=urlparse.urljoin(host, scrapedthumbnail)))

        matches = re.compile(patronvideos, re.DOTALL).findall(data)

        if len(matches) > 0:
            url = urlparse.urljoin(url, matches[0])
        else:
            break

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Extract the direct <source> video URLs from an episode page.

    Emits one playable "play" Item per <source> tag found; URLs are made
    absolute against the site host.
    """
    logger.info("kod.animestram findvideos")
    itemlist = []

    patron = '<source.*?src="(.*?)".*?>'
    for scrapedurl in scrapedAll(urlparse.urljoin(host, item.url), patron):
        url = urlparse.urljoin(host, scrapedurl)
        logger.debug("kod.animestram player url Video:" + url)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=item.title,
                 url=url,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 fanart=item.fanart,
                 contentType=item.contentType,
                 folder=False))

    return itemlist
|
||||
|
||||
|
||||
def scrapedAll(url="", patron=""):
    """Download *url* and return every DOTALL regex match of *patron*.

    Thin scraping helper shared by the listing functions; matches are
    also echoed to the log via scrapertools.printMatches.
    """
    data = httptools.downloadpage(url).data
    MyPatron = patron
    matches = re.compile(MyPatron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    return matches
|
||||
|
||||
|
||||
def scrapedSingle(url="", single="", patron=""):
    """Download *url*, isolate the first *single* match, then apply *patron*.

    Two-stage scrape: *single* narrows the page to one block (e.g. the
    pagination <div>) and *patron* extracts the values from that block.
    Returns the list of matches.
    """
    data = httptools.downloadpage(url).data
    paginazione = scrapertools.find_single_match(data, single)
    matches = re.compile(patron, re.DOTALL).findall(paginazione)
    scrapertools.printMatches(matches)

    return matches
|
||||
|
||||
|
||||
def Crea_Url(pagina="1", azione="ricerca", categoria="", nome=""):
    """Build the site's AJAX endpoint URL.

    Shape: chiamate.php?azione=ricerca&cat=<categoria>&nome=<nome>&pag=<pagina>
    """
    pieces = [host, "chiamate.php?azione=", azione,
              "&cat=", categoria,
              "&nome=", nome,
              "&pag=", pagina]
    Stringa = "".join(pieces)
    logger.debug("kod.animestram CreaUrl " + Stringa)

    return Stringa
|
||||
|
||||
|
||||
# Static artwork and pagination resources used by the menu builders above.
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
# Localized "next page" label; resolved once at import time.
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
|
||||
33
plugin.video.alfa/channels/animesubita.json
Normal file
33
plugin.video.alfa/channels/animesubita.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"id": "animesubita",
|
||||
"name": "AnimeSubIta",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "animesubita.png",
|
||||
"bannermenu": "animesubita.png",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in novit\u00e0 - Anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
351
plugin.video.alfa/channels/animesubita.py
Normal file
351
plugin.video.alfa/channels/animesubita.py
Normal file
@@ -0,0 +1,351 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per AnimeSubIta
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re, urllib, urlparse
|
||||
|
||||
from core import servertools, httptools, scrapertools, tmdb
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
|
||||
|
||||
|
||||
host = "http://www.animesubita.org"
|
||||
|
||||
PERPAGE = 20
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def mainlist(item):
    """Root menu of the AnimeSubIta channel.

    Offers the complete anime list, latest episodes, ongoing series,
    category browser and the search entry.
    """
    logger.info()
    itemlist = [Item(channel=item.channel,
                     action="lista_anime_completa",
                     title=color("Lista Anime", "azure"),
                     url="%s/lista-anime/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="ultimiep",
                     title=color("Ultimi Episodi", "azure"),
                     url="%s/category/ultimi-episodi/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="lista_anime",
                     title=color("Anime in corso", "azure"),
                     url="%s/category/anime-in-corso/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="categorie",
                     title=color("Categorie", "azure"),
                     url="%s/generi/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="search",
                     title=color("Cerca anime ...", "yellow"),
                     extra="anime",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
                ]

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def newest(categoria):
    """Entry point for the global "newest" feature.

    For the "anime" category, returns the latest episodes from the site
    home page (dropping a trailing pagination item if present). Any
    scraping error is logged and an empty list returned so the global
    listing can continue with other channels.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == "anime":
            item.url = host
            item.action = "ultimiep"
            itemlist = ultimiep(item)

            # Drop the trailing "next page" item, if any.
            if itemlist[-1].action == "ultimiep":
                itemlist.pop()
    # Keep running on error: log and return nothing.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def search(item, texto):
    """Global-search entry point: query the site and list matching anime.

    On any scraping error the exception info is logged and an empty list
    is returned so the global search can continue.
    """
    logger.info()
    item.url = host + "/?s=" + texto
    try:
        return lista_anime(item)
    except:
        # Swallow the failure but keep a trace in the log.
        import sys
        exc_details = sys.exc_info()
        for detail in exc_details:
            logger.error("%s" % detail)
        return []
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def categorie(item):
    """List the genre categories linked from the site's generi page."""
    logger.info()

    page = httptools.downloadpage(item.url).data
    patron = r'<li><a title="[^"]+" href="([^"]+)">([^<]+)</a>'

    itemlist = []
    for genre_url, genre_label in re.findall(patron, page, re.DOTALL):
        # Menu labels read "Anime <genre>"; keep just the genre.
        clean_label = genre_label.replace('Anime', '').strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_anime",
                 title=clean_label,
                 text_color="azure",
                 url=genre_url,
                 thumbnail=item.thumbnail,
                 folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def ultimiep(item):
    """List the latest episodes, rewriting list items into episode items.

    Reuses lista_anime to scrape the page, then for each item: cleans the
    title, derives the show page URL from the episode URL, and prepares an
    `extra` regex fragment that findvideos uses to locate this episode's
    row in the show page's episode table.
    """
    logger.info("ultimiep")
    itemlist = lista_anime(item, False, False)

    for itm in itemlist:
        title = scrapertools.decodeHtmlentities(itm.title)
        # Clean up the title
        title = title.replace("Streaming", "").replace("&", "")
        title = title.replace("Download", "")
        title = title.replace("Sub Ita", "").strip()
        # "Episodio NN" or "OAV" marker embedded in the title.
        eptype = scrapertools.find_single_match(title, "((?:Episodio?|OAV))")
        cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', title).strip()
        # Build the show URL: strip the episode segment from the slug and
        # make sure it points at the "-streaming" page variant.
        url = re.sub(r'%s-?\d*-' % eptype.lower(), '', itm.url)
        if "-streaming" not in url:
            url = url.replace("sub-ita", "sub-ita-streaming")

        epnumber = ""
        if 'episodio' in eptype.lower():
            epnumber = scrapertools.find_single_match(title.lower(), r'episodio?\s*(\d+)')
            # Turn eptype into a regex alternation matching this episode row.
            eptype += ":? " + epnumber

        # Regex fragment consumed by findvideos to pick this episode's <tr>.
        extra = "<tr>\s*<td[^>]+><strong>(?:[^>]+>|)%s(?:[^>]+>[^>]+>|[^<]*|[^>]+>)</strong>" % eptype
        itm.title = color(title, 'azure').strip()
        itm.action = "findvideos"
        itm.url = url
        itm.fulltitle = cleantitle
        itm.extra = extra
        itm.show = re.sub(r'Episodio\s*', '', title)
        itm.thumbnail = item.thumbnail

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def lista_anime(item, nextpage=True, show_lang=True):
    """List the anime/episode posts on an AnimeSubIta listing page.

    Args:
        item: the navigation Item whose url is scraped.
        nextpage: when True, append a pagination Item if the page
            declares a rel="next" link.
        show_lang: when True, keep the "(Sub Ita)" language marker in the
            displayed title (ultimiep calls with False).
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<div class="post-list group">(.*?)</nav><!--/.pagination-->')
    # patron = r'<a href="([^"]+)" title="([^"]+)">\s*<img[^s]+src="([^"]+)"[^>]+>'  # Pattern with thumbnail; Kodi does not download the site's images
    patron = r'<a href="([^"]+)" title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
        # Clean up the title
        scrapedtitle = scrapedtitle.replace("Streaming", "").replace("&", "")
        scrapedtitle = scrapedtitle.replace("Download", "")
        # Language marker, matched case-insensitively ("Sub Ita" variants).
        lang = scrapertools.find_single_match(scrapedtitle, r"([Ss][Uu][Bb]\s*[Ii][Tt][Aa])")
        scrapedtitle = scrapedtitle.replace("Sub Ita", "").strip()
        eptype = scrapertools.find_single_match(scrapedtitle, "((?:Episodio?|OAV))")
        cleantitle = re.sub(r'%s\s*\d*\s*(?:\(\d+\)|)' % eptype, '', scrapedtitle)

        cleantitle = cleantitle.replace(lang, "").strip()

        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
                 title=color(scrapedtitle.replace(lang, "(%s)" % color(lang, "red") if show_lang else "").strip(), 'azure'),
                 fulltitle=cleantitle,
                 url=scrapedurl,
                 show=cleantitle,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if nextpage:
        patronvideos = r'<link rel="next" href="([^"]+)"\s*/>'
        matches = re.compile(patronvideos, re.DOTALL).findall(data)

        if len(matches) > 0:
            scrapedurl = matches[0]
            itemlist.append(
                Item(channel=item.channel,
                     action="lista_anime",
                     title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                     folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def lista_anime_completa(item):
    """List the complete A-Z anime catalogue, paginated client-side.

    The whole catalogue lives on one page; this function slices it in
    PERPAGE-sized chunks, encoding the current page number after a '{}'
    separator appended to item.url.
    """
    logger.info()
    itemlist = []

    # Decode the page number embedded in the URL ("<url>{}<page>").
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(data, r'<ul class="lcp_catlist"[^>]+>(.*?)</ul>')
    patron = r'<a href="([^"]+)"[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Keep only the entries belonging to page p.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break

        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
        cleantitle = scrapedtitle.replace("Sub Ita Streaming", "").replace("Ita Streaming", "")

        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tvshow" if 'oav' not in scrapedtitle.lower() else "movie",
                 title=color(scrapedtitle, 'azure'),
                 fulltitle=cleantitle,
                 show=cleantitle,
                 url=scrapedurl,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # More entries left: link to the next client-side page.
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="lista_anime_completa",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def episodi(item):
    """List the episodes from a show page's streaming/download table.

    Skips rows whose icons mark the link as unavailable and rows hosted
    on VVVVID (unsupported). Appends the "add to library" entry when the
    video library is enabled.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<td style="[^"]*?">\s*.*?<strong>(.*?)</strong>.*?\s*</td>\s*<td style="[^"]*?">\s*<a href="([^"]+?)"[^>]+>\s*<img.*?src="([^"]+?)".*?/>\s*</a>\s*</td>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedurl, scrapedimg in matches:
        # "nodownload"/"nostreaming" icons mark dead links.
        if 'nodownload' in scrapedimg or 'nostreaming' in scrapedimg:
            continue
        if 'vvvvid' in scrapedurl.lower():
            itemlist.append(Item(title='I Video VVVVID Non sono supportati'))
            continue

        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # Strip any markup nested inside the <strong> cell.
        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
        scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapedtitle,
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=item.title,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
                 thumbnail=item.thumbnail))

    # Service commands
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def findvideos(item):
    """Resolve the direct video URL for an episode link.

    When item.extra holds a row-matching regex (set by ultimiep), the
    episode link is first extracted from the show page. The site's
    link*.php redirect is then rewritten to the local episodeNN.php
    player page, the session cookies are copied from the global cookie
    jar, and the final <source> URL is emitted with the required headers
    appended after '|' (Kodi's URL-with-headers convention).
    """
    logger.info()
    itemlist = []

    headers = {'Upgrade-Insecure-Requests': '1',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}

    if item.extra:
        # Locate this episode's table row on the show page and take its link.
        data = httptools.downloadpage(item.url, headers=headers).data
        blocco = scrapertools.get_match(data, r'%s(.*?)</tr>' % item.extra)
        item.url = scrapertools.find_single_match(blocco, r'<a href="([^"]+)"[^>]+>')

    patron = r'http:\/\/link[^a]+animesubita[^o]+org\/[^\/]+\/.*?(episodio\d*)[^p]+php(\?.*)'
    for phpfile, scrapedurl in re.findall(patron, item.url, re.DOTALL):
        url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # Rebuild the Cookie header from the Netscape-format cookie jar:
        # fields 5/6 of each tab-separated line are name and value.
        # ------------------------------------------------
        cookies = ""
        matches = re.compile('(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""), re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        headers['Cookie'] = cookies[:-1]
        # ------------------------------------------------
        scrapedurl = scrapertools.find_single_match(data, r'<source src="([^"]+)"[^>]+>')
        url = scrapedurl + '|' + urllib.urlencode(headers)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 text_color="azure",
                 title="[%s] %s" % (color("Diretto", "orange"), item.title),
                 fulltitle=item.fulltitle,
                 url=url,
                 thumbnail=item.thumbnail,
                 fanart=item.thumbnail,
                 plot=item.plot))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def color(text, color):
|
||||
return "[COLOR %s]%s[/COLOR]" % (color, text)
|
||||
|
||||
# ================================================================================================================
|
||||
24
plugin.video.alfa/channels/animetubeita.json
Normal file
24
plugin.video.alfa/channels/animetubeita.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "animetubeita",
|
||||
"name": "Animetubeita",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "http:\/\/i.imgur.com\/rQPx1iQ.png",
|
||||
"bannermenu": "http:\/\/i.imgur.com\/rQPx1iQ.png",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
364
plugin.video.alfa/channels/animetubeita.py
Normal file
364
plugin.video.alfa/channels/animetubeita.py
Normal file
@@ -0,0 +1,364 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per animetubeita
|
||||
# ----------------------------------------------------------
|
||||
import re, urllib
|
||||
|
||||
from core import httptools, scrapertools, tmdb
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
|
||||
|
||||
|
||||
host = "http://www.animetubeita.com"
|
||||
hostlista = host + "/lista-anime/"
|
||||
hostgeneri = host + "/generi/"
|
||||
hostcorso = host + "/category/serie-in-corso/"
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
log("animetubeita", "mainlist", item.channel)
|
||||
itemlist = [Item(channel=item.channel,
|
||||
action="lista_home",
|
||||
title="[COLOR azure]Home[/COLOR]",
|
||||
url=host,
|
||||
thumbnail=AnimeThumbnail,
|
||||
fanart=AnimeFanart),
|
||||
# Item(channel=item.channel,
|
||||
# action="lista_anime",
|
||||
# title="[COLOR azure]A-Z[/COLOR]",
|
||||
# url=hostlista,
|
||||
# thumbnail=AnimeThumbnail,
|
||||
# fanart=AnimeFanart),
|
||||
Item(channel=item.channel,
|
||||
action="lista_genere",
|
||||
title="[COLOR azure]Genere[/COLOR]",
|
||||
url=hostgeneri,
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(channel=item.channel,
|
||||
action="lista_in_corso",
|
||||
title="[COLOR azure]Serie in Corso[/COLOR]",
|
||||
url=hostcorso,
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(channel=item.channel,
|
||||
action="search",
|
||||
title="[COLOR lime]Cerca...[/COLOR]",
|
||||
url=host + "/?s=",
|
||||
thumbnail=CercaThumbnail,
|
||||
fanart=CercaFanart)]
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def lista_home(item):
|
||||
log("animetubeita", "lista_home", item.channel)
|
||||
|
||||
itemlist = []
|
||||
|
||||
patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title=".*?">.*?<img.*?src="(.*?)".*?<strong>Titolo</strong></td>.*?<td>(.*?)</td>.*?<td><strong>Trama</strong></td>.*?<td>(.*?)</'
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in scrapedAll(item.url, patron):
|
||||
title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
title = title.split("Sub")[0]
|
||||
fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
|
||||
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="dl_s",
|
||||
contentType="tvshow",
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
fulltitle=fulltitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
fanart=scrapedthumbnail,
|
||||
show=fulltitle,
|
||||
plot=scrapedplot))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginazione
|
||||
# ===========================================================
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<link rel="next" href="(.*?)"'
|
||||
next_page = scrapertools.find_single_match(data, patron)
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="lista_home",
|
||||
title=AvantiTxt,
|
||||
url=next_page,
|
||||
thumbnail=AvantiImg,
|
||||
folder=True))
|
||||
# ===========================================================
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
# def lista_anime(item):
|
||||
# log("animetubeita", "lista_anime", item.channel)
|
||||
|
||||
# itemlist = []
|
||||
|
||||
# patron = '<li.*?class="page_.*?href="(.*?)">(.*?)</a></li>'
|
||||
# for scrapedurl, scrapedtitle in scrapedAll(item.url, patron):
|
||||
# title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
# title = title.split("Sub")[0]
|
||||
# log("url:[" + scrapedurl + "] scrapedtitle:[" + title + "]")
|
||||
# itemlist.append(
|
||||
# Item(channel=item.channel,
|
||||
# action="dettaglio",
|
||||
# contentType="tvshow",
|
||||
# title="[COLOR azure]" + title + "[/COLOR]",
|
||||
# url=scrapedurl,
|
||||
# show=title,
|
||||
# thumbnail="",
|
||||
# fanart=""))
|
||||
|
||||
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# return itemlist
|
||||
|
||||
|
||||
|
||||
def lista_genere(item):
|
||||
log("lista_anime_genere", "lista_genere", item.channel)
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
bloque = scrapertools.get_match(data,
|
||||
'<div class="hentry page post-1 odd author-admin clear-block">(.*?)<div id="disqus_thread">')
|
||||
|
||||
patron = '<li class="cat-item cat-item.*?"><a href="(.*?)" >(.*?)</a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="lista_generi",
|
||||
title='[COLOR lightsalmon][B]' + scrapedtitle + '[/B][/COLOR]',
|
||||
url=scrapedurl,
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
thumbnail=item.thumbnail))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def lista_generi(item):
|
||||
log("animetubeita", "lista_generi", item.channel)
|
||||
|
||||
itemlist = []
|
||||
patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title=".*?">.*?<img.*?src="(.*?)".*?<strong>Titolo</strong></td>.*?<td>(.*?)</td>.*?<td><strong>Trama</strong></td>.*?<td>(.*?)</'
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in scrapedAll(item.url, patron):
|
||||
title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
title = title.split("Sub")[0]
|
||||
fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
|
||||
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="dettaglio",
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
contentType="tvshow",
|
||||
fulltitle=fulltitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
show=fulltitle,
|
||||
fanart=scrapedthumbnail,
|
||||
plot=scrapedplot))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginazione
|
||||
# ===========================================================
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<link rel="next" href="(.*?)"'
|
||||
next_page = scrapertools.find_single_match(data, patron)
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="lista_generi",
|
||||
title=AvantiTxt,
|
||||
url=next_page,
|
||||
thumbnail=AvantiImg,
|
||||
folder=True))
|
||||
# ===========================================================
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def lista_in_corso(item):
|
||||
log("animetubeita", "lista_home", item.channel)
|
||||
|
||||
itemlist = []
|
||||
|
||||
patron = '<h2 class="title"><a href="(.*?)" rel="bookmark" title="Link.*?>(.*?)</a></h2>.*?<img.*?src="(.*?)".*?<td><strong>Trama</strong></td>.*?<td>(.*?)</td>'
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in scrapedAll(item.url, patron):
|
||||
title = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
title = title.split("Sub")[0]
|
||||
fulltitle = re.sub(r'[Ee]pisodio? \d+', '', title)
|
||||
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="dettaglio",
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
contentType="tvshow",
|
||||
fulltitle=fulltitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
show=fulltitle,
|
||||
fanart=scrapedthumbnail,
|
||||
plot=scrapedplot))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginazione
|
||||
# ===========================================================
|
||||
data = httptools.downloadpage(item.url).data
|
||||
patron = '<link rel="next" href="(.*?)"'
|
||||
next_page = scrapertools.find_single_match(data, patron)
|
||||
if next_page != "":
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="lista_in_corso",
|
||||
title=AvantiTxt,
|
||||
url=next_page,
|
||||
thumbnail=AvantiImg,
|
||||
folder=True))
|
||||
# ===========================================================
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def dl_s(item):
|
||||
log("animetubeita", "dl_s", item.channel)
|
||||
|
||||
itemlist = []
|
||||
encontrados = set()
|
||||
|
||||
# 1
|
||||
patron = '<p><center><a.*?href="(.*?)"'
|
||||
for scrapedurl in scrapedAll(item.url, patron):
|
||||
if scrapedurl in encontrados: continue
|
||||
encontrados.add(scrapedurl)
|
||||
title = "DOWNLOAD & STREAMING"
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
action="dettaglio",
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail=item.thumbnail,
|
||||
fanart=item.thumbnail,
|
||||
plot=item.plot,
|
||||
folder=True))
|
||||
# 2
|
||||
patron = '<p><center>.*?<a.*?href="(.*?)"'
|
||||
for scrapedurl in scrapedAll(item.url, patron):
|
||||
if scrapedurl in encontrados: continue
|
||||
encontrados.add(scrapedurl)
|
||||
title = "DOWNLOAD & STREAMING"
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
action="dettaglio",
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail=item.thumbnail,
|
||||
fanart=item.thumbnail,
|
||||
plot=item.plot,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def dettaglio(item):
|
||||
log("animetubeita", "dettaglio", item.channel)
|
||||
|
||||
itemlist = []
|
||||
headers = {'Upgrade-Insecure-Requests': '1',
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
|
||||
|
||||
episodio = 1
|
||||
patron = r'<a href="http:\/\/link[^a]+animetubeita[^c]+com\/[^\/]+\/[^s]+((?:stream|strm))[^p]+php(\?.*?)"'
|
||||
for phpfile, scrapedurl in scrapedAll(item.url, patron):
|
||||
title = "Episodio " + str(episodio)
|
||||
episodio += 1
|
||||
url = "%s/%s.php%s" % (host, phpfile, scrapedurl)
|
||||
headers['Referer'] = url
|
||||
data = httptools.downloadpage(url, headers=headers).data
|
||||
# ------------------------------------------------
|
||||
cookies = ""
|
||||
matches = re.compile('(.animetubeita.com.*?)\n', re.DOTALL).findall(config.get_cookie_data())
|
||||
for cookie in matches:
|
||||
name = cookie.split('\t')[5]
|
||||
value = cookie.split('\t')[6]
|
||||
cookies += name + "=" + value + ";"
|
||||
headers['Cookie'] = cookies[:-1]
|
||||
# ------------------------------------------------
|
||||
url = scrapertools.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
|
||||
url += '|' + urllib.urlencode(headers)
|
||||
itemlist.append(Item(channel=item.channel,
|
||||
action="play",
|
||||
title="[COLOR azure]" + title + "[/COLOR]",
|
||||
url=url,
|
||||
thumbnail=item.thumbnail,
|
||||
fanart=item.thumbnail,
|
||||
plot=item.plot))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
log("animetubeita", "search", item.channel)
|
||||
item.url = item.url + texto
|
||||
|
||||
try:
|
||||
return lista_home(item)
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
|
||||
def scrapedAll(url="", patron=""):
|
||||
matches = []
|
||||
data = httptools.downloadpage(url).data
|
||||
MyPatron = patron
|
||||
matches = re.compile(MyPatron, re.DOTALL).findall(data)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
return matches
|
||||
|
||||
|
||||
|
||||
def scrapedSingle(url="", single="", patron=""):
|
||||
matches = []
|
||||
data = httptools.downloadpage(url).data
|
||||
elemento = scrapertools.find_single_match(data, single)
|
||||
matches = re.compile(patron, re.DOTALL).findall(elemento)
|
||||
scrapertools.printMatches(matches)
|
||||
|
||||
return matches
|
||||
|
||||
|
||||
|
||||
def log(funzione="", stringa="", canale=""):
|
||||
logger.debug("[" + canale + "].[" + funzione + "] " + stringa)
|
||||
|
||||
|
||||
|
||||
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
|
||||
AnimeFanart = "http://www.animetubeita.com/wp-content/uploads/21407_anime_scenery.jpg"
|
||||
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
|
||||
CategoriaFanart = "http://www.animetubeita.com/wp-content/uploads/21407_anime_scenery.jpg"
|
||||
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
|
||||
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
|
||||
AvantiTxt = config.get_localized_string(30992)
|
||||
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
|
||||
24
plugin.video.alfa/channels/animevision.json
Normal file
24
plugin.video.alfa/channels/animevision.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "animevision",
|
||||
"name": "Animevision",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "http:\/\/animevision.it\/images\/logo.png",
|
||||
"bannermenu": "http:\/\/animevision.it\/images\/logo.png",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
145
plugin.video.alfa/channels/animevision.py
Normal file
145
plugin.video.alfa/channels/animevision.py
Normal file
@@ -0,0 +1,145 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per animevision
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import httptools, scrapertools, tmdb
|
||||
from platformcode import logger
|
||||
from core.item import Item
|
||||
|
||||
|
||||
|
||||
host = "https://www.animevision.it"
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("kod.animevision mainlist")
|
||||
|
||||
itemlist = [Item(channel=item.channel,
|
||||
action="lista_anime",
|
||||
title="[COLOR azure]Anime [/COLOR]- [COLOR orange]Lista Completa[/COLOR]",
|
||||
url=host + "/elenco.php",
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(channel=item.channel,
|
||||
action="search",
|
||||
title="[COLOR yellow]Cerca...[/COLOR]",
|
||||
url=host + "/?s=",
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart)]
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("kod.animevision search")
|
||||
item.url = host + "/?search=" + texto
|
||||
try:
|
||||
return lista_anime_src(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
|
||||
|
||||
def lista_anime_src(item):
|
||||
logger.info("kod.animevision lista_anime_src")
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
patron = r"<a class=\'false[Ll]ink\'\s*href=\'([^\']+)\'[^>]+>[^>]+>[^<]+<img\s*style=\'[^\']+\'\s*class=\'[^\']+\'\s*src=\'[^\']+\'\s*data-src=\'([^\']+)\'\s*alt=\'([^\']+)\'[^>]*>"
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedimg, scrapedtitle in matches:
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
scrapedimg = host + "/" + scrapedimg
|
||||
scrapedurl = host + "/" + scrapedurl
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
thumbnail=scrapedimg,
|
||||
fanart=scrapedimg,
|
||||
viewmode="movie"))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def lista_anime(item):
|
||||
logger.info("kod.animevision lista_anime")
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
patron = "<div class='epContainer' ><a class='falseLink' href='(.*?)'><div[^=]+=[^=]+=[^=]+=[^=]+='(.*?)'[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^=]+=[^>]+><b>(.*?)<"
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedimg, scrapedtitle in matches:
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
scrapedimg = host + "/" + scrapedimg
|
||||
scrapedurl = host + "/" + scrapedurl
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="episodi",
|
||||
contentType="tvshow",
|
||||
title=scrapedtitle,
|
||||
text_color="azure",
|
||||
url=scrapedurl,
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
thumbnail=scrapedimg,
|
||||
fanart=scrapedimg,
|
||||
viewmode="movie"))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
def episodi(item):
|
||||
logger.info("kod.animevision episodi")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
patron = "<a class='nodecoration text-white' href='(.*?)'>(.+?)<"
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = scrapedtitle.split(';')[1]
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
scrapedurl = host + "/" + scrapedurl
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
title=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
fulltitle=scrapedtitle,
|
||||
show=scrapedtitle,
|
||||
thumbnail=item.thumbnail,
|
||||
fanart=item.fanart))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
|
||||
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
|
||||
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
|
||||
49
plugin.video.alfa/channels/animeworld.json
Normal file
49
plugin.video.alfa/channels/animeworld.json
Normal file
@@ -0,0 +1,49 @@
|
||||
{
|
||||
"id": "animeworld",
|
||||
"name": "AnimeWorld",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["it"],
|
||||
"thumbnail": "https://cdn.animeworld.it/static/images/general/logoaw.png",
|
||||
"version": "2",
|
||||
"date": "14/02/2019",
|
||||
"changes": "TUTTO",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "3", "5", "10" ]
|
||||
}
|
||||
]
|
||||
}
|
||||
425
plugin.video.alfa/channels/animeworld.py
Normal file
425
plugin.video.alfa/channels/animeworld.py
Normal file
@@ -0,0 +1,425 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per animeworld
|
||||
# ----------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from core import httptools, scrapertoolsV2, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
from platformcode.logger import log
|
||||
|
||||
|
||||
|
||||
host = "https://www.animeworld.it"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def mainlist(item):
|
||||
log()
|
||||
itemlist = [
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="build_menu",
|
||||
title="[B]Anime ITA[/B]",
|
||||
url=host+'/filter?language[]=1',
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="build_menu",
|
||||
title="[B]Anime SUB[/B]",
|
||||
url=host+'/filter?language[]=0',
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="alfabetico",
|
||||
title="Anime A-Z",
|
||||
url=host + "/az-list",
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="video",
|
||||
title="Ultime Aggiunte",
|
||||
url=host + "/newest",
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="video",
|
||||
title="Ultimi Episodi",
|
||||
url=host + "/updated",
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="search",
|
||||
title="[B]Cerca ...[/B]",
|
||||
thumbnail=
|
||||
"http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
|
||||
]
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# Crea Menu Filtro ======================================================
|
||||
|
||||
def build_menu(item):
|
||||
itemlist = []
|
||||
itemlist.append(Item(
|
||||
channel=item.channel,
|
||||
action="video",
|
||||
title="[B]Tutti[/B]",
|
||||
url=item.url,
|
||||
thumbnail=CategoriaThumbnail,
|
||||
fanart=CategoriaFanart))
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'\n|\t','',data)
|
||||
data = re.sub(r'>\s*<','><',data)
|
||||
|
||||
block = scrapertoolsV2.get_match(data, r'<form class="filters.*?>(.*?)<\/form>')
|
||||
|
||||
matches = re.compile(r'<button class="btn btn-sm btn-default dropdown-toggle" data-toggle="dropdown"> (.*?) <span.*?>(.*?)<\/ul>', re.DOTALL).findall(block)
|
||||
|
||||
for title, html in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='build_sub_menu',
|
||||
contentType="tvshow",
|
||||
title='[B]' + title + ' >[/B]',
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
url=item.url,
|
||||
html=html))
|
||||
|
||||
# Elimina FLingua dal Menu
|
||||
itemlist.pop(5)
|
||||
|
||||
return itemlist
|
||||
|
||||
# Crea SottoMenu Filtro ======================================================
|
||||
|
||||
def build_sub_menu(item):
|
||||
itemlist = []
|
||||
matches = re.compile(r'<input.*?name="(.*?)" value="(.*?)".*?><label.*?>(.*?)<\/label>', re.DOTALL).findall(item.html)
|
||||
for name, value, title in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='video',
|
||||
contentType="tvshow",
|
||||
title='[B]' + title + ' >[/B]',
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
url=item.url + '&' + name + '=' + value,
|
||||
plot=""))
|
||||
|
||||
return itemlist
|
||||
|
||||
# Novità ======================================================
|
||||
|
||||
def newest(categoria):
|
||||
log()
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "anime":
|
||||
item.url = host + '/newest'
|
||||
item.action = "video"
|
||||
itemlist = video(item)
|
||||
|
||||
if itemlist[-1].action == "video":
|
||||
itemlist.pop()
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
# Cerca ===========================================================
|
||||
|
||||
def search(item, texto):
|
||||
log(texto)
|
||||
item.url = host + '/search?keyword=' + texto
|
||||
try:
|
||||
return video(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
|
||||
|
||||
# Lista A-Z ====================================================
|
||||
|
||||
def alfabetico(item):
|
||||
log()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'\n|\t','',data)
|
||||
data = re.sub(r'>\s*<','><',data)
|
||||
|
||||
block = scrapertoolsV2.get_match(data, r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>')
|
||||
|
||||
matches = re.compile('<a href="([^"]+)" title="([^"]+)">', re.DOTALL).findall(block)
|
||||
scrapertoolsV2.printMatches(matches)
|
||||
|
||||
for url, title in matches:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='lista_anime',
|
||||
contentType="tvshow",
|
||||
title=title,
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
url=url,
|
||||
plot=""))
|
||||
|
||||
return itemlist
|
||||
|
||||
def lista_anime(item):
|
||||
log()
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'\n|\t','',data)
|
||||
data = re.sub(r'>\s*<','><',data)
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumb, scrapedoriginal, scrapedtitle, scrapedplot in matches:
|
||||
|
||||
if scrapedoriginal == scrapedtitle:
|
||||
scrapedoriginal=''
|
||||
else:
|
||||
scrapedoriginal = ' - [ ' + scrapedoriginal + ' ]'
|
||||
|
||||
year = ''
|
||||
lang = ''
|
||||
if '(' in scrapedtitle:
|
||||
year = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([0-9]+\))')
|
||||
lang = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([a-zA-Z]+\))')
|
||||
|
||||
title = scrapedtitle.replace(year,'').replace(lang,'')
|
||||
original = scrapedoriginal.replace(year,'').replace(lang,'')
|
||||
title = '[B]' + title + '[/B]' + year + lang + original
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
contentType="tvshow",
|
||||
action="episodios",
|
||||
text_color="azure",
|
||||
title=title,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumb,
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Next page
|
||||
next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href="([^"]+)" rel="next"')
|
||||
|
||||
if next_page != '':
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='lista_anime',
|
||||
title='[B]' + config.get_localized_string(30992) + ' »[/B]',
|
||||
url=next_page,
|
||||
contentType=item.contentType,
|
||||
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
|
||||
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def video(item):
|
||||
log()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = re.sub(r'\n|\t','',data)
|
||||
data = re.sub(r'>\s*<','><',data)
|
||||
|
||||
patron = r'<a href="([^"]+)" class="poster.*?><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches:
|
||||
# Cerca Info come anno o lingua nel Titolo
|
||||
year = ''
|
||||
lang = ''
|
||||
if '(' in scrapedtitle:
|
||||
year = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([0-9]+\))')
|
||||
lang = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([a-zA-Z]+\))')
|
||||
|
||||
# Rimuove Anno e Lingua nel Titolo
|
||||
title = scrapedtitle.replace(year,'').replace(lang,'')
|
||||
original = scrapedoriginal.replace(year,'').replace(lang,'')
|
||||
|
||||
# Compara Il Titolo con quello originale
|
||||
if original == title:
|
||||
original=''
|
||||
else:
|
||||
original = ' - [ ' + scrapedoriginal + ' ]'
|
||||
|
||||
# cerca info supplementari
|
||||
ep = ''
|
||||
ep = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ep">(.*?)<')
|
||||
if ep != '':
|
||||
ep = ' - ' + ep
|
||||
|
||||
ova = ''
|
||||
ova = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ova">(.*?)<')
|
||||
if ova != '':
|
||||
ova = ' - (' + ova + ')'
|
||||
|
||||
ona = ''
|
||||
ona = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="ona">(.*?)<')
|
||||
if ona != '':
|
||||
ona = ' - (' + ona + ')'
|
||||
|
||||
movie = ''
|
||||
movie = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="movie">(.*?)<')
|
||||
if movie != '':
|
||||
movie = ' - (' + movie + ')'
|
||||
|
||||
special = ''
|
||||
special = scrapertoolsV2.find_single_match(scrapedinfo, '<div class="special">(.*?)<')
|
||||
if special != '':
|
||||
special = ' - (' + special + ')'
|
||||
|
||||
|
||||
# Concatena le informazioni
|
||||
info = ep + lang + year + ova + ona + movie + special
|
||||
|
||||
# Crea il title da visualizzare
|
||||
long_title = '[B]' + title + '[/B]' + info + original
|
||||
|
||||
# Controlla se sono Episodi o Film
|
||||
if movie == '':
|
||||
contentType = 'tvshow'
|
||||
action = 'episodios'
|
||||
else:
|
||||
contentType = 'movie'
|
||||
action = 'findvideos'
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
contentType=contentType,
|
||||
action=action,
|
||||
title=long_title,
|
||||
url=scrapedurl,
|
||||
fulltitle=title,
|
||||
show=title,
|
||||
thumbnail=scrapedthumb))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Next page
|
||||
next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href=".*?page=([^"]+)" rel="next"')
|
||||
|
||||
if next_page != '':
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action='video',
|
||||
title='[B]' + config.get_localized_string(30992) + ' »[/B]',
|
||||
url=re.sub('&page=([^"]+)', '', item.url) + '&page=' + next_page,
|
||||
contentType=item.contentType,
|
||||
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """Build the episode list for a show page.

    Downloads the show page, isolates the first server tab inside the
    "servers" widget and emits one 'findvideos' Item per episode anchor,
    plus the optional add-to-videolibrary entry.
    """
    log()
    itemlist = []

    # Normalize the page: drop newlines and inter-tag whitespace so the
    # block-extraction regexes can operate on a single line.
    page = httptools.downloadpage(item.url).data.replace('\n', '')
    page = re.sub(r'>\s*<', '><', page)

    # Keep only the first server block of the "servers" widget.
    servers = scrapertoolsV2.find_single_match(page, r'<div class="widget servers".*?>(.*?)<div id="download"')
    servers = scrapertoolsV2.find_single_match(servers, r'<div class="server.*?>(.*?)<div class="server.*?>')

    episode_re = re.compile(r'<li><a.*?href="([^"]+)".*?>(.*?)<\/a>', re.DOTALL)
    for ep_url, ep_num in episode_re.findall(servers):
        label = '[B] Episodio ' + ep_num + '[/B]'
        itemlist.append(
            Item(
                channel=item.channel,
                action="findvideos",
                contentType="episode",
                title=label,
                url=urlparse.urljoin(host, ep_url),
                fulltitle=label,
                show=label,
                plot=item.plot,
                fanart=item.thumbnail,
                thumbnail=item.thumbnail))

    # Add-to-videolibrary entry
    if itemlist and config.get_videolibrary_support():
        itemlist.append(
            Item(
                channel=item.channel,
                title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the direct stream URL(s) for an episode via the ajax player."""
    log()
    itemlist = []

    # The path portion of the episode URL doubles as the ajax player id.
    anime_id = scrapertoolsV2.find_single_match(item.url, r'.*\..*?\/(.*)')
    player_html = httptools.downloadpage(host + "/ajax/episode/serverPlayer?id=" + anime_id).data

    for video in re.findall('<source src="([^"]+)"', player_html, re.DOTALL):
        itemlist.append(
            Item(
                channel=item.channel,
                action="play",
                title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
                url=video,
                contentType=item.contentType,
                folder=False))

    return itemlist
|
||||
|
||||
|
||||
|
||||
# Service references ========================================================
# Shared artwork URLs and labels reused by this channel's menu builders.
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
# Localized "next page" label and the pager icon shown with it.
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
|
||||
24
plugin.video.alfa/channels/bleachportal.json
Normal file
24
plugin.video.alfa/channels/bleachportal.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "bleachportal",
|
||||
"name": "BleachPortal",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"fanart": "http://i39.tinypic.com/35ibvcx.jpg",
|
||||
"thumbnail": "http://www.bleachportal.it/images/index_r1_c1.jpg",
|
||||
"banner": "http://cgi.di.uoa.gr/~std05181/images/bleach.jpg",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
117
plugin.video.alfa/channels/bleachportal.py
Normal file
117
plugin.video.alfa/channels/bleachportal.py
Normal file
@@ -0,0 +1,117 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# ------------------------------------------------------------
|
||||
# XBMC Plugin
|
||||
# Canale per http://bleachportal.it
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from core import scrapertools, httptools
|
||||
from platformcode import logger
|
||||
from core.item import Item
|
||||
|
||||
|
||||
|
||||
host = "http://www.bleachportal.it"
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: one entry per series hosted by the site."""
    logger.info("[BleachPortal.py]==> mainlist")

    bleach = Item(channel=item.channel,
                  action="episodi",
                  title="[COLOR azure] Bleach [/COLOR] - [COLOR deepskyblue]Lista Episodi[/COLOR]",
                  url=host + "/streaming/bleach/stream_bleach.htm",
                  thumbnail="http://i45.tinypic.com/286xp3m.jpg",
                  fanart="http://i40.tinypic.com/5jsinb.jpg",
                  extra="bleach")
    dgrayman = Item(channel=item.channel,
                    action="episodi",
                    title="[COLOR azure] D.Gray Man [/COLOR] - [COLOR deepskyblue]Lista Episodi[/COLOR]",
                    url=host + "/streaming/d.gray-man/stream_dgray-man.htm",
                    thumbnail="http://i59.tinypic.com/9is3tf.jpg",
                    fanart="http://wallpapercraft.net/wp-content/uploads/2016/11/Cool-D-Gray-Man-Background.jpg",
                    extra="dgrayman")

    return [bleach, dgrayman]
|
||||
|
||||
|
||||
def episodi(item):
    """List the episode rows of a series page, newest first.

    For Bleach an extra entry pointing at the dedicated OAV/movies page is
    appended before the list is reversed.
    """
    logger.info("[BleachPortal.py]==> episodi")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = '<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">\D+([\d\-]+)\s?<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
    rows = re.compile(patron, re.DOTALL).findall(data)

    if item.extra == "bleach":
        animetitle = "Bleach"
    else:
        animetitle = "D.Gray Man"

    for number, eptitle, relurl in rows:
        # Pages are latin-1 encoded; re-encode the title as utf-8.
        eptitle = eptitle.decode('latin1').encode('utf8')
        if "stream_bleach.htm" in item.url:
            ep_url = item.url.replace("stream_bleach.htm", relurl)
        else:
            ep_url = item.url.replace("stream_dgray-man.htm", relurl)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title="[COLOR azure]%s Ep: [COLOR deepskyblue]%s[/COLOR][/COLOR]" % (animetitle, number),
                 url=ep_url,
                 plot=eptitle,
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart,
                 fulltitle="[COLOR red]%s Ep: %s[/COLOR] | [COLOR deepskyblue]%s[/COLOR]" % (animetitle, number, eptitle)))

    # Bleach also has a dedicated OAV/movies page.
    if item.extra == "bleach":
        itemlist.append(
            Item(channel=item.channel,
                 action="oav",
                 title="[B][COLOR azure] OAV e Movies [/COLOR][/B]",
                 url=item.url.replace("stream_bleach.htm", "stream_bleach_movie_oav.htm"),
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    return list(reversed(itemlist))
|
||||
|
||||
|
||||
def oav(item):
    """List the Bleach OAV/movie entries, newest first."""
    logger.info("[BleachPortal.py]==> oav")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = '<td>?[<span\s|<width="\d+%"\s]+?class="[^"]+">-\s+(.*?)<[^<]+<[^<]+<[^<]+<[^<]+<.*?\s+?.*?<span style="[^"]+">([^<]+).*?\s?.*?<a href="\.*(/?[^"]+)">'
    for number, title, relurl in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title="[COLOR deepskyblue] " + number + " [/COLOR]",
                 url=item.url.replace("stream_bleach_movie_oav.htm", relurl),
                 plot=title,
                 extra=item.extra,
                 thumbnail=item.thumbnail,
                 fulltitle="[COLOR red]" + number + "[/COLOR] | [COLOR deepskyblue]" + title + "[/COLOR]"))

    return list(reversed(itemlist))
|
||||
|
||||
|
||||
def findvideos(item):
    """Extract the direct video URL from an episode page and emit a playable Item.

    Bleach pages embed the stream as ``file: "<url>"``; D.Gray Man pages use a
    ``file=<url>&`` query parameter whose last path component is the file name.
    """
    logger.info("[BleachPortal.py]==> findvideos")
    itemlist = []

    # Some Bleach episode links carry a duplicated slash ("bleach//ep.htm").
    # Collapse only that segment: the previous re.sub(r'\w+//', "", url) also
    # matched "http://" and stripped the scheme, mangling the URL.
    if "bleach//" in item.url:
        item.url = item.url.replace("bleach//", "bleach/")

    data = httptools.downloadpage(item.url).data

    if "bleach" in item.extra:
        video = scrapertools.find_single_match(data, 'file: "(.*?)",')
    else:
        video = scrapertools.find_single_match(data, 'file=(.*?)&').rsplit('/', 1)[-1]

    itemlist.append(
        Item(channel=item.channel,
             action="play",
             title="[[COLOR orange]Diretto[/COLOR]] [B]%s[/B]" % item.title,
             url=item.url.replace(item.url.split("/")[-1], "/" + video),
             thumbnail=item.thumbnail,
             fulltitle=item.fulltitle))
    return itemlist
|
||||
68
plugin.video.alfa/channels/casacinema.json
Normal file
68
plugin.video.alfa/channels/casacinema.json
Normal file
@@ -0,0 +1,68 @@
|
||||
{
|
||||
"id": "casacinema",
|
||||
"name": "Casacinema",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",
|
||||
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/casacinema.png",
|
||||
"categories": [
|
||||
"tvshow",
|
||||
"movie","cult","top channels"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
375
plugin.video.alfa/channels/casacinema.py
Normal file
375
plugin.video.alfa/channels/casacinema.py
Normal file
@@ -0,0 +1,375 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per casacinema
|
||||
# ------------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from core import scrapertools, scrapertoolsV2, httptools, servertools, tmdb
|
||||
from channels import autoplay, filtertools
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
host = 'https://www.casacinema.news'  ### <- Host changed from .video to .news -> see line 164

# Language map used by filtertools; the channel only serves Italian links.
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
# Servers/qualities offered to autoplay.
list_servers = ['openload', 'wstream', 'speedvideo']
list_quality = ['HD', 'SD']

# User settings: whether to probe links for availability and how many.
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'casacinema')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'casacinema')

# Referer header the site expects on listing requests.
headers = [['Referer', '%s/genere/serie-tv' % host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the casacinema channel."""
    logger.info("kod.casacinema mainlist")

    autoplay.init(item.channel, list_servers, list_quality)

    popcorn = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    search_icon = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Film - Novita'[/COLOR]",
                         action="peliculas",
                         extra="movie",
                         url="%s/genere/film" % host,
                         thumbnail=popcorn))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Film - HD[/COLOR]",
                         action="peliculas",
                         extra="movie",
                         url="%s/?s=[HD]" % host,
                         thumbnail="http://jcrent.com/apple%20tv%20final/HD.png"))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Categorie[/COLOR]",
                         action="categorias",
                         extra="movie",
                         url="%s/genere/film" % host,
                         thumbnail=popcorn))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Film Sub - Ita[/COLOR]",
                         action="peliculas",
                         extra="movie",
                         url="%s/genere/sub-ita" % host,
                         thumbnail="http://i.imgur.com/qUENzxl.png"))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca...[/COLOR]",
                         action="search",
                         extra="movie",
                         thumbnail=search_icon))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Serie TV[/COLOR]",
                         extra="tvshow",
                         action="peliculas_tv",
                         url="%s/genere/serie-tv" % host,
                         thumbnail=popcorn))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
                         action="search",
                         extra="tvshow",
                         thumbnail=search_icon))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """Entry point for the global "newest" listing.

    Only the "film" category is supported. Any scraping failure is logged
    and an empty list returned so the global scan keeps going.
    """
    logger.info("[casacinema.py] newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host + '/genere/film'
            item.extra = "movie"
            item.action = "peliculas"
            itemlist = peliculas(item)

            # Drop the trailing "next page" pseudo-item, if present.
            # Guard against an empty scrape: itemlist[-1] on [] raised
            # IndexError, which the broad except below turned into a
            # spurious logged error.
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Keep the global scan alive on any error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search handler: route the query to the film or TV-series scraper."""
    logger.info("[casacinema.py] " + item.url + " search " + texto)

    item.url = host + "?s=" + texto

    # Dispatch on the menu entry that launched the search.
    handlers = {"tvshow": peliculas_tv, "movie": peliculas}
    try:
        handler = handlers.get(item.extra)
        if handler is not None:
            return handler(item)
    # Keep the global search alive on any scraping error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def peliculas(item):
    """Scrape a film listing page into movie Items, plus a pager entry."""
    logger.info("kod.casacinema peliculas")

    itemlist = []

    # Fetch the listing page
    data = httptools.downloadpage(item.url, headers=headers).data

    # One tuple per poster: (url, thumbnail, raw title)
    patron = '<li><a href="([^"]+)"[^=]+="([^"]+)"><div>\s*<div[^>]+>(.*?)<'
    for url, thumb, raw_title in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.decodeHtmlentities(raw_title)

        # Strip decorations ("- Il Film -", "[HD]") to get a clean lookup title.
        clean = re.sub(r'[-–]*\s*[Ii]l [Ff]ilm\s*[-–]*?', '', title).strip()
        clean = clean.replace('[HD]', '').strip()

        # A trailing "(YYYY)" becomes the year infoLabel for TMDB.
        infolabels = {}
        year = scrapertools.find_single_match(title, r'\((\d{4})\)')
        if year:
            clean = clean.replace("(%s)" % year, '').strip()
            infolabels['year'] = year

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 text_color="azure",
                 url=url,
                 thumbnail=thumb,
                 fulltitle=clean,
                 show=clean,
                 plot="",
                 infoLabels=infolabels,
                 extra=item.extra,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    ## Pagination (regex: the space before >Pagina was removed upstream)
    next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Pagina')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
|
||||
def peliculas_tv(item):
    """Scrape a TV-series listing page into show Items, plus a pager entry."""
    logger.info("kod.casacinema peliculas")

    itemlist = []

    # Fetch the listing page
    data = httptools.downloadpage(item.url, headers=headers).data

    # One tuple per poster: (url, thumbnail, raw title)
    patron = '<li><a href="([^"]+)"[^=]+="([^"]+)"><div>\s*<div[^>]+>(.*?)<'
    for url, thumb, raw_title in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.decodeHtmlentities(raw_title)

        # Strip "- Serie Tv -" decorations and quality tags for TMDB lookups.
        clean = re.sub(r'[-–]*\s*[Ss]erie [Tt]v\s*[-–]*?', '', title).strip()
        clean = clean.replace('[HD]', '').replace('[SD]', '').strip()

        # A trailing "(YYYY)" becomes the year infoLabel for TMDB.
        infolabels = {}
        year = scrapertools.find_single_match(title, r'\((\d{4})\)')
        if year:
            clean = clean.replace("(%s)" % year, '').strip()
            infolabels['year'] = year

        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 contentType="tvshow",
                 title=title,
                 text_color="azure",
                 url=url,
                 thumbnail=thumb,
                 fulltitle=clean,
                 show=clean,
                 plot="",
                 infoLabels=infolabels,
                 extra=item.extra,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    ## Pagination (regex: the space before >Pagina was removed upstream)
    next_page = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Pagina')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
def categorias(item):
    """List the genre links found in the page's "Categorie" block."""
    logger.info("kod.casacinema categorias")

    data = httptools.downloadpage(item.url, headers=headers).data

    # Restrict parsing to the category <ul> only.
    bloque = scrapertools.get_match(data, 'Categorie(.*?)</ul>')

    itemlist = []
    for url, name in re.compile('<a href="(.*?)">(.*?)</a></li>', re.DOTALL).findall(bloque):
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + name + "[/COLOR]",
                 extra=item.extra,
                 url=urlparse.urljoin(host, url)))

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """Split a show page into per-season episode Items.

    The page lists all seasons inline. Season headers match
    "Stagione ... (ITA|<n>)"; the text between two consecutive headers is
    handed to load_episodios, which tags every episode with the season's
    language label ("ITA" or "SUB ITA").
    """
    def load_episodios(html, item, itemlist, lang_title):
        # One match per episode anchor row.
        patron = '.*?<a href="[^"]+"[^o]+ofollow[^>]+>[^<]+</a><(?:b|/)[^>]+>'
        matches = re.compile(patron).findall(html)
        for data in matches:
            # Strip anchors/markup and the "Speedvideo" label, keeping the
            # visible episode caption.
            scrapedtitle = scrapertoolsV2.htmlclean(re.sub(r'(<a [^>]+>)*(<\/a>.*)*(Speedvideo)*', '', data)).strip()
            if scrapedtitle != 'Categorie':
                # Normalize the season-x-episode separator and stray semicolons.
                scrapedtitle = scrapedtitle.replace('×', 'x')
                scrapedtitle = scrapedtitle.replace('×', 'x')
                scrapedtitle = scrapedtitle.replace(';', '')
                itemlist.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         contentType="episode",
                         title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                         # url carries the raw anchor HTML; findvideos parses it.
                         url=data,
                         thumbnail=item.thumbnail,
                         extra=item.extra,
                         fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
                         show=item.show))

    logger.info("[casacinema.py] episodios")

    itemlist = []

    # Load the page and keep only the episode section (before the comments).
    data = httptools.downloadpage(item.url).data
    data = scrapertools.decodeHtmlentities(data)
    data = scrapertools.get_match(data, '<p>(?:<strong>|)(.*?)<div id="disqus_thread">')

    # Locate every season header; record its language and its end offset,
    # which is where that season's episode HTML begins.
    lang_titles = []
    starts = []
    patron = r"Stagione.*?(?:ITA|\d+)"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    # Walk season slices: each runs from its header's end to the next
    # header's end (the last one runs to end-of-text via fine == -1).
    i = 1
    len_lang_titles = len(lang_titles)

    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1

        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]

        load_episodios(html, item, itemlist, lang_title)

        i += 1

    # Add-to-videolibrary entry
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios" + "###" + item.extra,
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Collect playable server links for a movie page or an episode entry.

    For TV episodes, item.url already holds the raw anchor HTML captured by
    episodios(); for movies the page is downloaded first.
    """
    logger.info("kod.casacinema findvideos")

    data = item.url if item.extra == "tvshow" else httptools.downloadpage(item.url, headers=headers).data

    # NOTE(review): downloadpage() is fed `data`, which for movies is page
    # HTML rather than a URL — looks wrong; confirm intended behavior.
    html = httptools.downloadpage(data).data
    patron = '"http:\/\/shrink-service\.it\/[^\/]+\/[^\/]+\/([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(html)

    # NOTE(review): this loop has no effect (`data = data` is a no-op and
    # `matches` is never used afterwards) — dead code, possibly an
    # unfinished shrink-service link resolver.
    for url in matches:
        if url is not None:
            data = data
        else:
            continue

    # Let servertools discover known-server links inside `data`.
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Derive a display server name from the "[ server ]" title produced
        # by find_video_items.
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(["[%s] " % color(server, 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Required for link availability checking (user setting)

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required for FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay

    autoplay.start(itemlist, item)

    # Movies (not episodes) get an add-to-videolibrary entry.
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
def color(text, color):
    """Wrap *text* in Kodi [COLOR] markup using the given *color* name."""
    return "[COLOR %s]%s[/COLOR]" % (color, text)
|
||||
22
plugin.video.alfa/channels/cb01anime.json
Normal file
22
plugin.video.alfa/channels/cb01anime.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"id": "cb01anime",
|
||||
"name": "Cb01anime",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://i.imgur.com/bHoUMo2.png",
|
||||
"banner": "http://i.imgur.com/bHoUMo2.png",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
278
plugin.video.alfa/channels/cb01anime.py
Normal file
278
plugin.video.alfa/channels/cb01anime.py
Normal file
@@ -0,0 +1,278 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# ------------------------------------------------------------
|
||||
# XBMC Plugin
|
||||
# Canale per cineblog01 - anime
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import httptools, scrapertools, servertools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
|
||||
host = "https://www.cineblog01.pink"

# Service posts to exclude from the listings (announcements, index pages).
blacklist = ['AVVISO IMPORTANTE – CB01.ROCKS', 'Lista Alfabetica Completa Anime/Cartoon', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE']
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def mainlist(item):
    """Root menu of the cb01 anime channel."""
    logger.info("[cb01anime.py] mainlist")

    # (action, title, url, thumbnail) for the plain menu entries.
    entries = [
        ("list_titles",
         "[COLOR azure]Anime - Novita'[/COLOR]",
         host + '/anime',
         "http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
        ("genere",
         "[COLOR azure]Anime - Per Genere[/COLOR]",
         host + '/anime',
         "http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/Genres.png"),
        ("alfabetico",
         "[COLOR azure]Anime - Per Lettera A-Z[/COLOR]",
         host + '/anime',
         "http://i.imgur.com/IjCmx5r.png"),
        ("listacompleta",
         "[COLOR azure]Anime - Lista Completa[/COLOR]",
         "%s/anime/lista-completa-anime-cartoon/" % host,
         "http://i.imgur.com/IjCmx5r.png"),
    ]
    itemlist = [Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumb)
                for action, title, url, thumb in entries]

    # The search entry has no url but carries the "anime" extra.
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title="[COLOR yellow]Cerca Anime[/COLOR]",
                         extra="anime",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def genere(item):
    """Anime-by-genre menu, built from the genre <select> combo."""
    logger.info("[cb01anime.py] genere")

    bloque_re = '<select name="select2"(.*?)</select>'
    option_re = '<option value="([^"]+)">([^<]+)</option>'
    return build_itemlist(item, bloque_re, option_re, "list_titles")
|
||||
|
||||
|
||||
def alfabetico(item):
    """Anime-by-initial menu, built from the per-letter <select> combo."""
    logger.info("[cb01anime.py] alfabetico")

    bloque_re = '<option value=\'-1\'>Anime per Lettera</option>(.*?)</select>'
    option_re = '<option value="([^"]+)">\(([^<]+)\)</option>'
    return build_itemlist(item, bloque_re, option_re, "list_titles")
|
||||
|
||||
|
||||
def listacompleta(item):
    """Full alphabetical anime list, scraped from the A-Z index page."""
    logger.info("[cb01anime.py] listacompleta")

    bloque_re = '<a href="#char_5a" title="Go to the letter Z">Z</a></span></div>(.*?)</ul></div><div style="clear:both;"></div></div>'
    entry_re = '<li><a href="' + host + '([^"]+)"><span class="head">([^<]+)</span></a></li>'
    return build_itemlist(item, bloque_re, entry_re, "episodios")
|
||||
|
||||
|
||||
def build_itemlist(item, re_bloque, re_patron, iaction):
    """Generic combo/list scraper.

    re_bloque narrows the downloaded page to one HTML block; re_patron
    captures (relative url, title) pairs inside it. Each pair becomes a
    tvshow Item whose action is iaction.
    """
    data = httptools.downloadpage(item.url).data

    # Narrow the page down to the requested block.
    bloque = scrapertools.get_match(data, re_bloque)

    pairs = re.compile(re_patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(pairs)

    itemlist = []
    for relative_url, name in pairs:
        itemlist.append(
            Item(channel=item.channel,
                 action=iaction,
                 contentType="tvshow",
                 title=name,
                 fulltitle=name,
                 text_color="azure",
                 show=name,
                 url=host + relative_url,
                 plot=""))

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def search(item, texto):
    """Anime search: delegate to list_titles on the site's ?s= endpoint."""
    logger.info("[cb01anime.py] " + item.url + " search " + texto)

    item.url = host + "/anime/?s=" + texto
    return list_titles(item)
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def list_titles(item):
    """Scrape an anime index page into show Items, plus a pager entry."""
    logger.info("[cb01anime.py] mainlist")
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data

    # One tuple per post card: (url, thumbnail, raw title)
    patronvideos = r'<div class="span4">\s*<a href="([^"]+)">'
    patronvideos += r'<img src="([^"]+)"[^>]+><\/a>[^>]+>[^>]+>'
    patronvideos += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<\/a>'

    # Language tag ([FULL ITA]/[SUB ITA]) stripped for TMDB lookups.
    lang_tag_re = r'(?:\[[Ff][Uu][Ll]{2}\s*[Ii][Tt][Aa]\]|\[[Ss][Uu][Bb]\s*[Ii][Tt][Aa]\])'

    for url, thumb, raw_title in re.compile(patronvideos, re.DOTALL).findall(data):
        title = scrapertools.htmlclean(raw_title).strip()
        # Skip service posts and the requests list.
        if title in blacklist:
            continue
        if 'lista richieste' in title.lower():
            continue

        cleantitle = re.sub(lang_tag_re, '', title).strip()

        ## ------------------------------------------------
        thumb = httptools.get_url_headers(thumb)
        ## ------------------------------------------------

        itemlist.append(
            Item(channel=item.channel,
                 action="listacompleta" if "Lista Alfabetica Completa Anime/Cartoon" in title else "episodios",
                 contentType="tvshow",
                 title=title,
                 fulltitle=cleantitle,
                 text_color="azure",
                 show=cleantitle,
                 url=url,
                 thumbnail=thumb,
                 viewmode="movie_with_plot"))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pager entry (the rel='next' link is absent on the last page).
    try:
        next_page = scrapertools.get_match(data, "<link rel='next' href='([^']+)'")
        itemlist.append(
            Item(channel=item.channel,
                 action="list_titles",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
    except:
        pass

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def episodios(item):
    """Scrape the episode list for an anime/cartoon show page.

    Downloads item.url, isolates the download block, then splits it on
    <br />/<p> so each fragment holds one episode's links.  For each
    fragment, every <a> is collected into a 'url#label|url#label|...'
    string stored in Item.extra for findvideos() to unpack later.
    Returns a list of Item objects (plus an "add to library" entry when
    the video library is enabled).
    """
    logger.info("[cb01anime.py] episodios")

    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data
    # data = scrapertools.decodeHtmlentities(data)

    # patron1 isolates the whole download block; patron2 extracts each link inside a fragment.
    patron1 = '(?:<p>|<td bgcolor="#ECEAE1">)<span class="txt_dow">(.*?)(?:</p>)?(?:\s*</span>)?\s*</td>'
    patron2 = '<a.*?href="([^"]+)"[^>]*>([^<]+)</a>'
    matches1 = re.compile(patron1, re.DOTALL).findall(data)
    if len(matches1) > 0:
        # Each <br />/<p>-separated fragment is one episode with one or more mirrors.
        for match1 in re.split('<br />|<p>', matches1[0]):
            if len(match1) > 0:
                # Extract the links for this episode
                titulo = None  # first link's text doubles as the episode title
                scrapedurl = ''
                matches2 = re.compile(patron2, re.DOTALL).finditer(match1)
                for match2 in matches2:
                    if titulo is None:
                        titulo = match2.group(2)
                    # Accumulate "url#label|" pairs; findvideos() splits them back apart.
                    scrapedurl += match2.group(1) + '#' + match2.group(2) + '|'
                if titulo is not None:
                    title = item.title + " " + titulo
                    itemlist.append(
                        Item(channel=item.channel,
                             action="findvideos",
                             contentType="episode",
                             title=title,
                             extra=scrapedurl,
                             fulltitle=item.fulltitle,
                             show=item.show))

    # Offer "add series to library" when the video library is enabled and we found episodes.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def findvideos(item):
    """Expand the 'url#label|url#label|...' string built by episodios()
    into one playable Item per mirror.

    item.extra carries the packed pairs; each non-empty URL becomes an
    Item with action="play".  Returns the list of playable Items.
    """
    logger.info("[cb01anime.py] findvideos")

    itemlist = []

    # Pairs are separated by '|'; url and label inside a pair by '#'.
    for match in item.extra.split(r'|'):
        match_split = match.split(r'#')
        scrapedurl = match_split[0]
        if len(scrapedurl) > 0:
            scrapedtitle = match_split[1]
            title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     fulltitle=item.fulltitle,
                     show=item.show,
                     # BUG FIX: was 'ontentType=' (typo); the content type was
                     # silently stored under a bogus keyword and never reached
                     # the played item.
                     contentType=item.contentType,
                     folder=False))

    return itemlist
|
||||
|
||||
|
||||
# =================================================================
|
||||
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
def play(item):
    """Resolve item.url into playable video Items via servertools.

    '/goto/' URLs embed the real target base64-encoded after the marker
    (Python 2 str.decode('base64')).  The resulting URL/data is handed to
    servertools.find_video_items(); each found item is stamped with this
    item's titles, thumbnail, channel and content type.
    """
    logger.info("[cb01anime.py] play")

    # '/goto/<b64>' wrapper: the real URL is base64-encoded after the marker.
    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')

    data = item.url

    logger.debug("##### Play data ##\n%s\n##" % data)
    itemlist = servertools.find_video_items(data=data)

    # Propagate display metadata onto every resolved video item.
    for videoitem in itemlist:
        videoitem.title = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    return itemlist
|
||||
|
||||
31
plugin.video.alfa/channels/cineblog01.json
Normal file
31
plugin.video.alfa/channels/cineblog01.json
Normal file
@@ -0,0 +1,31 @@
|
||||
{
|
||||
"id": "cineblog01",
|
||||
"name": "Cineblog01",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/cineblog01.png",
|
||||
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/cineblog01.png",
|
||||
"categories": [
|
||||
"tvshow",
|
||||
"movie","cult","top channels"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
602
plugin.video.alfa/channels/cineblog01.py
Normal file
602
plugin.video.alfa/channels/cineblog01.py
Normal file
@@ -0,0 +1,602 @@
|
||||
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Kodi on Demand - Kodi Addon
# Channel for cineblog01
# ------------------------------------------------------------
import re
import urlparse

from channels import autoplay
from core import scrapertools, httptools, servertools, tmdb
from core.item import Item
from lib import unshortenit
from platformcode import logger, config

# Set dynamically below by probing the site's redirect (placeholders until then).
host = ""
headers = ""

# NOTE(review): this performs a network request at import time; if the probe
# fails or the 'location' header is missing, importing the module raises.
permUrl = httptools.downloadpage('https://www.cb01.uno/', follow_redirects=False).headers
# The site redirects to a Google site: search whose query contains the current domain.
host = 'https://www.'+permUrl['location'].replace('https://www.google.it/search?q=site:', '')
headers = [['Referer', host]]

# Servers/qualities offered to the autoplay module.
list_servers = ['openload', 'streamango', 'wstream']
list_quality = ['HD', 'SD']

# Titles of 'service' articles to exclude from scraped listings.
blacklist = ['Aggiornamento Quotidiano Serie TV', 'Richieste Serie TV', 'CB01.UNO ▶ TROVA L’INDIRIZZO UFFICIALE', 'COMING SOON!', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬']
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: movie listings (new, HD, by genre/year),
    movie/TV-show search, and TV-series listing; then append the autoplay
    configuration entry."""
    logger.info("[cineblog01.py] mainlist")

    autoplay.init(item.channel, list_servers, list_quality)

    # Main options
    itemlist = [Item(channel=item.channel,
                     action="peliculas",
                     title="[COLOR azure]Novita'[/COLOR]",
                     url=host,
                     extra="movie",
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="peliculas",
                     title="[COLOR azure]Alta Definizione [HD][/COLOR]",
                     url="%s/tag/film-hd-altadefinizione/" % host,
                     extra="movie",
                     thumbnail="http://jcrent.com/apple%20tv%20final/HD.png"),
                Item(channel=item.channel,
                     action="menuhd",
                     title="[COLOR azure]Menù HD[/COLOR]",
                     url=host,
                     extra="movie",
                     thumbnail="http://files.softicons.com/download/computer-icons/disks-icons-by-wil-nichols/png/256x256/Blu-Ray.png"),
                Item(channel=item.channel,
                     action="menugeneros",
                     title="[COLOR azure]Per Genere[/COLOR]",
                     url=host,
                     extra="movie",
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="menuanyos",
                     title="[COLOR azure]Per Anno[/COLOR]",
                     url=host,
                     extra="movie",
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="search",
                     title="[COLOR yellow]Cerca Film[/COLOR]",
                     extra="movie",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
                Item(channel=item.channel,
                     action="listserie",
                     title="[COLOR azure]Serie Tv - Novita'[/COLOR]",
                     url="%s/serietv/" % host,
                     extra="tvshow",
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="search",
                     title="[COLOR yellow]Cerca Serie Tv[/COLOR]",
                     extra="tvshow",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """Global 'newest' hook: return the latest movie Items for this channel.

    Only categoria == "film" is handled, scraping the site's
    'last 100 films added' page.  Any scraping error is logged and an
    empty list returned so the global listing keeps working.
    """
    logger.info("[cineblog01.py] newest")
    itemlist = []
    item = Item()
    if categoria == "film":
        item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
        item.extra = "movie"
        try:
            # Load the page and isolate the 'last 100 films' block.
            data = httptools.downloadpage(item.url).data
            blocco = scrapertools.get_match(data, 'Ultimi 100 film aggiunti:.*?<\/div>')
            patron = '<a href="([^"]+)">([^<]+)<\/a>'
            matches = re.compile(patron, re.DOTALL).findall(blocco)

            for scrapedurl, scrapedtitle in matches:
                itemlist.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         contentType="movie",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title=scrapedtitle,
                         text_color="azure",
                         url=scrapedurl,
                         extra=item.extra,
                         viewmode="movie_with_plot"))
        # Swallow scraping errors so the global 'newest' listing is not broken.
        except:
            import sys
            for line in sys.exc_info():
                logger.error("{0}".format(line))
            return []
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """Scrape a movie listing page into Items, enrich them via TMDB, and
    append a 'next page' Item when pagination is detected.

    Blacklisted 'service' article titles are skipped.  The year is pulled
    out of the title into infoLabels so TMDB lookups work.
    """
    logger.info("[cineblog01.py] peliculas")
    itemlist = []

    if item.url == "":
        item.url = host

    # Load the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries: groups are (thumbnail, url, title, quality tag, plot).
    patronvideos = '<div class="span4".*?<a.*?<p><img src="([^"]+)".*?'
    patronvideos += '<div class="span8">.*?<a href="([^"]+)"> <h1>([^"]+)</h1></a>.*?'
    patronvideos += '<strong>([^<]*)[<br />,</strong>].*?<br />([^<+]+)'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedtitle = scrapertools.unescape(match.group(3))
        if not scrapedtitle in blacklist:
            scrapedurl = urlparse.urljoin(item.url, match.group(2))
            scrapedthumbnail = urlparse.urljoin(item.url, match.group(1))
            scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
            scrapedplot = scrapertools.unescape("[COLOR orange]" + match.group(4) + "[/COLOR]\n" + match.group(5).strip())
            scrapedplot = scrapertools.htmlclean(scrapedplot).strip()

            # Strip [HD]/[3D]/[Sub-ITA] tags and the year to get a clean lookup title.
            cleantitle = re.sub(r'(?:\[HD/?3?D?\]|\[Sub-ITA\])', '', scrapedtitle)
            year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
            infolabels = {}
            if year:
                cleantitle = cleantitle.replace("(%s)" % year, '').strip()
                infolabels['year'] = year

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="movie",
                     title=scrapedtitle,
                     fulltitle=cleantitle,
                     text_color="azure",
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     infoLabels=infolabels,
                     show=cleantitle,
                     extra=item.extra))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Next page mark
    #next_page = scrapertools.find_single_match(data, r"<link rel='next' href='(.*?)' />")
    #if not next_page:
    next_page = scrapertools.find_single_match(data, r'<li class="active_page"><a href="[^"]+">\d+</a></li>\s<li><a href="([^"]+)">\d+</a></li>')

    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
def menugeneros(item):
    """List the 'genre' menu: options of the page's second <select> combo."""
    logger.info("[cineblog01.py] menugeneros")
    return menulist(item, '<select name="select2"(.*?)</select>')
|
||||
|
||||
|
||||
def menuhd(item):
    """List the 'HD' menu: options of the page's first <select> combo."""
    logger.info("[cineblog01.py] menuhd")
    return menulist(item, '<select name="select1"(.*?)</select>')
|
||||
|
||||
|
||||
def menuanyos(item):
    """List the 'by year' menu: options of the page's third <select> combo."""
    # NOTE(review): log label says "menuvk" — looks like a copy-paste leftover.
    logger.info("[cineblog01.py] menuvk")
    return menulist(item, '<select name="select3"(.*?)</select>')
|
||||
|
||||
|
||||
def menulist(item, re_txt):
    """Shared helper for the combo-based menus (genre/HD/year).

    re_txt is a regex (with one capture group) that isolates the wanted
    <select> block in the page; each of its <option> entries becomes a
    browsable Item with action="peliculas".
    """
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # Narrow search by selecting only the combo
    bloque = scrapertools.get_match(data, re_txt)

    # The categories are the options for the combo
    patron = '<option value="([^"]+)">([^<]+)</option>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)

    for url, titulo in matches:
        scrapedtitle = titulo
        # Option values may be relative paths; resolve against the page URL.
        scrapedurl = urlparse.urljoin(item.url, url)
        scrapedthumbnail = ""
        scrapedplot = ""
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 plot=scrapedplot))

    return itemlist
|
||||
|
||||
|
||||
# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro
|
||||
# Because the function is named "search", the launcher prompts for a query
# and passes it in as 'texto'.
def search(item, texto):
    """Run a site search for 'texto', dispatching on item.extra
    ("movie" or "tvshow") to the matching listing parser.

    Errors are logged and an empty list returned so the global search
    can continue with other channels.
    """
    logger.info("[cineblog01.py] " + item.url + " search " + texto)

    try:

        if item.extra == "movie":
            item.url = host + "/?s=" + texto
            return peliculas(item)
        if item.extra == "tvshow":
            item.url = host + "/serietv/?s=" + texto
            return listserie(item)

    # Keep the (global) search going on any error.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def listserie(item):
    """Scrape a TV-series listing page into Items (action="season_serietv"),
    enrich via TMDB, and append a 'next page' Item when present."""
    logger.info("[cineblog01.py] listaserie")
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data

    # Extract entries: groups are (url, thumbnail, title, plot).
    patronvideos = '<div class="span4">\s*<a href="([^"]+)"><img src="([^"]+)".*?<div class="span8">.*?<h1>([^<]+)</h1></a>(.*?)<br><a'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedtitle = scrapertools.unescape(match.group(3))
        if not scrapedtitle in blacklist:
            scrapedurl = match.group(1)
            scrapedthumbnail = match.group(2)
            scrapedplot = scrapertools.unescape(match.group(4))
            scrapedplot = scrapertools.htmlclean(scrapedplot).strip()
            itemlist.append(
                Item(channel=item.channel,
                     action="season_serietv",
                     contentType="tvshow",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     extra=item.extra,
                     plot=scrapedplot))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Next page mark
    next_page = scrapertools.find_single_match(data, "<link rel='next' href='(.*?)' />")

    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="listserie",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
|
||||
def season_serietv(item):
    """Split a series page's episode table into one Item per season.

    Season headings are lines starting with 'Serie'/'Stagione'; the raw
    HTML slice between two headings is stored in Item.url for
    episodios() to parse further.
    """
    def load_season_serietv(html, item, itemlist, season_title):
        # Append one season Item; its 'url' carries the raw HTML slice of that season.
        if len(html) > 0 and len(season_title) > 0:
            itemlist.append(
                Item(channel=item.channel,
                     action="episodios",
                     title="[COLOR azure]%s[/COLOR]" % season_title,
                     contentType="episode",
                     url=html,
                     extra="tvshow",
                     show=item.show))

    itemlist = []

    # Load the page and keep only the episode table body.
    data = httptools.downloadpage(item.url).data
    # data = scrapertools.decodeHtmlentities(data)
    data = scrapertools.get_match(data, '<td bgcolor="#ECEAE1">(.*?)</table>')

    # for x in range(0, len(scrapedtitle)-1):
    # logger.debug('%x: %s - %s',x,ord(scrapedtitle[x]),chr(ord(scrapedtitle[x])))
    # chr(226)+chr(128)+chr(147) is the UTF-8 byte sequence for an en dash;
    # normalise " – " to " - " so later splitting is uniform.
    blkseparator = chr(32) + chr(226) + chr(128) + chr(147) + chr(32)
    data = data.replace(blkseparator, ' - ')

    # Collect each season heading and the offset where its content starts.
    starts = []
    season_titles = []
    patron = '^(?:seri|stagion)[i|e].*$'
    matches = re.compile(patron, re.MULTILINE | re.IGNORECASE).finditer(data)
    for match in matches:
        if match.group() != '':
            season_titles.append(match.group())
            starts.append(match.end())

    i = 1
    len_season_titles = len(season_titles)

    # Slice the data between consecutive headings (last season runs to data[:-1]).
    while i <= len_season_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_season_titles else -1

        html = data[inizio:fine]
        season_title = season_titles[i - 1]
        load_season_serietv(html, item, itemlist, season_title)
        i += 1

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """Return the episode Items for a season (TV shows only), plus an
    'add series to library' Item when the video library is enabled."""
    itemlist = []

    if item.extra == "tvshow":
        itemlist.extend(episodios_serie_new(item))

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 # "episodios###<extra>" tells the library importer which parser to call.
                 extra="episodios" + "###" + item.extra,
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def episodios_serie_new(item):
    """Parse the raw season HTML stored in item.url into episode Items.

    The language tag (ITA / SUB ITA) is inferred from the season title.
    Each matched run of link anchors becomes one episode; the raw run is
    stored in Item.url for findvid_serie() to dissect.
    """
    def load_episodios(html, item, itemlist, lang_title):
        # for data in scrapertools.decodeHtmlentities(html).splitlines():
        # One match = one run of consecutive target="_blank" anchors (one episode's mirrors).
        patron = '((?:.*?<a href=".*?"[^=]+="_blank"[^>]+>.*?<\/a>)+)'
        matches = re.compile(patron).findall(html)
        for data in matches:
            # Episode label is the text preceding the first anchor, tags stripped.
            scrapedtitle = data.split('<a ')[0]
            scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            if scrapedtitle != 'Categorie':
                # Normalise the multiplication sign in "1×01"-style numbering.
                scrapedtitle = scrapedtitle.replace('×', 'x')
                if scrapedtitle.find(' - ') > 0:
                    scrapedtitle = scrapedtitle[0:scrapedtitle.find(' - ')]
                itemlist.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         contentType="episode",
                         title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                         url=data,
                         thumbnail=item.thumbnail,
                         extra=item.extra,
                         fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
                         show=item.show))

    logger.info("[cineblog01.py] episodios")

    itemlist = []

    # Derive the language tag from the season title.
    lang_title = item.title
    if lang_title.upper().find('SUB') > 0:
        lang_title = 'SUB ITA'
    else:
        lang_title = 'ITA'

    # item.url holds raw HTML (sliced by season_serietv), not a URL.
    html = item.url
    load_episodios(html, item, itemlist, lang_title)

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Route to the movie or episode link scraper based on content type.

    Unknown content types yield an empty list.
    """
    kind = item.contentType
    if kind == "movie":
        result = findvid_film(item)
    elif kind == "episode":
        result = findvid_serie(item)
    else:
        result = []
    return result
|
||||
|
||||
|
||||
def findvid_film(item):
    """Scrape a movie page's streaming/download sections into playable Items.

    Each section (Streaming, Streaming HD, 3D, Download, Download HD) is
    isolated with its own regex and parsed by the shared load_links()
    closure (which reads 'data' and 'QualityStr' from this scope).
    Falls back to a generic server scan when nothing matches.
    """
    def load_links(itemlist, re_txt, color, desc_txt, quality=""):
        # Isolate this section's table and collect its (url, server-name) rows.
        streaming = scrapertools.find_single_match(data, re_txt)
        patron = '<td><a[^h]href="([^"]+)"[^>]+>([^<]+)<'
        matches = re.compile(patron, re.DOTALL).findall(streaming)
        for scrapedurl, scrapedtitle in matches:
            logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
            title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.title + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     server=scrapedtitle,
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     show=item.show,
                     quality=quality,
                     contentType=item.contentType,
                     folder=False))

    logger.info("[cineblog01.py] findvid_film")

    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data
    # data = scrapertools.decodeHtmlentities(data)

    # Extract the quality label; the last match wins, with its first 6 chars dropped.
    patronvideos = '>([^<]+)</strong></div>'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)
    QualityStr = ""
    for match in matches:
        QualityStr = scrapertools.unescape(match.group(1))[6:]

    # Extract links - Streaming
    load_links(itemlist, '<strong>Streaming:</strong>(.*?)<table height="30">', "orange", "Streaming", "SD")

    # Extract links - Streaming HD
    load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<table height="30">', "yellow", "Streaming HD", "HD")

    # Autoplay only considers the SD/HD streaming links gathered so far.
    autoplay.start(itemlist, item)

    # Extract links - Streaming 3D
    load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<table height="30">', "pink", "Streaming 3D")

    # Extract links - Download
    load_links(itemlist, '<strong>Download:</strong>(.*?)<table height="30">', "aqua", "Download")

    # Extract links - Download HD
    load_links(itemlist, '<strong>Download HD[^<]+</strong>(.*?)<table width="100%" height="20">', "azure",
               "Download HD")

    # Fallback: generic server detection when no section matched.
    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(item=item)

    return itemlist
|
||||
|
||||
|
||||
def findvid_serie(item):
    """Turn one episode's raw anchor-run HTML (in item.url) into playable Items.

    The HTML may contain several labelled blocks of links (e.g. per
    server/quality); block labels are discovered by scanning the text
    between anchors, then each block's slice is parsed by
    load_vid_series().
    """
    def load_vid_series(html, item, itemlist, blktxt):
        # Prefix the link title with the block label (minus its trailing ':').
        if len(blktxt) > 2:
            vtype = blktxt.strip()[:-1] + " - "
        else:
            vtype = ''
        patron = '<a href="([^"]+)"[^=]+="_blank"[^>]+>(.*?)</a>'
        # Extract the links of this block
        matches = re.compile(patron, re.DOTALL).finditer(html)
        for match in matches:
            scrapedurl = match.group(1)
            scrapedtitle = match.group(2)
            title = item.title + " [COLOR blue][" + vtype + scrapedtitle + "][/COLOR]"
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     title=title,
                     url=scrapedurl,
                     fulltitle=item.fulltitle,
                     show=item.show,
                     contentType=item.contentType,
                     folder=False))

    logger.info("[cineblog01.py] findvid_serie")

    itemlist = []
    lnkblk = []   # block labels
    lnkblkp = []  # start offset of each block in 'data'

    # item.url holds raw HTML for this episode, not a URL.
    data = item.url

    # First block of links: a "<title> - <label>:" prefix before the first anchor,
    # or an unlabelled block starting at the first anchor.
    if data[0:data.find('<a')].find(':') > 0:
        lnkblk.append(data[data.find(' - ') + 3:data[0:data.find('<a')].find(':') + 1])
        lnkblkp.append(data.find(' - ') + 3)
    else:
        lnkblk.append(' ')
        lnkblkp.append(data.find('<a'))

    # Find further block labels: non-' - ' text between consecutive anchors.
    patron = '<a\s[^>]+>[^<]+</a>([^<]+)'
    matches = re.compile(patron, re.DOTALL).finditer(data)
    for match in matches:
        sep = match.group(1)
        if sep != ' - ':
            lnkblk.append(sep)

    # Resolve each later label to its offset, searching past the previous block.
    i = 0
    if len(lnkblk) > 1:
        for lb in lnkblk[1:]:
            lnkblkp.append(data.find(lb, lnkblkp[i] + len(lnkblk[i])))
            i = i + 1

    # Parse each block's slice; the last block runs to the end of the data.
    for i in range(0, len(lnkblk)):
        if i == len(lnkblk) - 1:
            load_vid_series(data[lnkblkp[i]:], item, itemlist, lnkblk[i])
        else:
            load_vid_series(data[lnkblkp[i]:lnkblkp[i + 1]], item, itemlist, lnkblk[i])

    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Resolve the link wrappers used by the site and return playable Items.

    Handles, in order: the '/film/' redirect wrapper, the base64 '/goto/'
    wrapper, a domain rename, then either the 'go.php' interstitial page
    or the packed-JS '/link/' page; anything else is treated as a direct
    URL/data blob for servertools.
    """
    logger.info("[cineblog01.py] play")
    itemlist = []

    ### Handling new cb01 wrapper
    # host[9:] strips 'https://w' so the check is scheme/prefix-tolerant.
    if host[9:] + "/film/" in item.url:
        iurl = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get("location", "")
        logger.info("/film/ wrapper: %s" % iurl)
        if iurl:
            item.url = iurl

    # '/goto/<b64>' wrapper: real URL is base64-encoded after the marker (Python 2 codec).
    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')

    # The site moved domains; rewrite stale links.
    item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')

    logger.debug("##############################################################")
    if "go.php" in item.url:
        # Interstitial page: try the JS redirect, then the 'clicca qui' button,
        # finally the raw Location header; unshorten whatever we got.
        data = httptools.downloadpage(item.url).data
        try:
            data = scrapertools.get_match(data, 'window.location.href = "([^"]+)";')
        except IndexError:
            try:
                # data = scrapertools.get_match(data, r'<a href="([^"]+)">clicca qui</a>')
                # Alternatively, since sometimes "Clicca qui per proseguire" appears:
                data = scrapertools.get_match(data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
            except IndexError:
                data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get(
                    "location", "")
        data, c = unshortenit.unwrap_30x_only(data)
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    elif "/link/" in item.url:
        # Packed-JS page: unpack p.a.c.k.e.d JS, pull the 'var link' URL, unshorten,
        # and follow relative links through swzz.xyz.
        data = httptools.downloadpage(item.url).data
        from lib import jsunpack

        try:
            data = scrapertools.get_match(data, "(eval\(function\(p,a,c,k,e,d.*?)</script>")
            data = jsunpack.unpack(data)
            logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
        except IndexError:
            logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)

        data = scrapertools.find_single_match(data, 'var link(?:\s)?=(?:\s)?"([^"]+)";')
        data, c = unshortenit.unwrap_30x_only(data)
        if data.startswith('/'):
            data = urlparse.urljoin("http://swzz.xyz", data)
            data = httptools.downloadpage(data).data
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    else:
        data = item.url
        logger.debug("##### play else data ##\n%s\n##" % data)
    logger.debug("##############################################################")

    try:
        itemlist = servertools.find_video_items(data=data)

        # Propagate display metadata onto every resolved video item.
        for videoitem in itemlist:
            videoitem.title = item.show
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.contentType = item.contentType
            videoitem.channel = item.channel
    except AttributeError:
        logger.error("vcrypt data doesn't contain expected URL")

    return itemlist
|
||||
|
||||
30
plugin.video.alfa/channels/cineblog01blog.json
Normal file
30
plugin.video.alfa/channels/cineblog01blog.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"id": "cineblog01blog",
|
||||
"name": "Cineblog01Blog",
|
||||
"language": ["it"],
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"thumbnail": "https://www.cineblog01.cloud/templates/cineblog01/images/logo.png",
|
||||
"banner": "https://www.cineblog01.cloud/templates/cineblog01/images/logo.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
213
plugin.video.alfa/channels/cineblog01blog.py
Normal file
213
plugin.video.alfa/channels/cineblog01blog.py
Normal file
@@ -0,0 +1,213 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per cineblog01blog
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from platformcode import logger, config
|
||||
from core import httptools, scrapertools, servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
host = "https://www.cineblog01.cloud"
|
||||
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def mainlist(item):
    """Build the channel root menu: new films, categories, films by year, search."""
    logger.info()
    itemlist = [Item(channel=item.channel,
                     action="peliculas",
                     title=color("Nuovi film", "azure"),
                     url="%s/new-film-streaming/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="categorie",
                     title=color("Categorie", "azure"),
                     url=host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="filmperanno",
                     title=color("Film per anno", "azure"),
                     url=host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title=color("Cerca ..." , "yellow"),
                     action="search",
                     extra="movie",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def newest(categoria):
    """Global 'newest' hook: latest films from the 'new-film-streaming' page.

    A trailing pagination Item (action="peliculas") is popped off so the
    global listing only shows real movies.  Errors are logged and an
    empty list returned.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = "%s/new-film-streaming" % host
            item.action = "peliculas"
            itemlist = peliculas(item)

            # Drop the trailing 'next page' entry, if any.
            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Keep the global 'newest' listing going on any error.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def search(item, texto):
    """Run a site search for 'texto' via the /xfsearch/ endpoint and list
    the results; errors are logged and an empty list returned so the
    global search can continue."""
    logger.info()
    item.url = host + "/xfsearch/" + texto
    try:
        return peliculas(item)
    # Keep the (global) search going on any error.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def categorie(item):
    """List the site's category menu: each <li><a> entry inside the first
    'drop' menu block becomes a browsable Item (action="peliculas")."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # Isolate the drop-down menu block, then pull its links.
    blocco = scrapertools.get_match(data, r'<ul>\s*<li class="drop">(.*?)</ul>')
    patron = r'<li><a href="([^"]+)">([^"]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title=scrapedtitle,
                 # Category hrefs are site-relative; prefix the host.
                 url="".join([host, scrapedurl]),
                 folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def filmperanno(item):
    """List the 'Film per anno' (films by year) submenu entries."""
    logger.info()

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.get_match(
        data, r'<li class="drop"><a.*?class="link1"><b>Film per anno</b></a>(.*?)</ul>')
    entries = re.compile(r'<li><a href="([^"]+)">([^"]+)</a></li>', re.DOTALL).findall(blocco)

    itemlist = []
    for year_url, year_title in entries:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title=scrapertools.decodeHtmlentities(year_title).strip(),
                 url=year_url,
                 folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def peliculas(item):
    """List the movies on the current page, following pagination.

    Every candidate detail page is fetched once so entries without a real
    embedded player ("fake" links) can be skipped.  Scraping continues
    across pages until a page yields no results or has no "Avanti" link.
    """
    logger.info()
    itemlist = []

    while True:
        data = httptools.downloadpage(item.url).data
        patron = r'<div class="short-story">\s*<a href="([^"]+)".*?>\s*'
        patron += r'<img.*?style="background:url\(([^\)]+)\).*?">'
        patron += r'\s*<div class="custom-title">([^<]+)</div>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
            year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
            # Only colorize when a year was actually found:
            # str.replace('', ...) would inject the color markup between
            # every character of the title.
            if year:
                scrapedtitle = scrapedtitle.replace(year, color(year, "red"))

            # Bypass fake links: keep only pages with an embedded player iframe.
            # (Distinct names so the outer patron/matches are not clobbered.)
            html = httptools.downloadpage(scrapedurl).data
            player_patron = '<div class="video-player-plugin">([\s\S]*)<div class="wrapper-plugin-video">'
            player_blocks = re.compile(player_patron, re.DOTALL).findall(html)
            for player_html in player_blocks:
                if "scrolling" not in player_html:
                    continue

                itemlist.append(infoIca(
                    Item(channel=item.channel,
                         action="findvideos",
                         contentType="movie",
                         title=scrapedtitle,
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         extra="movie",
                         thumbnail=scrapedthumbnail,
                         folder=True), tipo="movie"))

        # Pagination: follow the "Avanti" (next) link, or stop.
        patronvideos = r'<a href="([^"]+)">Avanti</a>'
        next_page = scrapertools.find_single_match(data, patronvideos)

        if not next_page:
            break
        else:
            item.url = next_page
            # Once something was collected, emit a "next page" entry and stop;
            # with an empty list keep looping to the next page instead.
            if itemlist:
                itemlist.append(
                    Item(
                        channel=item.channel,
                        action="peliculas",
                        title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                        url=item.url,
                        thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                        folder=True))
                break

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def findvideos(item):
    """Extract playable video links from the movie detail page."""
    logger.info()
    page = httptools.downloadpage(item.url).data

    itemlist = servertools.find_video_items(data=page)

    for video in itemlist:
        # Derive a clean server name from the detected title.
        server_name = re.sub(r'[-\[\]\s]+', '', video.title)
        video.title = "[%s] " % color(server_name, 'orange') + item.title
        video.fulltitle = item.fulltitle
        video.show = item.show
        video.thumbnail = item.thumbnail
        video.channel = item.channel

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def color(text, color):
    """Wrap *text* in Kodi [COLOR] markup using the given color name."""
    return "[COLOR %s]%s[/COLOR]" % (color, text)
|
||||
|
||||
# ================================================================================================================
|
||||
60
plugin.video.alfa/channels/cinemalibero.json
Normal file
60
plugin.video.alfa/channels/cinemalibero.json
Normal file
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"id": "cinemalibero",
|
||||
"name": "Cinemalibero",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
|
||||
"banner": "https://www.cinemalibero.center/wp-content/themes/Cinemalibero%202.0/images/logo02.png",
|
||||
"categories": [
|
||||
"tvshow", "movie","anime", "sport"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
335
plugin.video.alfa/channels/cinemalibero.py
Normal file
335
plugin.video.alfa/channels/cinemalibero.py
Normal file
@@ -0,0 +1,335 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per CinemaLibero - First Version
|
||||
# Alhaziel
|
||||
# ------------------------------------------------------------
|
||||
import base64
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
from lib import unshortenit
|
||||
from platformcode import config
|
||||
from core.tmdb import infoIca
|
||||
|
||||
# Necessario per Autoplay
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['wstream', 'openload', 'streamango', 'akstream', 'clipwatching', 'cloudvideo', 'youtube']
|
||||
list_quality = ['default']
|
||||
|
||||
# Necessario per Verifica Link
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cinemalibero')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cinemalibero')
|
||||
|
||||
host = 'https://www.cinemalibero.center'
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
|
||||
def mainlist(item):
    """Top-level menu for the CinemaLibero channel."""
    logger.info('[cinemalibero.py] mainlist')

    autoplay.init(item.channel, list_servers, list_quality)  # AutoPlay support

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         action='video',
                         title='Film',
                         url=host + '/category/film/',
                         contentType='movie',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='sottomenu_film',
                         title='Generi Film',
                         url=host,
                         contentType='movie',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='video',
                         title='Serie TV',
                         url=host + '/category/serie-tv/',
                         contentType='episode',
                         extra='tv',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='video',
                         title='Anime',
                         url=host + '/category/anime-giapponesi/',
                         contentType='episode',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='video',
                         title='Sport',
                         url=host + '/category/sport/',
                         contentType='movie',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='search',
                         title='[B]Cerca...[/B]',
                         thumbnail=''))

    autoplay.show_option(item.channel, itemlist)  # AutoPlay configuration entry

    return itemlist
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto*; never propagate scraper errors."""
    logger.info("[cinemalibero.py] " + item.url + " search " + texto)
    item.url = "%s/?s=%s" % (host, texto)
    try:
        return video(item)
    except:
        # Keep the global search alive: log the exception info and return nothing.
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
|
||||
|
||||
|
||||
def video(item):
    # List the contents (movies / series / anime / sport) of a category page,
    # plus a "next page" entry when pagination is present.
    logger.info('[cinemalibero.py] video')
    itemlist = []

    # Load the page; strip newlines/tabs so the regexes can match across the
    # flattened HTML.
    data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
    block = scrapertools.find_single_match(data, '<div class="container">.*?class="col-md-12">(.*?)<div class=(?:"container"|"bg-dark ")>')

    # One match per title card.
    matches = re.compile(r'<div class="col-lg-3">(.*?)<\/a><\/div>', re.DOTALL).findall(block)

    for match in matches:
        url = scrapertools.find_single_match(match, r'href="([^"]+)"')
        long_title = scrapertools.find_single_match(match, r'<div class="titolo">([^<]+)<\/div>')
        thumb = scrapertools.find_single_match(match, r'url=\((.*?)\)')
        quality = scrapertools.find_single_match(match, r'<div class="voto">([^<]+)<\/div>')
        genere = scrapertools.find_single_match(match, r'<div class="genere">([^<]+)<\/div>')

        # Year and language are embedded in the card title between parentheses.
        year = scrapertools.find_single_match(long_title, r'\(([0-9)]+)') or scrapertools.find_single_match(long_title, r'\) ([0-9)]+)')
        lang = scrapertools.find_single_match(long_title, r'\(([a-zA-Z)]+)')

        # Clean title: drop everything from the first '(' plus stray parentheses.
        title = re.sub(r'\(.*','',long_title)
        title = re.sub(r'(?:\(|\))','',title)
        if genere:
            genere = ' - [' + genere + ']'
        if year:
            long_title = title + ' - ('+ year + ')' + genere
        # NOTE: when a language tag is present it overrides the year-based title.
        if lang:
            long_title = '[B]' + title + '[/B]' + ' - ('+ lang + ')' + genere
        else:
            long_title = '[B]' + title + '[/B]'

        # Pick the action based on the section being browsed.
        if item.contentType == 'movie':
            tipologia = 'movie'
            action = 'findvideos'
        elif item.contentType == 'episode':
            tipologia = 'tv'
            action = 'episodios'
        else:
            # Unknown section (e.g. search results): defer to select(), which
            # sniffs the detail page to decide movie vs series.
            tipologia = 'movie'
            action = 'select'

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action=action,
                 contentType=item.contentType,
                 title=long_title,
                 fulltitle=title,
                 quality=quality,
                 url=url,
                 thumbnail=thumb,
                 infoLabels=year,
                 show=title), tipo=tipologia))

    # Pagination: emit a "next page" entry when the site exposes one.
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers".*?href="([^"]+)">')

    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 action='video',
                 title='[B]' + config.get_localized_string(30992) + ' »[/B]',
                 url=next_page,
                 contentType=item.contentType,
                 thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))

    return itemlist
|
||||
|
||||
|
||||
def select(item):
    # Sniff the detail page to decide whether it is a series, an anime or a
    # movie, then dispatch to episodios() or findvideos() accordingly.
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>')
    if re.findall('rel="category tag">serie', data, re.IGNORECASE):
        logger.info('select = ### è una serie ###')
        return episodios(Item(channel=item.channel,
                              title=item.title,
                              fulltitle=item.fulltitle,
                              url=item.url,
                              extra='serie',
                              contentType='episode'))
    elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
        # Anime category: only treat as episodic when the page lists episodes.
        if re.findall('episodio', block, re.IGNORECASE):
            logger.info('select = ### è un anime ###')
            return episodios(Item(channel=item.channel,
                                  title=item.title,
                                  fulltitle=item.fulltitle,
                                  url=item.url,
                                  extra='anime',
                                  contentType='episode'))
        else:
            # Anime movie: straight to the video links.
            logger.info('select = ### è un film ###')
            return findvideos(Item(channel=item.channel,
                                   title=item.title,
                                   fulltitle=item.fulltitle,
                                   url=item.url,
                                   contentType='movie'))
    else:
        # Default: treat the page as a movie.
        logger.info('select = ### è un film ###')
        return findvideos(Item(channel=item.channel,
                               title=item.title,
                               fulltitle=item.fulltitle,
                               url=item.url,
                               contentType='movie'))
|
||||
|
||||
|
||||
def findvideos(item):  # Questa def. deve sempre essere nominata findvideos
    # Collect streaming links for a movie or a single episode, resolve any
    # URL shorteners, then hand the links to servertools for server detection.
    logger.info('[cinemalibero.py] findvideos')
    itemlist = []
    if item.contentType == 'episode':
        # For episodes item.url already holds the HTML fragment of the episode
        # row (built by episodios()), so no page download is needed.
        data = item.url.lower()
        block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>*?<\/h2>(.*?)<\/div>')
        urls = re.findall('<a.*?href="([^"]+)"', block, re.DOTALL)
    else:
        # For movies, download and flatten the detail page first.
        data = httptools.downloadpage(item.url, headers=headers).data
        data = re.sub(r'\n|\t','',data).lower()
        block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>(.*?)<strong>')
        urls = re.findall('<a href="([^"]+)".*?class="external"', block, re.DOTALL)

    logger.info('URLS'+ str(urls))
    if urls:
        # Resolve shortened links; one resolved URL per line so that
        # servertools.find_video_items can scan them all.
        data =''
        for url in urls:
            url, c = unshortenit.unshorten(url)
            data += url + '\n'

        logger.info('DATA'+ data)
        itemlist = servertools.find_video_items(data=data)

    # Decorate the detected links with the original item's metadata.
    for videoitem in itemlist:
        videoitem.title = item.fulltitle + ' - [COLOR limegreen][[/COLOR]'+videoitem.title+' [COLOR limegreen]][/COLOR]'
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    # "Add to video library" entry (movies only).
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action='add_pelicula_to_library', extra='findservers', contentTitle=item.contentTitle))

    # Optionally verify that the links are alive (user setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Language filtering (FilterTools).
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # AutoPlay hook.
    autoplay.start(itemlist, item)

    return itemlist
|
||||
|
||||
|
||||
|
||||
def episodios(item):  # Questa def. deve sempre essere nominata episodios
    # Build the episode list for a series or anime page.  The page's episode
    # block is normalized with <start>/<stop> sentinel markers so that one
    # regex pass can split it into per-season / per-episode chunks.
    logger.info('[cinemalibero.py] episodios')
    itemlist = []
    extra =''

    # Load the page and sniff its category to pick the parsing strategy.
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>')
    if re.findall('rel="category tag">serie', data, re.IGNORECASE):
        extra='serie'
    elif re.findall('rel="category tag">anime', data, re.IGNORECASE):
        if re.findall('episodi', block, re.IGNORECASE):
            extra='anime'

    # Normalize the block: drop headers, strip wrapper tags, and wrap every
    # <strong> section between <start>/<stop> sentinels for easy splitting.
    block = re.sub(r'<h2>.*?<\/h2>','',block)
    block = block.replace('<p>','').replace('<p style="text-align: left;">','').replace('–<','<').replace('-<','<').replace('–<','<').replace('– <','<').replace('<strong>','<stop><start><strong>')+'<stop>'
    # Drop "stagione completa" (full-season pack) paragraphs.
    block = re.sub(r'stagione completa.*?<\/p>','',block,flags=re.IGNORECASE)

    if extra == 'serie':
        # Series: each <start>…<stop> chunk is a season; the chunk header
        # carries the language tag, the body one line per episode.
        block = block.replace('<br /> <a','<a')
        matches = re.compile(r'<start>.*?(?:stagione|Stagione)(.*?)<\/(?:strong|span)><\/p>(.*?)<stop>', re.DOTALL).findall(block)

        for lang, html in matches:
            lang = re.sub('<.*?>','',lang)
            html = html.replace('<br />','\n').replace('</p>','\n')

            # One (episode title, episode links HTML) pair per line; the raw
            # HTML is stored in item.url and parsed later by findvideos().
            matches = re.compile(r'([^<]+)([^\n]+)\n', re.DOTALL).findall(html)
            for scrapedtitle, html in matches:
                itemlist.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         contentType='episode',
                         title=scrapedtitle + ' - (' + lang + ')',
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         url=html))

    elif extra == 'anime':
        # Anime: drop download/mirror sections, then one <a> tag per episode.
        block = re.sub(r'<start.*?(?:download:|Download:).*?<stop>','',block)
        block = re.sub(r'(?:mirror|Mirror)[^<]+<','',block)
        block = block.replace('<br />','\n').replace('/a></p>','\n')
        block = re.sub(r'<start.*?(?:download|Download).*?\n','',block)
        matches = re.compile('<a(.*?)\n', re.DOTALL).findall(block)
        for html in matches:
            scrapedtitle = scrapertools.find_single_match(html, r'>(.*?)<\/a>')
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType='episode',
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     url=html))

    else:
        # Not episodic after all: hand the page over to findvideos() as a movie.
        logger.info('select = ### è un film ###')
        return findvideos(Item(channel=item.channel,
                               title=item.title,
                               fulltitle=item.fulltitle,
                               url=item.url,
                               show=item.fulltitle,
                               contentType='movie'))

    # "Add series to video library" entry.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
60
plugin.video.alfa/channels/cinemastreaming.json
Normal file
60
plugin.video.alfa/channels/cinemastreaming.json
Normal file
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"id": "cinemastreaming",
|
||||
"name": "Cinemastreaming",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "https://www.telegramitalia.it/wp-content/uploads/2018/02/IMG_20180222_214809_805.jpg",
|
||||
"banner": "https://www.telegramitalia.it/wp-content/uploads/2018/02/IMG_20180222_214809_805.jpg",
|
||||
"categories": [
|
||||
"tvshow", "movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
81
plugin.video.alfa/channels/cinemastreaming.py
Normal file
81
plugin.video.alfa/channels/cinemastreaming.py
Normal file
@@ -0,0 +1,81 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per cinemastreaming
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools
|
||||
from core.item import Item
|
||||
from platformcode import config
|
||||
from core.tmdb import infoIca
|
||||
|
||||
host = 'https://cinemastreaming.info'
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
def mainlist(item):
    """Top-level menu for the Cinemastreaming channel (film listing only)."""
    log()

    film_entry = Item(channel = item.channel,
                      contentType = 'movie',
                      title = 'Film',
                      url = host + '/film/',
                      action = 'video',
                      thumbnail = '',
                      fanart = '')

    return [film_entry]
|
||||
|
||||
def video(item):
    # List the movies on the current page, enriching each entry with the
    # metadata shown on its card (year, rating, plot, genre, director, cast).
    log()

    itemlist = []

    # Load the page and flatten it for single-line regex matching.
    data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertools.get_match(data, r'<main>(.*?)<\/main>')
    block = re.sub('\t|\n', '', block)

    # One match per movie card: (url, thumbnail, title, info HTML).
    patron = r'<article.*?class="TPost C">.*?<a href="([^"]+)">.*?src="([^"]+)".*?>.*?<h3 class="Title">([^<]+)<\/h3>(.*?)<\/article>'
    matches = re.compile(patron, re.DOTALL).findall(block)

    for scrapedurl, scrapedthumb, scrapedtitle, scrapedinfo in matches:
        log('Info Block', scrapedinfo)
        # Parse the card's metadata block; cards missing any field simply
        # yield no match and are appended without infoLabels.
        patron = r'<span class="Year">(.*?)<\/span>.*?<span class="Vote.*?">(.*?)<\/span>.*?<div class="Description"><p>(.*?)<\/p>.*?<p class="Genre.*?">(.*?)<\/p><p class="Director.*?">.*?<a.*?>(.*?)<\/a>.*?<p class="Actors.*?">(.*?)<\/p>'
        info = re.compile(patron, re.DOTALL).findall(scrapedinfo)
        for year, rating, plot, genre, director, cast in info:
            # Genre and cast are lists of <a> links: keep just the link texts.
            genre = scrapertools.find_multiple_matches(genre, r'<a.*?>(.*?)<\/a>')
            cast = scrapertools.find_multiple_matches(cast, r'<a.*?>(.*?)<\/a>')

            infoLabels = {}
            infoLabels['Year'] = year
            infoLabels['Rating'] = rating
            infoLabels['Plot'] = plot
            infoLabels['Genre'] = genre
            infoLabels['Director'] = director
            infoLabels['Cast'] = cast

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumb,
                     infoLabels = infoLabels,
                     show=scrapedtitle,))

    return itemlist
|
||||
|
||||
|
||||
def log(stringa1="", stringa2=""):
    """Log *stringa1* + *stringa2* tagged with this file and the caller's function name."""
    import inspect, os
    from platformcode import logger
    caller_name = inspect.stack()[1][3]
    message = "[" + os.path.basename(__file__) + "] - [" + caller_name + "] " + str(stringa1) + str(stringa2)
    logger.info(message)
|
||||
74
plugin.video.alfa/channels/cinemasubito.json
Normal file
74
plugin.video.alfa/channels/cinemasubito.json
Normal file
@@ -0,0 +1,74 @@
|
||||
{
|
||||
"id": "cinemasubito",
|
||||
"name": "Cinemasubito",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["it"],
|
||||
"thumbnail": "https://www.cinemasubito.biz/uploads/custom-logo.png",
|
||||
"banner": "https://www.cinemasubito.biz/uploads/custom-logo.png",
|
||||
"version": "5",
|
||||
"date": "02/07/2017",
|
||||
"categories": [
|
||||
"tvshow",
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi in ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
|
||||
},
|
||||
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
|
||||
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "2", "5", "10", "15" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
324
plugin.video.alfa/channels/cinemasubito.py
Normal file
324
plugin.video.alfa/channels/cinemasubito.py
Normal file
@@ -0,0 +1,324 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per cinemasubito
|
||||
# ------------------------------------------------------------
|
||||
import binascii, re, urlparse
|
||||
|
||||
from channels import autoplay, filtertools
|
||||
from core import httptools, scrapertools, servertools, tmdb
|
||||
from core.item import Item
|
||||
from lib import jscrypto
|
||||
from platformcode import config, logger
|
||||
|
||||
|
||||
|
||||
|
||||
host = "http://www.cinemasubito.org"
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango', 'youtube']
|
||||
list_quality = ['HD', 'SD']
|
||||
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cinemasubito')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cinemasubito')
|
||||
|
||||
headers = [
|
||||
['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0'],
|
||||
['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
|
||||
['Accept-Encoding', 'gzip, deflate'],
|
||||
['Accept-Language', 'en-US,en;q=0.5'],
|
||||
['Host', host.replace("http://", "")],
|
||||
['DNT', '1'],
|
||||
['Upgrade-Insecure-Requests', '1'],
|
||||
['Connection', 'keep-alive'],
|
||||
['Referer', host],
|
||||
['Cache-Control', 'max-age=0']
|
||||
]
|
||||
|
||||
|
||||
def mainlist(item):
    """Main menu for Cinemasubito: films, genres, search, TV series."""
    logger.info("kod.cinemasubito mainlist")

    autoplay.init(item.channel, list_servers, list_quality)

    movie_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    search_thumb = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Film[/COLOR]",
                         action="peliculas",
                         url="%s/film/pagina/1" % host,
                         extra="movie",
                         thumbnail=movie_thumb))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Film Per Categoria[/COLOR]",
                         action="categorias",
                         url=host,
                         thumbnail=movie_thumb))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca...[/COLOR]",
                         action="search",
                         extra="movie",
                         thumbnail=search_thumb))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Serie TV[/COLOR]",
                         action="peliculas_tv",
                         url="%s/serie" % host,
                         extra="tvshow",
                         thumbnail=movie_thumb))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
                         action="search",
                         extra="tvshow",
                         thumbnail=search_thumb))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Dispatch a site search to the movie or TV scraper based on item.extra."""
    logger.info("kod.cinemasubito " + item.url + " search " + texto)
    item.url = host + "/cerca/" + texto
    try:
        dispatch = {"movie": peliculas, "tvshow": peliculas_tv}
        handler = dispatch.get(item.extra)
        if handler is not None:
            return handler(item)
    except:
        # Keep the global search alive on scraper errors: log and return nothing.
        import sys
        for trace_line in sys.exc_info():
            logger.error("%s" % trace_line)
        return []
|
||||
|
||||
|
||||
def categorias(item):
    """Genre menu scraped from the '<h4>Genere</h4>' sidebar block."""
    itemlist = []

    # Fetch the page and isolate the genre list.
    data = httptools.downloadpage(item.url, headers=headers).data
    bloque = scrapertools.get_match(data, '<h4>Genere</h4>(.*?)<li class="genre">')

    # One entry per genre link; the title prefix "Film genere " is stripped.
    entries = re.compile(r'<a href="([^"]+)" title="([^"]+)">', re.DOTALL).findall(bloque)
    for genre_url, genre_title in entries:
        itemlist.append(
            Item(
                channel=item.channel,
                action="peliculas",
                title=genre_title.replace("Film genere ", ""),
                url=genre_url,
                thumbnail=
                "https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png",
                folder=True))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the movies on the current page and follow pagination.

    Card titles look like "Name (year) [quality]"; quality and year are
    split out so smart titles and the TMDB lookup can use them.
    """
    logger.info("kod.cinemasubito peliculas")
    itemlist = []

    # Load the page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # One match per movie card.
    patron = r'<a href="([^"]+)" title="([^"]+)">\s*<div class="wrt">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        # Plot/thumbnail are not available on the listing page.
        scrapedplot = ""
        scrapedthumbnail = ""
        quality = scrapertools.find_single_match(scrapedtitle, r'\[(.*?)\]')
        year = scrapertools.find_single_match(scrapedtitle, r'\((.*?)\)')

        # contentTitle must stay plain (no year/quality decorations), and only
        # one content kind is set (contentTitle here, contentSerieName for TV).
        contentTitle = scrapertools.find_single_match(scrapedtitle, r'(.*?)(?:\(|\[)')

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentTitle=contentTitle,
                 quality=quality,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR] ",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 extra=item.extra,
                 infoLabels={'year': year}))

    # Enrich every entry with TMDB metadata.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')

    # Pagination.
    patronvideos = r'<a href="[^"]+"[^d]+data-ci-pagination-page[^>]+>[^<]+<\/a><\/span>[^=]+="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def peliculas_tv(item):
    """List TV series from the current listing page.

    Builds one "episodios" item per series, enriches the list via TMDB and
    appends a pagination item when the site exposes a next page.
    Removed dead locals (`title`, `year`, `scrapedplot`, `scrapedthumbnail`)
    that were computed but never used.
    """
    logger.info("kod.cinemasubito peliculas")
    itemlist = []

    # Download the listing page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the series entries (detail url + raw title)
    patron = r'<a href="([^"]+)" title="([^"]+)">\s*<div class="wrt">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        # Quality tag, when present, is embedded in the title as "[...]"
        quality = scrapertools.find_single_match(scrapedtitle, r'\[(.*?)\]')

        # contentSerieName must be the plain title; only one content type
        # (contentTitle OR contentSerieName) may be set per item
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 contentSerieName=scrapedtitle,
                 quality=quality,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 show=scrapedtitle,
                 extra=item.extra))

    # Fill in metadata (artwork, plot, ...) from TMDB for the whole list
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination: look for the "next page" link
    patronvideos = r'<a href="[^"]+"[^d]+data-ci-pagination-page[^>]+>[^<]+<\/a><\/span>[^=]+="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """List the episodes of a series page.

    Also appends the "add series to videolibrary" service item when the
    platform supports it. Removed the no-op `else: scrapedurl = scrapedurl`
    branch from the url-normalization step.
    """
    logger.info("kod.channels.cinemasubito episodios")

    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'href="([^"]+)"><span class="glyphicon glyphicon-triangle-right"></span>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        # Episode links may be relative: prefix the host only when missing
        if host not in scrapedurl:
            scrapedurl = host + scrapedurl

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Service item: add the whole series to the videolibrary
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve playable video links for a movie/episode page.

    The site AES-encrypts its outbound links; each anchor carries the
    ciphertext plus hex-encoded iv and salt, and the page exposes the
    passphrase in the player script. Decrypted urls are fed to servertools
    together with the raw page so every detectable server is listed.
    """
    logger.info("kod.cinemasubito findvideos_tv")

    links = set()
    data = httptools.downloadpage(item.url, headers=headers).data
    # Passphrase used by the page's CryptoJS.AES.decrypt call
    p = scrapertools.find_single_match(data, r'var decrypted = CryptoJS\.AES\.decrypt\(vlinkCrypted, "([^"]+)",')
    # Each entry: (ciphertext, iv-hex, salt-hex)
    urls = scrapertools.find_multiple_matches(data,
                                              r"<li><a rel=[^t]+target=[^c]+class=[^=]+=[^:]+:'(.*?)'[^:]+:'(.*?)'[^:]+:'(.*?)'")
    for url, iv, salt in urls:
        salt = binascii.unhexlify(salt)
        iv = binascii.unhexlify(iv)
        # jscrypto mirrors CryptoJS's AES decryption
        url = jscrypto.decode(url, p, iv=iv, salt=salt)
        # JSON-escaped slashes survive decryption; undo them
        url = url.replace(r'\/', '/')
        links.add(url)

    # Let servertools scan both the decrypted urls and the raw page
    itemlist = servertools.find_video_items(data=str(links) + data)
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Optional link verification (channel setting)
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required by FilterTools (language filtering)
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    # Movies only: offer the "add to videolibrary" service item
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
|
||||
22
plugin.video.alfa/channels/cinetecadibologna.json
Normal file
22
plugin.video.alfa/channels/cinetecadibologna.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"id": "cinetecadibologna",
|
||||
"name": "Cinetecadibologna",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
|
||||
"banner": "http://cinestore.cinetecadibologna.it/pics/logo.gif",
|
||||
"categories": [
|
||||
"documentary", "cult"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
157
plugin.video.alfa/channels/cinetecadibologna.py
Normal file
157
plugin.video.alfa/channels/cinetecadibologna.py
Normal file
@@ -0,0 +1,157 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per cinetecadibologna
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
|
||||
|
||||
host = "http://cinestore.cinetecadibologna.it"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: full film list, epochs, themed paths."""
    logger.info("kod.cinetecadibologna mainlist")
    logo = "http://cinestore.cinetecadibologna.it/pics/logo.gif"
    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Elenco Film - Cineteca di Bologna[/COLOR]",
                         action="peliculas",
                         url="%s/video/alfabetico_completo" % host,
                         thumbnail=logo))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Epoche - Cineteca di Bologna[/COLOR]",
                         action="epoche",
                         url="%s/video/epoche" % host,
                         thumbnail=logo))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Percorsi Tematici - Cineteca di Bologna[/COLOR]",
                         action="percorsi",
                         url="%s/video/percorsi" % host,
                         thumbnail=logo))
    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List films on a cineteca listing page, with plot and pagination.

    Note: one extra HTTP request per entry is issued to scrape the synopsis
    from the film's detail page, so large pages are slow to build.
    Uses the idiomatic `not in` test instead of `not ... in ...`.
    """
    logger.info("kod.cinetecadibologna peliculas")
    itemlist = []

    # Download the listing page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Each entry exposes thumbnail, detail url and title (site-relative urls)
    patron = '<img src="([^"]+)"[^>]+>\s*[^>]+>\s*<div[^>]+>\s*<div[^>]+>[^>]+>\s*<a href="([^"]+)"[^>]+>(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedthumbnail = host + scrapedthumbnail
        scrapedurl = host + scrapedurl
        # Skip anchors that are not video detail pages
        if "/video/" not in scrapedurl:
            continue
        # Fetch the detail page and cut out the "Sinossi" section as the plot
        html = scrapertools.cache_page(scrapedurl)
        start = html.find("Sinossi:")
        end = html.find('<div class="sx_col">', start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
                             title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
                             folder=True))

    # Pagination: "pagina successiva" link in the footer
    patronvideos = '<div class="footerList clearfix">\s*<div class="sx">\s*[^>]+>[^g]+gina[^>]+>\s*[^>]+>\s*<div class="dx">\s*<a href="(.*?)">pagina suc'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
def epoche(item):
    """Menu of historical periods ("Epoche"); each entry opens a film listing."""
    logger.info("kod.cinetecadibologna categorias")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    # Restrict parsing to the "Epoche" list block only
    bloque = scrapertools.get_match(data, '<h1 class="pagetitle">Epoche</h1>(.*?)</ul>')

    # Each epoch is a plain anchor inside the block
    for path, label in re.compile('<a href="([^"]+)">(.*?)<', re.DOTALL).findall(bloque):
        full_url = host + path
        # Labels like "'50" are rendered as "Anni '50"
        if label.startswith(("'")):
            label = label.replace("'", "Anni '")
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + label + "[/COLOR]",
                 url=full_url,
                 thumbnail="http://www.cinetecadibologna.it/pics/cinema-ritrovato-alcinema.png",
                 plot=""))

    return itemlist
|
||||
|
||||
def percorsi(item):
    """Menu of themed collections ("Percorsi Tematici")."""
    logger.info("kod.cinetecadibologna categorias")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    # Each collection card: detail url, cover image, label (site-relative urls)
    patron = '<div class="cover_percorso">\s*<a href="([^"]+)">\s*<img src="([^"]+)"[^>]+>\s*[^>]+>(.*?)<'
    for path, cover, label in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + label + "[/COLOR]",
                 url=host + path,
                 thumbnail=host + cover,
                 plot=""))

    return itemlist
|
||||
|
||||
def findvideos(item):
    """Build directly playable items from stream paths embedded in the page."""
    logger.info("kod.cinetecadibologna findvideos")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    # The player config embeds relative stream paths as: filename: "..."
    for path in re.compile('filename: "(.*?)"', re.DOTALL).findall(data):
        itemlist.append(
            Item(
                channel=item.channel,
                action="play",
                title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
                url=host + path,
                folder=False))

    return itemlist
|
||||
|
||||
36
plugin.video.alfa/channels/cloudvideo.py
Normal file
36
plugin.video.alfa/channels/cloudvideo.py
Normal file
@@ -0,0 +1,36 @@
|
||||
# Conector Cloudvideo By Alfa development Group
|
||||
# --------------------------------------------------------
|
||||
|
||||
import re
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from lib import jsunpack
|
||||
from platformcode import logger
|
||||
|
||||
|
||||
def test_video_exists(page_url):
    """Return (exists, message) for a cloudvideo page; 404 means removed."""
    logger.info("(page_url='%s')" % page_url)
    response = httptools.downloadpage(page_url)
    if response.code == 404:
        return False, "[Cloud] El archivo no existe o ha sido borrado"
    return True, ""
|
||||
|
||||
|
||||
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Return ['label', url] pairs for the streams found on a cloudvideo page."""
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    # Unpack the obfuscated player script.
    # NOTE(review): dec_data is never used below — the <source> scan runs on
    # the raw page instead; possibly it should search dec_data. Confirm
    # against a live page before changing.
    enc_data = scrapertools.find_single_match(data, "type='text/javascript'>(.*?)</script>")
    dec_data = jsunpack.unpack(enc_data)
    # First <source> block of the page's <video> tag
    sources = scrapertools.find_single_match(data, "<source(.*?)</source")
    patron = 'src="([^"]+)'
    matches = scrapertools.find_multiple_matches(sources, patron)
    for url in matches:
        quality = 'm3u8'
        video_url = url
        # Some entries pack "url,label:quality" into a single src attribute
        if 'label' in url:
            url = url.split(',')
            video_url = url[0]
            quality = url[1].replace('label:','')
        video_urls.append(['cloudvideo [%s]' % quality, video_url])
    return video_urls
|
||||
31
plugin.video.alfa/channels/documentaristreamingda.json
Normal file
31
plugin.video.alfa/channels/documentaristreamingda.json
Normal file
@@ -0,0 +1,31 @@
|
||||
{
|
||||
"id": "documentaristreamingda",
|
||||
"name": "DocumentariStreamingDa",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "documentaristreamingda.png",
|
||||
"banner": "documentaristreamingda.png",
|
||||
"version": "1",
|
||||
"date": "27/05/2017",
|
||||
"changes": "Fix import",
|
||||
"categories": ["documentary"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_documentales",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Documentari",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
261
plugin.video.alfa/channels/documentaristreamingda.py
Normal file
261
plugin.video.alfa/channels/documentaristreamingda.py
Normal file
@@ -0,0 +1,261 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi onDemand - XBMC Plugin
|
||||
# Canale per documentaristreamingda
|
||||
# ------------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from platformcode import logger, config
|
||||
from core import httptools, scrapertools, servertools
|
||||
from core.item import Item
|
||||
|
||||
|
||||
|
||||
host = "https://documentari-streaming-da.com"
|
||||
|
||||
|
||||
def mainlist(item):
    """Channel root menu: latest uploads, category browser and search."""
    logger.info("kod.documentaristreamingda mainlist")
    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    return [Item(channel=item.channel,
                 title="[COLOR azure]Aggiornamenti[/COLOR]",
                 action="peliculas",
                 url=host + "/?searchtype=movie&post_type=movie&sl=lasts&s=",
                 thumbnail=thumb),
            Item(channel=item.channel,
                 title="[COLOR azure]Categorie[/COLOR]",
                 action="categorias",
                 url=host + "/documentari-streaming-dataarchive/",
                 thumbnail=thumb),
            Item(channel=item.channel,
                 title="[COLOR yellow]Cerca...[/COLOR]",
                 action="search",
                 thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
|
||||
|
||||
|
||||
def newest(categoria):
    """Entry point for the global "newest" feature (documentaries only).

    Returns the latest-uploads listing; any scraper failure is logged and
    swallowed so the aggregated "newest" view keeps working.
    """
    logger.info("kod.documentaristreamingda newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "documentales":
            item.url = host + "/?searchtype=movie&post_type=movie&sl=lasts&s="
            item.action = "peliculas"
            itemlist = peliculas(item)

            # Drop the trailing pagination item: "newest" wants content only
            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Deliberate best-effort: log the error and return nothing
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List documentary categories; each resolves to its "latest" listing url."""
    itemlist = []

    # Download the archive page and keep only the category submenu
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.get_match(data, 'Categorie</a></li>(.*?)</ul>')

    cat_links = re.compile('<a href="([^"]+)">([^<]+)</a></li>', re.DOTALL).findall(bloque)

    for cat_url, cat_title in cat_links:
        cat_title = scrapertools.decodeHtmlentities(cat_title.replace("Documentari ", ""))

        # Each category page links to its own "Ultime uscite" listing
        html = httptools.downloadpage(cat_url).data
        latest_links = re.compile('>Ultime uscite[^<]+<\/h3><a href="([^"]+)"', re.DOTALL).findall(html)
        for url in latest_links:
            # NOTE(review): this replace is a no-op as written — likely a
            # mangled HTML-entity decode (&#038; -> &); confirm against source
            url = url.replace("&", "&")
            itemlist.append(
                Item(channel=item.channel,
                     action="peliculas",
                     title="[COLOR azure]" + cat_title + "[/COLOR]",
                     url=url,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                     folder=True))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto* and list the results."""
    logger.info("kod.documentaristreamingda " + item.url + " search " + texto)
    item.url = host + "/?searchtype=movie&post_type=movie&s=" + texto
    try:
        return peliculas(item)
    # Deliberate best-effort: swallow errors so global search keeps going
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def peliculas(item):
    """List documentaries on the current results page, plus a next-page item."""
    logger.info("kod.documentaristreamingda peliculas")
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # Poster block carries thumbnail, raw title and detail url, in that order
    patron = '<div class="movie-poster">\s*<img[^s]+src="([^"]+)"[^=]+=[^=]+="([^"]+)"[^>]+>[^<]+<a[^h]+href="([^"]+)"'
    for thumb, raw_title, url in re.compile(patron, re.DOTALL).findall(data):
        # Normalize slug-style titles ("some_doc-streaming" -> "Some Doc")
        clean_title = raw_title.replace("streaming", "").replace("_", " ").replace("-", " ").title()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=clean_title,
                 show=clean_title,
                 title="[COLOR azure]" + clean_title + "[/COLOR]",
                 url=url,
                 viewmode="movie_with_plot",
                 thumbnail=thumb,
                 plot="",
                 folder=True))

    # Pagination
    next_links = re.compile('<a class="next page-numbers" href="(.*?)">', re.DOTALL).findall(data)
    if next_links:
        # NOTE(review): this replace is a no-op as written — likely a mangled
        # HTML-entity decode (&#038; -> &); confirm against the live site
        next_url = urlparse.urljoin(item.url, next_links[0]).replace("&", "&")
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_url,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve download/stream links from a documentary detail page.

    The movie-details block lists links under three different HTML layouts
    (bold headers, strong paragraphs, list items). Each layout is scanned by
    locating its header positions and slicing the text between consecutive
    headers, collecting [title, url, server] triples while de-duplicating by
    url. If nothing matches, servertools scans the raw page as a fallback.
    """
    logger.info("kod.documentaristreamingda findvideos")

    data = httptools.downloadpage(item.url).data

    links = []
    begin = data.find('<div class="moview-details-text">')
    if begin != -1:
        end = data.find('<!-- //movie-details -->', begin)
        mdiv = data[begin:end]

        # Layout 1: <b style=...> headers, anchors with a <b> label
        items = [[m.end(), m.group(1)] for m in re.finditer('<b style="color:#333333;">(.*?)<\/b>', mdiv)]
        if items:
            for idx, val in enumerate(items):
                # Slice the text between this header and the next one
                if idx == len(items) - 1:
                    _data = mdiv[val[0]:-1]
                else:
                    _data = mdiv[val[0]:items[idx + 1][0]]

                for link in re.findall('<a.*?href="([^"]+)"[^>]+>.*?<b>(.*?)<\/b><\/a>+', _data):
                    # De-duplicate by url (links[i][1])
                    if not link[0].strip() in [l[1] for l in links]: links.append(
                        [val[1], link[0].strip(), link[1].strip()])

        # Layout 2: <p><strong> headers; server is unknown at this stage
        items = [[m.end(), m.group(1)] for m in re.finditer('<p><strong>(.*?)<\/strong><\/p>', mdiv)]
        if items:
            _title = ''
            for idx, val in enumerate(items):
                if idx == len(items) - 1:
                    _data = mdiv[val[0]:-1]
                else:
                    _data = mdiv[val[0]:items[idx + 1][0]]

                for link in re.findall('<a\s.*?href="([^"]+)".*?>(?:<span[^>]+>)*(?:<strong>)*([^<]+)', _data):
                    if not link[0].strip() in [l[1] for l in links]:
                        # Keep the last anchor text that is not part of the url
                        if not link[1].strip() in link[0]: _title = link[1].strip()
                        links.append([_title, link[0].strip(), 'unknown'])

        # Layout 3: <li><strong> headers
        items = [[m.start(), m.group(1)] for m in re.finditer('<li><strong>([^<]+)<', mdiv)]
        if items:
            for idx, val in enumerate(items):
                if idx == len(items) - 1:
                    _data = mdiv[val[0]:-1]
                else:
                    _data = mdiv[val[0]:items[idx + 1][0]]

                for link in re.findall('<a\s.*?href="([^"]+)".*?>(?:<span[^>]+>)*(?:<strong>)*([^<]+)', _data):
                    if not link[0].strip() in [l[1] for l in links]: links.append(
                        [val[1], link[0].strip(), link[1].strip()])

    itemlist = []
    if links:
        for l in links:
            # Clean the display title (py2: decode and strip nbsp/fillers)
            title = unicode(l[0], 'utf8', 'ignore')
            title = title.replace(u'\xa0', ' ').replace('Documentario ', '').replace(' doc ', ' ').replace(' streaming',
                                                                                                           '').replace(
                ' Streaming', '')
            url = l[1]
            action = "play"
            server = "unknown"
            folder = False

            # Placeholder anchors or empty titles carry no content
            if url == '#' or not title: continue

            logger.info('server: %s' % l[2])
            if l[2] != 'unknown':
                server = unicode(l[2], 'utf8', 'ignore')
            else:
                # Derive the server name from the url's second-level domain
                logger.info(url)
                match = re.search('https?:\/\/(?:www\.)*([^\.]+)\.', url)
                if match:
                    server = match.group(1)

                # Mirror pages on the sister site need another findvideos pass
                if server == "documentari-streaming-db":
                    action = "findvideos"
                    folder = True
                logger.info('server: %s, action: %s' % (server, action))

            logger.info(title + ' - [COLOR blue]' + server + '[/COLOR]')

            itemlist.append(Item(
                channel=item.channel,
                title=title + ' - [COLOR blue]' + server + '[/COLOR]',
                action=action,
                server=server,  # servertools.get_server_from_url(url),
                url=url,
                thumbnail=item.thumbnail,
                fulltitle=title,
                show=item.show,
                plot=item.plot,
                parentContent=item,
                folder=folder)
            )
    else:
        # Fallback: let servertools detect embedded players in the raw page
        itemlist = servertools.find_video_items(data=data)

        for videoitem in itemlist:
            videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
            videoitem.fulltitle = item.fulltitle
            videoitem.show = item.show
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = item.channel

    return itemlist
|
||||
|
||||
24
plugin.video.alfa/channels/downloadme.json
Normal file
24
plugin.video.alfa/channels/downloadme.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "downloadme",
|
||||
"name": "DownloadMe",
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https://www.downloadme.gratis/wp-content/uploads/2018/07/downloadme-retina-cropped-alternativo.png",
|
||||
"bannermenu": "https://www.downloadme.gratis/wp-content/uploads/2018/07/downloadme-retina-cropped-alternativo.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
168
plugin.video.alfa/channels/downloadme.py
Normal file
168
plugin.video.alfa/channels/downloadme.py
Normal file
@@ -0,0 +1,168 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand .- XBMC Plugin
|
||||
# Canale downloadme
|
||||
# Version: 201804162230
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from platformcode import logger, config
|
||||
from lib import unshortenit
|
||||
|
||||
|
||||
|
||||
|
||||
host = "https://www.downloadme.gratis"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: film listing plus the category browser.

    (Serie TV and Anime sections exist on the site but are disabled here.)
    """
    logger.info("[downloadme.py] mainlist")
    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    return [Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]Film[/COLOR]",
                 url="%s/category/film/" % host,
                 extra="movie",
                 thumbnail=thumb),
            Item(channel=item.channel,
                 action="categorie",
                 title="[COLOR azure]Categorie[/COLOR]",
                 url="%s/" % host,
                 extra="movie",
                 thumbnail=thumb)]
|
||||
|
||||
|
||||
def categorie(item):
    """List the site's category menu entries."""
    logger.info("[downloadme.py] peliculas")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    # Keep only the category <ul> menu
    blocco = scrapertools.find_single_match(data, '<ul id="menu-categorie" class="menu">(.*?)</ul>')
    for cat_url, cat_title in re.compile('<a href="(.*?)">(.*?)</a>', re.DOTALL).findall(blocco):
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 text_color="azure",
                 fulltitle=cat_title,
                 show=cat_title,
                 title=cat_title,
                 url="%s/%s" % (host, cat_url),
                 extra=item.extra,
                 viewmode="movie_with_plot",
                 # NOTE(review): capital-F "Folder" is likely a typo for
                 # "folder" — Item stores arbitrary kwargs, so confirm intent
                 Folder=True))

    return itemlist
|
||||
|
||||
def peliculas(item):
    """List titles on the current page (enriched via infoIca) and paginate."""
    logger.info("[downloadme.py] peliculas")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    for scrapedurl, scrapedtitle in re.compile(r'<a href="(.*?)" title="(.*?)">', re.DOTALL).findall(data):
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 # Movies resolve directly; everything else lists episodes
                 action="findvideos" if 'movie' in item.extra else 'episodes',
                 text_color="azure",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url="%s/%s" % (host, scrapedurl),
                 viewmode="movie_with_plot",
                 thumbnail=""), tipo=item.extra))

    # Pagination: WordPress "next page" anchor
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url="%s%s" % (host, next_page),
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
    return itemlist
|
||||
|
||||
def episodes(item):
    """List episode links from a series page (entries must contain a digit)."""
    logger.info("[downloadme.py] tv_series")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = r'<a href="([^"]+)"[^>]*>([^<]+)</a>(?:<br>|</p>)'

    for ep_url, ep_title in re.compile(patron, re.DOTALL).findall(data):
        # Episode anchors carry a number; digit-less links are navigation
        if not scrapertools.find_single_match(ep_title, r'\d+'):
            continue
        ep_title = scrapertools.decodeHtmlentities(ep_title)
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 text_color="azure",
                 contentType="episode",
                 fulltitle=ep_title,
                 show=ep_title,
                 title=ep_title,
                 thumbnail=item.thumbnail,
                 url=ep_url,
                 viewmode="movie_with_plot"))

    return itemlist
|
||||
|
||||
def findvideos(item):
    """Resolve video links for a movie page.

    Follows shortened outbound links via the unshortenit library, appends the
    resolved urls to the page data and lets servertools detect playable
    servers. BUGFIX: the original called a bare, undefined name `unshorten`;
    the module imports `unshortenit`, so the resolver must be called through
    it (`unshortenit.unshorten`), otherwise this raises NameError at runtime.
    """
    logger.info("kod.downloadme findvideos")
    itemlist = []

    if 'movie' in item.extra:
        # Download the movie detail page
        data = httptools.downloadpage(item.url, headers=headers).data

        # Outbound "link" anchors are url-shortened; resolve each one
        patron = r'<a\s*href="([^"]+)" target="_blank" rel="noopener">.*?link[^<]+</a>'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl in matches:
            url, c = unshortenit.unshorten(scrapedurl)
            # Append the resolved url so servertools can match a server
            data += url + '\n'

        itemlist = servertools.find_video_items(data=data)

        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = item.channel

    return itemlist
|
||||
|
||||
24
plugin.video.alfa/channels/dragonballforever.json
Normal file
24
plugin.video.alfa/channels/dragonballforever.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "dragonballforever",
|
||||
"name": "Dragonball Forever",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": "it",
|
||||
"thumbnail": "https://www.dragonballforever.it/wp-content/uploads/2017/02/header_dbf-1.jpg",
|
||||
"banner": "https://www.dragonballforever.it/wp-content/uploads/2017/02/header_dbf-1.jpg",
|
||||
"version": "1",
|
||||
"date": "23/10/2017",
|
||||
"changes": "New Channel",
|
||||
"categories": ["anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
96
plugin.video.alfa/channels/dragonballforever.py
Normal file
96
plugin.video.alfa/channels/dragonballforever.py
Normal file
@@ -0,0 +1,96 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per dragonballforever
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
from platformcode import logger
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
|
||||
|
||||
|
||||
host = "https://www.dragonballforever.it"
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def mainlist(item):
|
||||
logger.info()
|
||||
itemlist = [Item(channel=item.channel,
|
||||
action="episodi",
|
||||
title=color("Dragon Ball Kai", "azure"),
|
||||
url="%s/dragon-ball-kai-episodi/" % host,
|
||||
extra="Kai",
|
||||
show="Dragon Ball Kai",
|
||||
thumbnail="https://www.dragonballforever.it/wp-content/uploads/2016/11/dragonball_kai_cover.jpg"),
|
||||
Item(channel=item.channel,
|
||||
title=color("Dragon Ball Super", "azure"),
|
||||
action="episodi",
|
||||
url="%s/dragon-ball-super/" % host,
|
||||
extra="Super",
|
||||
show="Dragon Ball Super",
|
||||
thumbnail="https://www.dragonballforever.it/wp-content/uploads/2016/11/dbsuper-locandina.jpg")]
|
||||
|
||||
return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def episodi(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
patron = r'<a href="([^"]+)"[^>]+><strong>(Dragon Ball %s [^<]+)</strong></a>' % item.extra
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedtitle = color(scrapertools.decodeHtmlentities(scrapedtitle).replace('Dragon Ball %s episodio Streaming ' % item.extra, '').replace('#', '').strip(), 'azure')
|
||||
epnumber = scrapertools.find_single_match(scrapedtitle, r'\d+')
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
title=re.sub(r'\d+', 'Episodio: %s' % color(epnumber, 'red'), scrapedtitle),
|
||||
fulltitle="Dragon Ball %s Episodio: %s" % (item.extra, scrapedtitle),
|
||||
url=scrapedurl,
|
||||
extra=item.extra,
|
||||
show=item.show,
|
||||
thumbnail=item.thumbnail,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
if 'Super' in item.extra:
|
||||
item.url = host + "/strm/dbsuper/%s" % scrapertools.find_single_match(data, r'file:\s*"\.\./([^"]+)"')
|
||||
elif 'Kai' in item.extra:
|
||||
item.url = scrapertools.find_single_match(data, r'flashvars=[\'|\"]+(?:file=|)([^&]+)&')
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="play",
|
||||
title="%s [.%s]" % (color(item.show, 'azure'), color(item.url.split('.')[-1], 'orange')),
|
||||
fulltitle=color(item.fulltitle, 'orange') if 'Super' in item.extra else color(item.fulltitle, 'deepskyblue'),
|
||||
url=item.url,
|
||||
show=item.show,
|
||||
extra=item.extra,
|
||||
thumbnail=item.thumbnail))
|
||||
return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def color(text, color):
|
||||
return "[COLOR "+color+"]"+text+"[/COLOR]"
|
||||
|
||||
# ================================================================================================================
|
||||
32
plugin.video.alfa/channels/dreamsub.json
Normal file
32
plugin.video.alfa/channels/dreamsub.json
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"id": "dreamsub",
|
||||
"name": "DreamSub",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": "it",
|
||||
"thumbnail": "http://www.dreamsub.it/res/img/logo.png",
|
||||
"banner": "http://www.dreamsub.it/res/img/logo.png",
|
||||
"version": "11",
|
||||
"date": "06/09/2017",
|
||||
"changes": "Small Fix",
|
||||
"categories": ["anime","vos"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_anime",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Anime",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
256
plugin.video.alfa/channels/dreamsub.py
Normal file
256
plugin.video.alfa/channels/dreamsub.py
Normal file
@@ -0,0 +1,256 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per dreamsub
|
||||
# ------------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from core import scrapertools, httptools, servertools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
|
||||
host = "https://www.dreamsub.co"
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("kod.dreamsub mainlist")
|
||||
itemlist = [Item(channel=item.channel,
|
||||
title="[COLOR azure]Anime / Cartoni[/COLOR]",
|
||||
action="serietv",
|
||||
url="%s/anime" % host,
|
||||
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Categorie[/COLOR]",
|
||||
action="categorie",
|
||||
url=host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Ultimi episodi Anime[/COLOR]",
|
||||
action="ultimiep",
|
||||
url=host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR yellow]Cerca...[/COLOR]",
|
||||
action="search",
|
||||
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info("kod.altadefinizione01 newest" + categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "anime":
|
||||
item.url = "https://www.dreamsub.tv"
|
||||
item.action = "ultimiep"
|
||||
itemlist = ultimiep(item)
|
||||
|
||||
if itemlist[-1].action == "ultimiep":
|
||||
itemlist.pop()
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def serietv(item):
|
||||
logger.info("kod.dreamsub peliculas")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
bloque = scrapertools.get_match(data,
|
||||
'<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">')
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = 'Lingua[^<]+<br>\s*<a href="([^"]+)" title="([^"]+)">'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedurl = host + scrapedurl
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = scrapedtitle.replace("Streaming", "")
|
||||
scrapedtitle = scrapedtitle.replace("Lista episodi ", "")
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="episodios",
|
||||
contentType="tvshow",
|
||||
title="[COLOR azure]%s[/COLOR]" % scrapedtitle,
|
||||
fulltitle=scrapedtitle,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
show=scrapedtitle,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginazione
|
||||
patronvideos = '<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">'
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
|
||||
if len(matches) > 0:
|
||||
scrapedurl = urlparse.urljoin(item.url, matches[0])
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="serietv",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def ultimiep(item):
|
||||
logger.info("kod.dreamsub ultimiep")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
bloque = scrapertools.get_match(data, '<ul class="last" id="recentAddedEpisodesAnimeDDM">(.*?)</ul>')
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<li><a href="([^"]+)"[^>]+>([^<]+)<br>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
ep = scrapertools.find_single_match(scrapedtitle, r'\d+$').zfill(2)
|
||||
scrapedtitle = re.sub(r'\d+$', ep, scrapedtitle)
|
||||
scrapedurl = host + scrapedurl
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
cleantitle = re.sub(r'\d*-?\d+$', '', scrapedtitle).strip()
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="tvshow",
|
||||
title=scrapedtitle,
|
||||
fulltitle=cleantitle,
|
||||
text_color="azure",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
show=cleantitle,
|
||||
plot=scrapedplot,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
for itm in itemlist:
|
||||
itm.contentType = "episode"
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def categorie(item):
|
||||
logger.info("[dreamsub.py] categorie")
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
blocco = scrapertools.find_single_match(data,
|
||||
r'<select name="genere" id="genere" class="selectInput">(.*?)</select>')
|
||||
patron = r'<option value="([^"]+)">'
|
||||
matches = re.compile(patron, re.DOTALL).findall(blocco)
|
||||
|
||||
for value in matches:
|
||||
url = "%s/genere/%s" % (host, value)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="serietv",
|
||||
title="[COLOR azure]%s[/COLOR]" % value.capitalize(),
|
||||
url=url,
|
||||
extra="tv",
|
||||
thumbnail=item.thumbnail,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("[dreamsub.py] " + item.url + " search " + texto)
|
||||
item.url = "%s/search/%s" % (host, texto)
|
||||
try:
|
||||
return serietv(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def episodios(item):
|
||||
logger.info("kod.channels.dreamsub episodios")
|
||||
|
||||
itemlist = []
|
||||
|
||||
data = httptools.downloadpage(item.url).data
|
||||
bloque = scrapertools.get_match(data, '<div class="seasonEp">(.*?)<div class="footer">')
|
||||
|
||||
patron = '<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>(.*?)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(bloque)
|
||||
|
||||
for scrapedurl, title1, title2, title3 in matches:
|
||||
scrapedurl = host + scrapedurl
|
||||
scrapedtitle = title1 + " " + title2 + title3
|
||||
scrapedtitle = scrapedtitle.replace("Download", "")
|
||||
scrapedtitle = scrapedtitle.replace("Streaming", "")
|
||||
scrapedtitle = scrapedtitle.replace("& ", "")
|
||||
scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="episode",
|
||||
fulltitle=scrapedtitle,
|
||||
show=item.show,
|
||||
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail=item.thumbnail,
|
||||
plot=item.plot,
|
||||
folder=True))
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) != 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios",
|
||||
show=item.show))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info()
|
||||
|
||||
print item.url
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
itemlist = servertools.find_video_items(data=data)
|
||||
if 'keepem.online' in data:
|
||||
urls = scrapertools.find_multiple_matches(data, r'(https://keepem\.online/f/[^"]+)"')
|
||||
for url in urls:
|
||||
url = httptools.downloadpage(url).url
|
||||
itemlist += servertools.find_video_items(data=url)
|
||||
|
||||
for videoitem in itemlist:
|
||||
server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
|
||||
videoitem.title = "".join(
|
||||
["[[COLOR orange]%s[/COLOR]] " % server.capitalize(), "[COLOR azure]%s[/COLOR]" % item.title])
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.show = item.show
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.channel = item.channel
|
||||
videoitem.contentType = item.contentType
|
||||
|
||||
return itemlist
|
||||
24
plugin.video.alfa/channels/eurostreaming.json
Normal file
24
plugin.video.alfa/channels/eurostreaming.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "eurostreaming",
|
||||
"name": "Eurostreaming",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": "it",
|
||||
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/eurostreaming.png",
|
||||
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/eurostreaming.png",
|
||||
"version": "8",
|
||||
"date": "16/06/2016",
|
||||
"changes": "Small Fix",
|
||||
"categories": ["tvshow","anime"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
236
plugin.video.alfa/channels/eurostreaming.py
Normal file
236
plugin.video.alfa/channels/eurostreaming.py
Normal file
@@ -0,0 +1,236 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per eurostreaming
|
||||
# ------------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from core import scrapertools, httptools, servertools, tmdb, scrapertoolsV2
|
||||
from core.item import Item
|
||||
from lib import unshortenit
|
||||
from platformcode import logger, config
|
||||
|
||||
host = "https://eurostreaming.one"
|
||||
list_servers = ['openload', 'speedvideo', 'wstream', 'streamango' 'flashx', 'nowvideo']
|
||||
list_quality = ['default']
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("kod.eurostreaming mainlist")
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
|
||||
itemlist = [
|
||||
Item(
|
||||
channel=item.channel,
|
||||
title="[COLOR azure]Serie TV[/COLOR]",
|
||||
action="serietv",
|
||||
extra="tvshow",
|
||||
url="%s/category/serie-tv-archive/" % host,
|
||||
thumbnail=
|
||||
"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
|
||||
),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
title="[COLOR azure]Anime / Cartoni[/COLOR]",
|
||||
action="serietv",
|
||||
extra="tvshow",
|
||||
url="%s/category/anime-cartoni-animati/" % host,
|
||||
thumbnail=
|
||||
"http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"
|
||||
),
|
||||
Item(
|
||||
channel=item.channel,
|
||||
title="[COLOR yellow]Cerca...[/COLOR]",
|
||||
action="search",
|
||||
extra="tvshow",
|
||||
thumbnail=
|
||||
"http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
|
||||
]
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def serietv(item):
|
||||
logger.info("kod.eurostreaming peliculas")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = '<div class="post-thumb">\s*<a href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
scrapedplot = ""
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming", ""))
|
||||
if scrapedtitle.startswith("Link to "):
|
||||
scrapedtitle = scrapedtitle[8:]
|
||||
# num = scrapertools.find_single_match(scrapedurl, '(-\d+/)')
|
||||
# if num:
|
||||
# scrapedurl = scrapedurl.replace(num, "-episodi/")
|
||||
itemlist.append(
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="episodios",
|
||||
contentType="tvshow",
|
||||
title=scrapedtitle,
|
||||
fulltitle=scrapedtitle,
|
||||
text_color="azure",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
show=scrapedtitle,
|
||||
extra=item.extra,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginazione
|
||||
patronvideos = '<a class="next page-numbers" href="?([^>"]+)">Avanti »</a>'
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
|
||||
if len(matches) > 0:
|
||||
scrapedurl = urlparse.urljoin(item.url, matches[0])
|
||||
itemlist.append(
|
||||
Item(
|
||||
channel=item.channel,
|
||||
action="serietv",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail=
|
||||
"http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
extra=item.extra,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
def search(item, texto):
|
||||
logger.info("[eurostreaming.py] " + item.url + " search " + texto)
|
||||
item.url = "%s/?s=%s" % (host, texto)
|
||||
try:
|
||||
return serietv(item)
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("%s" % line)
|
||||
return []
|
||||
|
||||
|
||||
def episodios(item):
|
||||
def load_episodios(html, item, itemlist, lang_title):
|
||||
patron = '((?:.*?<a[^h]+href="[^"]+"[^>]+>[^<][^<]+<(?:b|\/)[^>]+>)+)'
|
||||
matches = re.compile(patron).findall(html)
|
||||
for data in matches:
|
||||
# Estrazione
|
||||
|
||||
scrapedtitle = data.split('<a ')[0]
|
||||
scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
|
||||
if scrapedtitle != 'Categorie':
|
||||
scrapedtitle = scrapedtitle.replace('×', 'x')
|
||||
scrapedtitle = scrapedtitle.replace('×', 'x')
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentType="episode",
|
||||
title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
|
||||
url=data,
|
||||
thumbnail=item.thumbnail,
|
||||
extra=item.extra,
|
||||
fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
|
||||
show=item.show))
|
||||
|
||||
logger.info("[eurostreaming.py] episodios")
|
||||
|
||||
itemlist = []
|
||||
|
||||
# Download pagina
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.decodeHtmlentities(data)
|
||||
link = False
|
||||
|
||||
if scrapertoolsV2.get_match(data, '<div class="nano_cp_container"><span.*?CLICCA QUI'):
|
||||
item.url = scrapertoolsV2.find_single_match(data, '<script type="text\/javascript">.*?var nano_ajax_object =.*?"go_to":"(.*?)"').replace('\\', '')
|
||||
link = True
|
||||
else:
|
||||
match = scrapertoolsV2.get_match(data, '<h3 style="text-align: center;">.*?<a href="(.*?)">.{0,5}<span.*?CLICCA QUI.*?</a></h3>')
|
||||
if match != '':
|
||||
item.url = match
|
||||
link = True
|
||||
if link:
|
||||
data = httptools.downloadpage(item.url).data
|
||||
data = scrapertools.decodeHtmlentities(data)
|
||||
|
||||
data = scrapertoolsV2.get_match(data, '<div class="su-accordion">(.+?)<div class="clear">')
|
||||
|
||||
lang_titles = []
|
||||
starts = []
|
||||
patron = r"STAGIONE.*?ITA"
|
||||
matches = re.compile(patron, re.IGNORECASE).finditer(data)
|
||||
|
||||
for match in matches:
|
||||
season_title = match.group()
|
||||
# import web_pdb;
|
||||
# web_pdb.set_trace()
|
||||
if season_title != '':
|
||||
lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
|
||||
starts.append(match.end())
|
||||
|
||||
i = 1
|
||||
len_lang_titles = len(lang_titles)
|
||||
|
||||
while i <= len_lang_titles:
|
||||
inizio = starts[i - 1]
|
||||
fine = starts[i] if i < len_lang_titles else -1
|
||||
|
||||
html = data[inizio:fine]
|
||||
lang_title = lang_titles[i - 1]
|
||||
|
||||
load_episodios(html, item, itemlist, lang_title)
|
||||
|
||||
i += 1
|
||||
|
||||
if config.get_videolibrary_support() and len(itemlist) != 0:
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
|
||||
url=item.url,
|
||||
action="add_serie_to_library",
|
||||
extra="episodios",
|
||||
show=item.show))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
|
||||
logger.info("kod.eurostreaming findvideos")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = item.url
|
||||
|
||||
matches = re.findall(r'<a href="([^"]+)"[^>]*>[^<]+</a>', data, re.DOTALL)
|
||||
|
||||
data = []
|
||||
for url in matches:
|
||||
url, c = unshortenit.unshorten(url)
|
||||
data.append(url)
|
||||
|
||||
itemlist = servertools.find_video_items(data=str(data))
|
||||
|
||||
for videoitem in itemlist:
|
||||
videoitem.title = item.title + videoitem.title
|
||||
videoitem.fulltitle = item.fulltitle
|
||||
videoitem.thumbnail = item.thumbnail
|
||||
videoitem.show = item.show
|
||||
videoitem.plot = item.plot
|
||||
videoitem.channel = item.channel
|
||||
videoitem.contentType = item.contentType
|
||||
|
||||
autoplay.start(itemlist, item)
|
||||
|
||||
return itemlist
|
||||
60
plugin.video.alfa/channels/fastsubita.json
Normal file
60
plugin.video.alfa/channels/fastsubita.json
Normal file
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"id": "fastsubita",
|
||||
"name": "Fastsubita",
|
||||
"language": ["ita"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://fastsubita.ml/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
|
||||
"banner": "http://fastsubita.ml/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
|
||||
"version": "2",
|
||||
"date": "03/06/2017",
|
||||
"changes": "New",
|
||||
"categories": ["tvshow","vos","top channels" ],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
262
plugin.video.alfa/channels/fastsubita.py
Normal file
262
plugin.video.alfa/channels/fastsubita.py
Normal file
@@ -0,0 +1,262 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per fastsubita
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re, urlparse
|
||||
|
||||
from channels import autoplay, filtertools
|
||||
from core import scrapertools, servertools, httptools, tmdb
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
host = "http://fastsubita.com"
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome']
|
||||
list_quality = ['default']
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'fastsubita')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'fastsubita')
|
||||
|
||||
headers = [
|
||||
['Host', 'fastsubita.com'],
|
||||
['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'],
|
||||
['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
|
||||
['Accept-Language', 'en-US,en;q=0.5'],
|
||||
['Accept-Encoding', 'gzip, deflate'],
|
||||
['Referer', host],
|
||||
['DNT', '1'],
|
||||
['Connection', 'keep-alive'],
|
||||
['Upgrade-Insecure-Requests', '1'],
|
||||
['Cache-Control', 'max-age=0']
|
||||
]
|
||||
|
||||
PERPAGE = 14
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
logger.info("[fastsubita.py] mainlist")
|
||||
|
||||
autoplay.init(item.channel, list_servers, list_quality)
|
||||
itemlist = [Item(channel=item.channel,
|
||||
title="[COLOR azure]Aggiornamenti[/COLOR]",
|
||||
action="serietv",
|
||||
extra='serie',
|
||||
url=host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR azure]Tutte le Serie TV[/COLOR]",
|
||||
action="all_quick",
|
||||
extra='serie',
|
||||
url="%s/elenco-serie-tv/" % host,
|
||||
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
|
||||
Item(channel=item.channel,
|
||||
title="[COLOR yellow]Cerca...[/COLOR]",
|
||||
action="search",
|
||||
extra='serie',
|
||||
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
|
||||
|
||||
autoplay.show_option(item.channel, itemlist)
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
|
||||
logger.info("[fastsubita.py]==> newest" + categoria)
|
||||
itemlist = []
|
||||
item = Item()
|
||||
try:
|
||||
if categoria == "series":
|
||||
item.url = host
|
||||
item.action = "serietv"
|
||||
itemlist = serietv(item)
|
||||
|
||||
if itemlist[-1].action == "serietv":
|
||||
itemlist.pop()
|
||||
|
||||
# Continua la ricerca in caso di errore
|
||||
except:
|
||||
import sys
|
||||
for line in sys.exc_info():
|
||||
logger.error("{0}".format(line))
|
||||
return []
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def serietv(item):
|
||||
logger.info("[fastsubita.py] peliculas")
|
||||
itemlist = []
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
logger.info("[fastsubita.py] peliculas")
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = r'<h3 class="entry-title title-font"><a href="([^"]+)" rel="bookmark">(.*?)<'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for scrapedurl, scrapedtitle in matches:
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scraped_1 = scrapedtitle.split("×")[0][:-2]
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
scrapedtitle = scrapedtitle.replace(scraped_1, "")
|
||||
|
||||
if "http:" in scrapedurl:
|
||||
scrapedurl = scrapedurl
|
||||
else:
|
||||
scrapedurl = "http:" + scrapedurl
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="findvideos",
|
||||
contentTpye="tvshow",
|
||||
title="[COLOR azure]" + scraped_1 + "[/COLOR]" + " " + scrapedtitle,
|
||||
fulltitle=scraped_1,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
show=scraped_1,
|
||||
extra=item.extra,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
# Paginazione
|
||||
patronvideos = r'<a class="next page-numbers" href="(.*?)">Successivi'
|
||||
matches = re.compile(patronvideos, re.DOTALL).findall(data)
|
||||
|
||||
if len(matches) > 0:
|
||||
scrapedurl = urlparse.urljoin(item.url, matches[0])
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="serietv",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
extra=item.extra,
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def all_quick(item):
|
||||
logger.info("[fastsubita.py] peliculas")
|
||||
itemlist = []
|
||||
|
||||
p = 1
|
||||
if '{}' in item.url:
|
||||
item.url, p = item.url.split('{}')
|
||||
p = int(p)
|
||||
|
||||
# Carica la pagina
|
||||
data = httptools.downloadpage(item.url, headers=headers).data
|
||||
|
||||
# Estrae i contenuti
|
||||
patron = r'<a style.*?href="([^"]+)">([^<]+)<\/a>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
|
||||
if (p - 1) * PERPAGE > i: continue
|
||||
if i >= p * PERPAGE: break
|
||||
scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
|
||||
if 'S' in scrapedtitle.lower(): continue
|
||||
|
||||
if "http:" in scrapedurl:
|
||||
scrapedurl = scrapedurl
|
||||
else:
|
||||
scrapedurl = "http:" + scrapedurl
|
||||
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
action="serietv",
|
||||
contentType="tvshow",
|
||||
title=scrapedtitle,
|
||||
fulltitle=scrapedtitle,
|
||||
text_color="azure",
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
plot=scrapedplot,
|
||||
show=scrapedtitle,
|
||||
extra=item.extra,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
if len(matches) >= p * PERPAGE:
|
||||
scrapedurl = item.url + '{}' + str(p + 1)
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
action="all_quick",
|
||||
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
|
||||
url=scrapedurl,
|
||||
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
|
||||
folder=True))
|
||||
|
||||
return itemlist
|
||||
|
||||
def findvideos(item):
    """Collect playable server links from an episode page.

    is.gd short links are expanded via their Location header and appended to
    the page HTML so the generic server detector picks them up as well.
    """
    logger.info("[fastsubita.py] findvideos")

    data = httptools.downloadpage(item.url, headers=headers).data
    content = scrapertools.get_match(data, '<div class="entry-content">(.*?)<footer class="entry-footer">')

    for link in re.compile(r'<a href="([^"]+)">', re.DOTALL).findall(content):
        if 'is.gd' not in link:
            continue
        resp = httptools.downloadpage(link, follow_redirects=False)
        data += resp.headers.get("location", "") + '\n'

    itemlist = servertools.find_video_items(data=data)

    for video in itemlist:
        video.title = item.title + video.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.show = item.show
        video.plot = item.plot
        video.channel = item.channel
        video.contentType = item.contentType
        video.language = IDIOMAS['Italiano']

    # Optional dead-link verification (user setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Language filtering (FilterTools).
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # AutoPlay integration.
    autoplay.start(itemlist, item)

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and list matching shows."""
    logger.info("[fastsubita.py] " + item.url + " search " + texto)
    item.url = "%s/?s=%s" % (host, texto)
    try:
        return serietv(item)
    # Continua la ricerca in caso di errore. A bare "except:" would also
    # swallow SystemExit/KeyboardInterrupt, so only catch Exception.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
61
plugin.video.alfa/channels/filmgratis.json
Normal file
61
plugin.video.alfa/channels/filmgratis.json
Normal file
@@ -0,0 +1,61 @@
|
||||
{
|
||||
"id": "filmgratis",
|
||||
"name": "Filmgratis",
|
||||
"language": ["it"],
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"thumbnail": "https://www.filmgratis.video/templates/itafilm/images/logo.png",
|
||||
"banner": "https://www.filmgratis.video/templates/itafilm/images/logo.png",
|
||||
"version": "1",
|
||||
"date": "15/09/2017",
|
||||
"changes": "Re-enabled Channel",
|
||||
"categories": ["movie"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "2", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
306
plugin.video.alfa/channels/filmgratis.py
Normal file
306
plugin.video.alfa/channels/filmgratis.py
Normal file
@@ -0,0 +1,306 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per filmgratis
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from platformcode import logger,config
|
||||
from core import scrapertools, httptools, servertools, tmdb
|
||||
from core.item import Item
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
|
||||
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango', 'vidoza', 'youtube']
|
||||
list_quality = ['HD', 'SD']
|
||||
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'filmgratis')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'filmgratis')
|
||||
|
||||
host = "https://www.filmgratis.one"
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def mainlist(item):
    """Root menu of the filmgratis channel: listings, filters and search."""
    logger.info("kod.filmgratis mainlist")

    autoplay.init(item.channel, list_servers, list_quality)

    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    entries = [
        ("peliculas", color("Home", "orange")),
        ("annoattuale", color("Film di quest'anno", "azure")),
        ("categorie", color("Categorie", "azure")),
        ("peranno", color("Per anno", "azure")),
        ("perpaese", color("Per paese", "azure")),
    ]
    itemlist = [Item(channel=item.channel, action=action, title=title,
                     url=host, thumbnail=thumb)
                for action, title in entries]
    # The search entry carries no URL; the framework asks for the text.
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title=color("Cerca ...", "yellow"),
                         extra="movie",
                         thumbnail=thumb))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def search(item, texto):
    """Search the site for *texto* and list results as movie items."""
    logger.info("filmgratis.py Search ===> " + texto)
    item.url = "%s/index.php?story=%s&do=search&subaction=search" % (host, texto)
    try:
        return peliculas(item)
    # Continua la ricerca in caso di errore. Narrowed from a bare "except:"
    # so SystemExit/KeyboardInterrupt are not swallowed.
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def newest(categoria):
    """Return the newest movie items for the global 'news' section.

    Only the "film" category is handled. Returns an empty list on any
    scraping error so the aggregated listing keeps working.
    """
    logger.info("filmgratis " + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)

            # Drop the trailing "next page" pseudo-item, if present.
            # (Guarded: the old code indexed [-1] on a possibly empty list.)
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continua la ricerca in caso di errore (narrowed from bare "except:").
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def annoattuale(item):
    """List current-year movies by following the 'Film <year>' menu link.

    Fetches the home page, finds the single "Film YYYY" link in the left
    menu and delegates the listing to peliculas().
    """
    logger.info()

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.get_match(data, r'<div class="left-menu-main">(.*?)</div>')
    patron = r'<a href="([^"]+)">Film\s*\d{4}</a>'

    # Removed an unused local list; this function only rewrites the URL.
    item.url = urlparse.urljoin(host, scrapertools.find_single_match(blocco, patron))
    return peliculas(item)
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def categorie(item):
    """List the site's genre categories, hiding the adult one."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.get_match(data, r'<div class="menu-janr-content">(.*?)</div>')
    links = re.compile(r'<a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(blocco)

    for url, name in links:
        name = scrapertools.decodeHtmlentities(name)
        # The adult category is intentionally skipped.
        if 'film erotici' in name.lower():
            continue
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=name,
                             text_color="azure",
                             url=urlparse.urljoin(host, url),
                             folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def peranno(item):
    """List the 'year of publication' filter links."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.get_match(data, r'<div class="sort-menu-title">\s*Anno di pubblicazione:\s*</div>(.*?)</div>')

    for url, label in re.compile(r'<a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(blocco):
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=scrapertools.decodeHtmlentities(label),
                             text_color="azure",
                             url=urlparse.urljoin(host, url),
                             folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def perpaese(item):
    """List the 'country of production' filter links."""
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.get_match(data, r'<div class="sort-menu-title">\s*Paesi di produzione:\s*</div>(.*?)</div>')

    for url, label in re.compile(r'<a href="([^"]+)">([^<]+)</a>', re.DOTALL).findall(blocco):
        itemlist.append(Item(channel=item.channel,
                             action="peliculas",
                             title=scrapertools.decodeHtmlentities(label),
                             text_color="azure",
                             url=urlparse.urljoin(host, url),
                             folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def peliculas(item):
    """Scrape a movie listing page into playable items plus a pager entry.

    Fixes vs. the previous version:
    - removed a per-movie page download whose result only fed a loop with no
      effect (``scrapedurl = scrapedurl``) — one fewer HTTP request per movie;
    - the year is extracted once instead of twice;
    - the title is only colorized when a year was actually found. Before,
      ``title.replace('', markup)`` injected the color markup between every
      character whenever no year matched.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    patron = r'<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)".*?/></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
        infolabels = {}
        cleantitle = scrapedtitle
        displaytitle = scrapedtitle
        if year:
            cleantitle = cleantitle.replace("(%s)" % year, '').strip()
            infolabels['year'] = year
            displaytitle = scrapedtitle.replace(year, color("%s" % year, "red"))

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=displaytitle,
                 fulltitle=cleantitle,
                 text_color="azure",
                 url=scrapedurl,
                 extra="movie",
                 show=cleantitle,
                 thumbnail=scrapedthumbnail,
                 infoLabels=infolabels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagine: the ">" arrow links to the next listing page, when present.
    next_page = scrapertools.find_single_match(data, r'<a href="([^"]+)">>')
    if next_page:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, next_page),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 extra=item.extra,
                 folder=True))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def findvideos(item):
    """Resolve server links on a movie page and decorate them for playback."""
    logger.info()
    page = httptools.downloadpage(item.url).data

    itemlist = servertools.find_video_items(data=page)

    for video in itemlist:
        server_name = re.sub(r'[-\[\]\s]+', '', video.title)
        video.title = "".join(["[%s] " % color(server_name.capitalize(), 'orange'), item.title])
        video.fulltitle = item.fulltitle
        video.show = item.show
        video.thumbnail = item.thumbnail
        video.channel = item.channel
        video.contentType = item.contentType
        video.language = IDIOMAS['Italiano']

    # Optional dead-link verification (user setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Language filtering (FilterTools).
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # AutoPlay integration.
    autoplay.start(itemlist, item)

    # Offer "add to video library" for movies reached from listings.
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def color(text, color):
    """Wrap *text* in Kodi [COLOR] markup using the given colour name."""
    return "[COLOR {0}]{1}[/COLOR]".format(color, text)
|
||||
|
||||
# ================================================================================================================
|
||||
32
plugin.video.alfa/channels/filmhdstreaming.json
Normal file
32
plugin.video.alfa/channels/filmhdstreaming.json
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"id": "filmhdstreaming",
|
||||
"name": "Filmhdstreaming",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "http:\/\/hdcineblog01.com\/css\/images\/logo3.png",
|
||||
"bannermenu": "http:\/\/hdcineblog01.com\/css\/images\/logo3.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novit\u00e0 - film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
229
plugin.video.alfa/channels/filmhdstreaming.py
Normal file
229
plugin.video.alfa/channels/filmhdstreaming.py
Normal file
@@ -0,0 +1,229 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per filmhdstreaming
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from platformcode import logger, config
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
|
||||
|
||||
host = "https://cb01.mobi/"
|
||||
|
||||
|
||||
def mainlist(item):
    """Channel root menu: latest updates, genres and search."""
    logger.info("filmhdstreaming mainlist")

    return [Item(channel=item.channel, action="elenco",
                 title="[COLOR azure]Aggiornamenti Film[/COLOR]",
                 url=host + "/page/1.html", thumbnail=NovitaThumbnail, fanart=fanart),
            Item(channel=item.channel, action="elenco_genere",
                 title="[COLOR azure]Film per Genere[/COLOR]", url=host,
                 thumbnail=GenereThumbnail, fanart=fanart),
            Item(channel=item.channel, action="search",
                 title="[COLOR orange]Cerca film...[/COLOR]", extra="movie",
                 thumbnail=thumbcerca, fanart=fanart)]
|
||||
|
||||
|
||||
|
||||
def newest(categoria):
    """Return the newest film items for the global 'news' listing.

    Only the "film" category is handled; any error is logged and an empty
    list returned so the aggregated listing keeps working.
    """
    logger.info("filmhdstreaming newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host + "/page/1.html"
            item.action = "elenco"
            itemlist = elenco(item)

            # Drop the trailing "next page" pseudo-item, if present.
            # (Guarded: the old code indexed [-1] on a possibly empty list.)
            if itemlist and itemlist[-1].action == "elenco":
                itemlist.pop()

    # Continua la ricerca in caso di errore (narrowed from bare "except:").
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
|
||||
def elenco_top(item):
    """List the 'top movies' box from the home page."""
    logger.info("filmhdstreaming elenco_top")

    itemlist = []

    data = httptools.downloadpage(item.url).data

    # Debug aid: dump the whole page into the Kodi log so the scraped HTML
    # can be inspected (e.g. with an auto-reloading log viewer).
    logger.info("ecco la pagina completa ->" + data)

    # Everything between the first movie box and the slider header.
    top_html = scrapertools.find_single_match(data, 'id="box_movies1">(.*?)class="header_slider">')

    logger.info("filtrato ->" + top_html)

    entry_re = 'class="movie">[^>]+><a href="(.*?)"><img src="(.*?)".*?<h2>(.*?)<\/h2>'
    for url, img, name in scrapertools.find_multiple_matches(top_html, entry_re):
        logger.info("Url:" + url + " thumbnail:" + img + " title:" + name)
        itemlist.append(infoIca(Item(channel=item.channel,
                                     action="findvideos",
                                     title="[COLOR azure]" + name + "[/COLOR]",
                                     fulltitle=name,
                                     url=url,
                                     thumbnail=img,
                                     fanart="")))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def elenco(item):
    """Scrape a paginated movie list; append a 'next page' item when available."""
    logger.info("filmhdstreaming elenco")

    itemlist = []
    data = httptools.downloadpage(item.url).data

    entry_re = r'<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"[^>]+>'
    for url, name, thumb in re.compile(entry_re, re.DOTALL).findall(data):
        name = scrapertools.decodeHtmlentities(name)
        # Strip the SEO suffixes the site appends to every title.
        for suffix in (" streaming ita", " film streaming", " streaming gratis"):
            name = name.replace(suffix, "")
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=name,
                 show=name,
                 title="[COLOR azure]" + name + "[/COLOR]",
                 url=url,
                 thumbnail=thumb,
                 plot="",
                 folder=True), tipo='movie'))

    # Paginazione: follow the AVANTI (next) link if present.
    next_re = r'<a class="page dark gradient" href=["|\']+([^"]+)["|\']+>AVANTI'
    pages = re.compile(next_re, re.DOTALL).findall(data)
    if pages:
        next_url = urlparse.urljoin(re.sub(r'\d+.html$', '', item.url), pages[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="elenco",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_url,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def elenco_genere(item):
    """List genre links taken from the site's first <ul> menu."""
    logger.info("filmhdstreaming elenco_genere")

    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url).data
    menu = scrapertools.get_match(data, '<ul>(.*?)</ul>')

    # Estrae i contenuti
    link_re = '<li><a href="([^"]+)">[^>]+></i>\s*([^<]+)</a></li>'
    for url, name in re.compile(link_re, re.DOTALL).findall(menu):
        name = name.replace("Film streaming ", "")
        itemlist.append(
            Item(channel=item.channel,
                 action="elenco",
                 title="[COLOR azure]" + name + "[/COLOR]",
                 url=url,
                 thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def elenco_ten(item):
    """List the top-ten box (the <ul class="lista"> block) from the page."""
    logger.info("filmhdstreaming elenco_ten")

    itemlist = []
    data = httptools.downloadpage(item.url).data

    box = scrapertools.find_single_match(data, '<ul class="lista">(.*?)</ul>')
    for url, name in scrapertools.find_multiple_matches(box, '<li>.*?href="(.*?)">(.*?)</a>'):
        logger.info("Url:" + url + " title:" + name)
        itemlist.append(infoIca(Item(channel=item.channel,
                                     action="findvideos",
                                     title="[COLOR azure]" + name + "[/COLOR]",
                                     fulltitle=name,
                                     url=url,
                                     thumbnail="",
                                     fanart="")))

    return itemlist
|
||||
|
||||
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* via its /search/ path."""
    logger.info("filmhdstreaming search " + texto)

    # (Removed an unused local list; elenco() builds the results itself.)
    item.url = host + "/search/" + texto

    try:
        return elenco(item)
    # Continua la ricerca in caso di errore (narrowed from bare "except:").
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
|
||||
# Shared artwork / UI constants for this channel (referenced by mainlist and
# the listing functions; defined here, after the functions, which is fine
# because they are only read at call time).
GenereThumbnail = "https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png"
NovitaThumbnail = "https://superrepo.org/static/images/icons/original/xplugin.video.moviereleases.png.pagespeed.ic.j4bhi0Vp3d.png"
thumbcerca = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
fanart = "https://superrepo.org/static/images/fanart/original/script.artwork.downloader.jpg"
# Localized "next page" label and pager artwork.
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
thumbnovita = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
|
||||
23
plugin.video.alfa/channels/filmontv.json
Normal file
23
plugin.video.alfa/channels/filmontv.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"id": "filmontv",
|
||||
"name": "Filmontv",
|
||||
"language": ["it"],
|
||||
"active": false,
|
||||
"adult": false,
|
||||
"thumbnail": null,
|
||||
"banner": null,
|
||||
"categories": [
|
||||
null
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
86
plugin.video.alfa/channels/filmontv.py
Normal file
86
plugin.video.alfa/channels/filmontv.py
Normal file
@@ -0,0 +1,86 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale filmontv
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
import urllib
|
||||
|
||||
from core import httptools
|
||||
from platformcode import logger
|
||||
from core import scrapertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
|
||||
|
||||
host = "https://www.comingsoon.it"
|
||||
|
||||
TIMEOUT_TOTAL = 60
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: films on TV by time slot (now / morning / ... / night)."""
    logger.info(" mainlist")

    slots = [
        ("[COLOR red]IN ONDA ADESSO[/COLOR]", "%s/filmtv/oggi/in-onda/" % host,
         "http://a2.mzstatic.com/eu/r30/Purple/v4/3d/63/6b/3d636b8d-0001-dc5c-a0b0-42bdf738b1b4/icon_256.png"),
        ("[COLOR azure]Mattina[/COLOR]", "%s/filmtv/oggi/mattina/" % host,
         "http://icons.iconarchive.com/icons/icons-land/weather/256/Sunrise-icon.png"),
        ("[COLOR azure]Pomeriggio[/COLOR]", "%s/filmtv/oggi/pomeriggio/" % host,
         "http://icons.iconarchive.com/icons/custom-icon-design/weather/256/Sunny-icon.png"),
        ("[COLOR azure]Sera[/COLOR]", "%s/filmtv/oggi/sera/" % host,
         "http://icons.iconarchive.com/icons/icons-land/vista-people/256/Occupations-Pizza-Deliveryman-Male-Light-icon.png"),
        ("[COLOR azure]Notte[/COLOR]", "%s/filmtv/oggi/notte/" % host,
         "http://icons.iconarchive.com/icons/oxygen-icons.org/oxygen/256/Status-weather-clear-night-icon.png"),
    ]
    return [Item(channel=item.channel, title=title, action="tvoggi",
                 url=url, thumbnail=thumb)
            for title, url, thumb in slots]
|
||||
|
||||
|
||||
def tvoggi(item):
    """Scrape the TV schedule page and emit one global-search item per film."""
    logger.info(" tvoggi")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url).data

    # Estrae i contenuti
    entry_re = '<div class="col-xs-12 col-sm-6 box-contenitore filmintv">.*?src="([^"]+)[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<.*?titolo">([^<]+)<.*?ore <span>([^<]+)<\/span><br \/>([^<]+)<\/div>'
    for thumb, name, time, tv_channel in re.compile(entry_re, re.DOTALL).findall(data):
        name = scrapertools.decodeHtmlentities(name).strip()

        # No direct URL on this site: each entry triggers a global search.
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="do_search",
                 extra=urllib.quote_plus(name) + '{}' + 'movie',
                 title="[COLOR red]" + time + "[/COLOR] - [COLOR azure]" + name + "[/COLOR] [COLOR yellow][" + tv_channel + "][/COLOR]" ,
                 fulltitle=name,
                 url="",
                 thumbnail=thumb,
                 folder=True), tipo="movie"))

    return itemlist
|
||||
|
||||
|
||||
# Esta es la función que realmente realiza la búsqueda
|
||||
|
||||
def do_search(item):
    """Delegate to the global search channel (this channel has no own search)."""
    from channels import search as global_search
    return global_search.do_search(item)
|
||||
30
plugin.video.alfa/channels/filmperevolvere.json
Normal file
30
plugin.video.alfa/channels/filmperevolvere.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"id": "filmperevolvere",
|
||||
"name": "FilmPerEvolvere",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/filmperevolvere.it\/wp-content\/uploads\/2017\/06\/cropped-coversito.jpg",
|
||||
"bannermenu": "https:\/\/filmperevolvere.it\/wp-content\/uploads\/2017\/06\/cropped-coversito.jpg",
|
||||
"categories": ["cult","vos","movie"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
213
plugin.video.alfa/channels/filmperevolvere.py
Normal file
213
plugin.video.alfa/channels/filmperevolvere.py
Normal file
@@ -0,0 +1,213 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per filmperevolvere
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
import lib.pyaes as aes
|
||||
from core import httptools
|
||||
from platformcode import logger, config
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
|
||||
|
||||
host = "https://filmperevolvere.it"
|
||||
|
||||
headers = [
|
||||
['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0'],
|
||||
['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
|
||||
['Accept-Encoding', 'gzip, deflate'],
|
||||
['Accept-Language', 'en-US,en;q=0.5'],
|
||||
['Referer', host],
|
||||
['DNT', '1'],
|
||||
['Upgrade-Insecure-Requests', '1'],
|
||||
['Cache-Control', 'max-age=0']
|
||||
]
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: latest movies, genres and search."""
    logger.info("kod.filmperevolvere mainlist")

    thumb_movie = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Ultimi Film Inseriti[/COLOR]",
                         action="peliculas",
                         url=host,
                         thumbnail=thumb_movie))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Categorie[/COLOR]",
                         action="categorie",
                         url=host,
                         thumbnail=thumb_movie))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca...[/COLOR]",
                         action="search",
                         extra="movie",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """Return the newest entries for the global novelty listing.

    Only the "film" category is supported; the trailing pagination
    pseudo-item is stripped so it does not leak into the aggregate.
    Returns [] on any scraping error so the caller keeps going.
    """
    logger.info("[filmperevolvere.py] newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)

            # Guard against an empty scrape: the original indexed
            # itemlist[-1] unconditionally and could raise IndexError.
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Keep searching on failure
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto*; returns [] on scraping errors."""
    logger.info("[filmperevolvere.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto

    try:
        return peliculas(item)
    except:
        # Log every element of exc_info and swallow, so a broken page
        # does not abort the global search.
        import sys
        exc_parts = sys.exc_info()
        for exc_part in exc_parts:
            logger.error("%s" % exc_part)

        return []
|
||||
|
||||
|
||||
def categorie(item):
    """List the movie genres scraped from the site's mega menu."""
    itemlist = []

    # Use a per-call header list: the original appended the Cookie to the
    # shared module-level `headers`, so repeated calls accumulated
    # duplicate Cookie entries for every request in this module.
    req_headers = list(headers)
    c = get_test_cookie(item.url)
    if c:
        req_headers.append(['Cookie', c])

    # Download the page and restrict to the "GENERI" menu block
    data = httptools.downloadpage(item.url, headers=req_headers).data
    bloque = scrapertools.get_match(data,
                                    'GENERI<span class="mega-indicator">(.*?)<\/ul>')

    # Extract the genre links
    patron = '<a class="mega-menu-link" href="(.*?)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, scrapedtitle in matches:
        # Skip the static navigation entries
        if scrapedtitle.startswith(("HOME", "SERIE TV", "GENERI")):
            continue

        # 'c|' prefix tells peliculas() this is a category page
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title=scrapedtitle,
                 url='c|%s' % scrapedurl,
                 thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                 folder=True))

    for i in itemlist:
        logger.info(i)

    return itemlist
|
||||
|
||||
def peliculas(item):
    """List movies from the home page or from a category page.

    Category URLs are tagged with a leading 'c|' by categorie(); they
    use a different item pattern than the home page listing.
    """
    logger.info("kod.filmperevolvere peliculas")
    itemlist = []

    # Per-call copy so the shared module-level `headers` is not mutated
    # with a new Cookie entry on every invocation (original bug).
    req_headers = list(headers)
    c = get_test_cookie(item.url)
    if c:
        req_headers.append(['Cookie', c])

    if item.url[1] == "|":
        # Category page: strip the 'c|' marker set by categorie()
        patron = 'class="ei-item-title"><a\s*href="([^"]*)">([^<]*)'
        item.url = item.url[2:]
    else:
        patron = '<div class="post-thumbnail">\s*<a href="([^"]+)" title="([^"]+)">\s*<img width="520"'

    # Download the page
    data = httptools.downloadpage(item.url, headers=req_headers).data

    # Extract the entries
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.title()
        if "Serie Tv" in scrapedtitle:
            continue  # movie-only listing
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="",
                 plot="",
                 folder=True), tipo='movie'))

    # Pagination
    patronvideos = '<span class=\'current\'>[^<]+</span><a class=[^=]+=[^=]+="(.*?)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Extract hosting-server video items from a movie page."""
    logger.info("kod.filmperevolvere findvideos")

    # Per-call header copy: avoid mutating the shared module-level list
    # (the original appended a Cookie entry on every call).
    req_headers = list(headers)
    c = get_test_cookie(item.url)
    if c:
        req_headers.append(['Cookie', c])

    # Download the page
    data = httptools.downloadpage(item.url, headers=req_headers).data

    itemlist = servertools.find_video_items(data=data)

    # Propagate this item's metadata to every detected video
    for videoitem in itemlist:
        videoitem.title = "".join([item.title, '[COLOR green][B]', videoitem.title, '[/B][/COLOR]'])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist
|
||||
|
||||
def get_test_cookie(url):
    """Solve the site's AES '__test' cookie challenge, if present.

    The page embeds three hex numbers (a=key, b=IV, c=ciphertext); the
    cookie value is the AES-CBC decryption of c, re-hex-encoded.
    Returns '' when the challenge is not on the page.
    """
    page = httptools.downloadpage(url, headers=headers).data

    key_hex = scrapertools.find_single_match(page, 'a=toNumbers\("([^"]+)"\)')
    if not key_hex:
        return ''
    iv_hex = scrapertools.find_single_match(page, 'b=toNumbers\("([^"]+)"\)')
    if not iv_hex:
        return ''
    ct_hex = scrapertools.find_single_match(page, 'c=toNumbers\("([^"]+)"\)')
    if not ct_hex:
        return ''

    plain = aes.AESModeOfOperationCBC(key_hex.decode('hex'), iv=iv_hex.decode('hex')).decrypt(ct_hex.decode('hex'))
    return '__test=%s' % plain.encode('hex')
|
||||
36
plugin.video.alfa/channels/filmpertutti.json
Normal file
36
plugin.video.alfa/channels/filmpertutti.json
Normal file
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"id": "filmpertutti",
|
||||
"name": "Filmpertutti",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/filmpertutti.png",
|
||||
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/filmpertutti.png",
|
||||
"categories": ["tvshow","movie","top channels"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi in ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Series",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
358
plugin.video.alfa/channels/filmpertutti.py
Normal file
358
plugin.video.alfa/channels/filmpertutti.py
Normal file
@@ -0,0 +1,358 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per filmpertutti.co
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from core import scrapertools, servertools, httptools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from lib import unshortenit
|
||||
from platformcode import config, logger
|
||||
|
||||
host = "https://www.filmpertutti.uno"
|
||||
list_servers = ['akvideo', 'openload', 'streamango', 'wstream']
|
||||
list_quality = ['default']
|
||||
|
||||
|
||||
def mainlist(item):
    """Channel root menu: movies, categories, TV series and searches."""
    logger.info("kod.filmpertutti mainlist")

    autoplay.init(item.channel, list_servers, list_quality)

    thumb_film = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    thumb_search = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Ultimi film inseriti[/COLOR]",
                         action="peliculas",
                         extra="movie",
                         url="%s/category/film/" % host,
                         thumbnail=thumb_film))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Categorie film[/COLOR]",
                         action="categorias",
                         url="%s/category/film/" % host,
                         thumbnail=thumb_film))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca...[/COLOR]",
                         action="search",
                         extra="movie",
                         thumbnail=thumb_search))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Serie TV[/COLOR]",
                         extra="tvshow",
                         action="peliculas_tv",
                         url="%s/category/serie-tv/" % host,
                         thumbnail=thumb_film))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
                         action="search",
                         extra="tvshow",
                         thumbnail=thumb_search))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """Newest film entries for the global novelty listing.

    Strips the trailing pagination pseudo-item; returns [] on error.
    """
    logger.info("kod.filmpertutti newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host + "/category/film/"
            item.action = "peliculas"
            item.extra = "movie"
            itemlist = peliculas(item)

            # Guard against an empty scrape: the original indexed
            # itemlist[-1] unconditionally and could raise IndexError.
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Keep searching on failure
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """Movie listing page: one item per title (with IMDb rating), plus a
    pagination entry when a next page exists."""
    logger.info("kod.filmpertutti peliculas")
    itemlist = []

    # Download the listing page
    data = httptools.downloadpage(item.url).data

    patron = '<li><a href="([^"]+)" data-thumbnail="([^"]+)"><div>\s*<div class="title">(.*?)<.*?IMDb">([^<]+)'
    for movie_url, movie_thumb, movie_title, imdb_rate in re.compile(patron, re.DOTALL).findall(data):
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=movie_title,
                 show=movie_title,
                 title="[COLOR azure]" + movie_title + "[/COLOR] - IMDb: " + imdb_rate,
                 url=movie_url,
                 thumbnail=movie_thumb,
                 plot="",
                 extra=item.extra,
                 folder=True), tipo='movie'))

    # Pagination
    next_urls = re.compile('<a href="([^"]+)"[^>]+>Pagina', re.DOTALL).findall(data)
    if next_urls:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, next_urls[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 extra=item.extra,
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def peliculas_tv(item):
    """TV-series listing page: one item per show, plus pagination."""
    logger.info("kod.filmpertutti peliculas")
    itemlist = []

    # Download the listing page
    data = httptools.downloadpage(item.url).data

    patron = '<li><a href="([^"]+)" data-thumbnail="([^"]+)"><div>\s*<div class="title">(.*?)<'
    for show_url, show_thumb, raw_title in re.compile(patron, re.DOTALL).findall(data):
        show_title = scrapertools.decodeHtmlentities(raw_title)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodios",
                 fulltitle=show_title,
                 show=show_title,
                 title="[COLOR azure]" + show_title + "[/COLOR]",
                 url=show_url,
                 thumbnail=show_thumb,
                 plot="",
                 extra=item.extra,
                 folder=True), tipo='tv'))

    # Pagination
    next_urls = re.compile('<a href="([^"]+)"[^>]+>Pagina', re.DOTALL).findall(data)
    if next_urls:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, next_urls[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 extra=item.extra,
                 folder=True))

    return itemlist
|
||||
|
||||
def categorias(item):
    """Genre list taken from the page's 'Scegli per Genere' combo box."""
    logger.info("kod.filmpertutti categorias")
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # Restrict the search to the genre <select> block
    bloque = scrapertools.get_match(data, '<option>Scegli per Genere</option>(.*?)</select')

    # Each <option> is one category
    for cat_url, cat_title in re.compile('<option data-src="([^"]+)">([^<]+)</option>', re.DOTALL).findall(bloque):
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + cat_title + "[/COLOR]",
                 url=urlparse.urljoin(item.url, cat_url),
                 thumbnail="",
                 extra=item.extra,
                 plot=""))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search and route the results by content type."""
    logger.info("kod.filmpertutti " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto

    # Dispatch table instead of an if-chain
    handlers = {"movie": peliculas, "tvshow": peliculas_tv}
    try:
        handler = handlers.get(item.extra)
        if handler is not None:
            return handler(item)
    # Keep searching on failure
    except:
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
|
||||
|
||||
|
||||
def episodios(item):
    """Build the episode list of a series page.

    The page groups episodes under season headers ("Stagione ... ITA" /
    "... SUB ITA"); each block between two headers is parsed separately
    so the language tag can be attached to every episode title.
    """

    def _load_block(html, lang_title):
        # One anchor-terminated chunk per episode entry
        patron = '.*?<a[^h]+href="[^"]+"[^>]+>[^<]+<\/a>(?:<br \/>|<\/p>|-)'
        for chunk in re.compile(patron).findall(html):
            ep_title = chunk.split('<a ')[0]
            ep_title = re.sub(r'<[^>]*>', '', ep_title).strip()
            if ep_title == 'Categorie':
                continue
            ep_title = ep_title.replace('×', 'x')
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType="episode",
                     title="[COLOR azure]%s[/COLOR]" % (ep_title + " (" + lang_title + ")"),
                     url=chunk,
                     thumbnail=item.thumbnail,
                     extra=item.extra,
                     fulltitle=ep_title + " (" + lang_title + ")" + ' - ' + item.show,
                     show=item.show))

    logger.info("[filmpertutti.py] episodios")

    itemlist = []

    # Download and normalise the page
    data = httptools.downloadpage(item.url).data
    data = scrapertools.decodeHtmlentities(data)

    # Locate every season header and remember where its block starts
    lang_titles = []
    starts = []
    for match in re.compile(r"Stagione.*?ITA", re.IGNORECASE).finditer(data):
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    # Each block runs up to the next header; the last one ends one char
    # before end-of-page ([-1] slice), matching the original behaviour.
    for idx, lang_title in enumerate(lang_titles):
        begin = starts[idx]
        finish = starts[idx + 1] if idx + 1 < len(lang_titles) else -1
        _load_block(data[begin:finish], lang_title)

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios" + "###" + item.extra,
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Collect playable links.

    Episode items carry their server anchors inline in item.url; movie
    pages expose "Streaming" SD/HD sections that are parsed separately.
    """

    def _append_play(content_type, server, play_title, play_url):
        # One playable entry, inheriting this item's metadata
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 contentType=content_type,
                 title=play_title,
                 fulltitle=item.fulltitle,
                 server=server,
                 url=play_url,
                 thumbnail=item.thumbnail,
                 extra=item.extra))

    logger.info("kod.filmpertutti findvideos")
    itemlist = []
    logger.debug(item)

    if item.contentType == "episode":
        patron = '<a\s*href="(.*?)\s*".*?[^>]+>([^<]+)<\/a>'
        anchors = re.compile(patron).findall(item.url)

        prev_srv = ''
        for link_url, srv_name in anchors:
            # A bare "HD" label belongs to the preceding server name
            if srv_name == 'HD':
                srv_name = prev_srv + ' HD'
            prev_srv = srv_name

            _append_play("episode", srv_name, "[COLOR azure]%s[/COLOR]" % srv_name, link_url)
    else:
        # Download the movie page and isolate the streaming section
        data = httptools.downloadpage(item.url).data
        patron = '<strong>\s*(Versione.*?)<p><strong>Download'
        data = re.compile(patron, re.DOTALL).findall(data)

        if data:
            vqual = re.compile('ersione.*?:\s*([^|,\s,&,<]+)').findall(data[0])
            sect = re.compile('Streaming', re.DOTALL).split(data[0])

            # SD links (first "Streaming" section)
            for link, srv in re.compile('<a\s*href="([^",\s]+).*?>([^<]+)', re.DOTALL).findall(sect[1]):
                _append_play("movie", srv, "[COLOR azure]%s (SD)[/COLOR] - %s" % (srv, vqual[0]), link)

            # HD links (optional second section)
            if len(sect) > 2:
                for link, srv in re.compile('<a\s*href="([^",\s]+).*?>([^<]+)', re.DOTALL).findall(sect[2]):
                    _append_play("movie", srv, "[COLOR azure]%s (HD)[/COLOR] - %s" % (srv, vqual[0]), link)
        else:
            itemlist = servertools.find_video_items(item=item)

    autoplay.start(itemlist, item)

    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Unshorten the link and wrap the detected videos with this item's
    metadata."""
    logger.info("kod.filmpertutti play: %s" % item.url)

    data, c = unshortenit.unshorten(item.url)

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    return itemlist
|
||||
70
plugin.video.alfa/channels/filmsenzalimiti.json
Normal file
70
plugin.video.alfa/channels/filmsenzalimiti.json
Normal file
@@ -0,0 +1,70 @@
|
||||
{
|
||||
"id": "filmsenzalimiti",
|
||||
"name": "Filmsenzalimiti",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "filmsenzalimiti.png",
|
||||
"banner": "filmsenzalimiti.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"top channels"
|
||||
],
|
||||
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "2", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
321
plugin.video.alfa/channels/filmsenzalimiti.py
Normal file
321
plugin.video.alfa/channels/filmsenzalimiti.py
Normal file
@@ -0,0 +1,321 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per Filmsenzalimiti
|
||||
# Alhaziel
|
||||
# ------------------------------------------------------------
|
||||
import base64
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channelselector import get_thumb
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
from platformcode import config
|
||||
from core.tmdb import infoIca
|
||||
|
||||
__channel__ = 'filmsenzalimiti'
|
||||
|
||||
host = 'https://filmsenzalimiti.app'
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango', 'vidoza', 'okru']
|
||||
list_quality = ['1080p', '720p', '480p', '360']
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'filmsenzalimiti')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'filmsenzalimiti')
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the channel: film listings, categories and search."""
    logger.info('[filmsenzalimiti.py] mainlist')

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []
    itemlist.append(Item(channel=item.channel, action='video', title='Film',
                         contentType='movie', url=host, thumbnail=''))
    itemlist.append(Item(channel=item.channel, action='video', title='Novità',
                         contentType='movie', url=host + '/category/nuove-uscite',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel, action='video', title='In Sala',
                         contentType='movie', url=host + '/category/in-sala',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel, action='video', title='Sottotitolati',
                         contentType='movie', url=host + '/category/sub-ita',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel, action='sottomenu', title='[B]Categoria[/B]',
                         contentType='movie', url=host, thumbnail=''))
    itemlist.append(Item(channel=item.channel, action='search', extra='tvshow',
                         title='[B]Cerca...[/B]', contentType='movie', thumbnail=''))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Site-wide search; delegates to cerca() and swallows scrape errors."""
    logger.info('[filmsenzalimiti.py] search')

    item.url = host + '/?s=' + texto

    try:
        return cerca(item)
    except:
        # Log and keep the global search alive
        import sys
        for exc_part in sys.exc_info():
            logger.error('%s' % exc_part)
        return []
|
||||
|
||||
|
||||
def sottomenu(item):
    """List the site's category links (minus the leading 'Film' entry)."""
    logger.info('[filmsenzalimiti.py] sottomenu')
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<li class="cat-item.*?<a href="([^"]+)">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action='video',
                 title=scrapedtitle,
                 url=scrapedurl))

    # Drop the first category ("Film", already a top-level menu entry).
    # Guard against an empty scrape: the original pop(0) raised
    # IndexError when no category matched.
    if itemlist:
        itemlist.pop(0)

    return itemlist
|
||||
|
||||
|
||||
def video(item):
    """Scrape a listing page into playable movie items, plus pagination."""
    logger.info('[filmsenzalimiti.py] video')
    itemlist = []

    data = httptools.downloadpage(item.url).data.replace('\t', '').replace('\n', '')
    logger.info('[filmsenzalimiti.py] video' + data)

    patron = '<div class="col-mt-5 postsh">.*?<a href="([^"]+)" title="([^"]+)">.*?<span class="rating-number">(.*?)<.*?<img src="([^"]+)"'
    for movie_url, movie_title, movie_rating, movie_thumb in re.compile(patron, re.DOTALL).findall(data):
        movie_thumb = httptools.get_url_headers(movie_thumb)
        movie_title = scrapertools.decodeHtmlentities(movie_title).strip()
        movie_rating = scrapertools.decodeHtmlentities(movie_rating)

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action='findvideos',
                 title=movie_title + ' (' + movie_rating + ')',
                 fulltitle=movie_title,
                 url=movie_url,
                 show=movie_title,
                 contentType=item.contentType,
                 thumbnail=movie_thumb), tipo='movie'))

    # Pagination (chevron-right link)
    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"')
    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 action='video',
                 title='[COLOR lightgreen]' + config.get_localized_string(30992) + '[/COLOR]',
                 contentType=item.contentType,
                 url=next_page))

    return itemlist
|
||||
|
||||
def cerca(item):
    """Parse a search-results page into movie items, plus pagination."""
    logger.info('[filmsenzalimiti.py] cerca')
    itemlist = []

    data = httptools.downloadpage(item.url).data.replace('\t', '').replace('\n', '')
    logger.info('[filmsenzalimiti.py] video' + data)

    patron = '<div class="list-score">(.*?)<.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)"'
    for hit_rating, hit_url, hit_title, hit_thumb in re.compile(patron, re.DOTALL).findall(data):
        hit_thumb = httptools.get_url_headers(hit_thumb)
        hit_title = scrapertools.decodeHtmlentities(hit_title).strip()
        hit_rating = scrapertools.decodeHtmlentities(hit_rating)

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action='findvideos',
                 title=hit_title + ' (' + hit_rating + ')',
                 fulltitle=hit_title,
                 url=hit_url,
                 show=hit_title,
                 contentType=item.contentType,
                 thumbnail=hit_thumb), tipo='movie'))

    # Pagination (chevron-right link)
    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"')
    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 action='video',
                 title='[COLOR lightgreen]' + config.get_localized_string(30992) + '[/COLOR]',
                 contentType=item.contentType,
                 url=next_page))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve playable links for a title.

    HDPass-embedded players expose a resolution combo and, per
    resolution, a mirror combo; each (resolution, mirror) pair yields an
    encoded embed URL that is decoded with url_decode().
    """
    logger.info('[filmsenzalimiti.py] findvideos')

    itemlist = []

    # Download the title page
    data = httptools.downloadpage(item.url).data.replace('\t', '').replace('\n', '')
    logger.info('[filmsenzalimiti.py] findvideos page download= ' + data)

    patron = r'Streaming in HD<\/a><\/li><\/ul><br><p><iframe width="100%" height="430px" src="([^"]+)"'
    url = scrapertools.find_single_match(data, patron)

    if 'hdpass' in url:
        data = httptools.downloadpage('http:%s' % url if 'http' not in url else url).data

        start = data.find('<div class="row mobileRes">')
        end = data.find('<div id="playerFront">', start)
        data = data[start:end]

        patron_res = '<div class="row mobileRes">(.*?)</div>'
        patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
        patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="[^"]+" id="urlEmbed" value="([^"]+)"[^>]+>'

        res = scrapertools.find_single_match(data, patron_res)

        for res_url, resolution in scrapertools.find_multiple_matches(res, '<option[^v]+value="([^"]*)">([^<]*)</option>'):
            res_url = urlparse.urljoin(url, res_url)
            data = httptools.downloadpage('http:%s' % res_url if 'http' not in res_url else res_url).data.replace('\n', '')

            mir = scrapertools.find_single_match(data, patron_mir)

            # BUGFIX: the mirror entries were matched against a
            # non-existent </value> closing tag (the resolution combo
            # above correctly uses </option>), so no mirror ever matched.
            for mir_url, server in scrapertools.find_multiple_matches(mir, '<option[^v]+value="([^"]*)">([^<]*)</option>'):
                mir_url = urlparse.urljoin(url, mir_url)
                data = httptools.downloadpage('http:%s' % mir_url if 'http' not in mir_url else mir_url).data.replace('\n', '')

                for media_url in re.compile(patron_media).findall(data):
                    scrapedurl = url_decode(media_url)
                    logger.info(scrapedurl)
                    itemlist.append(
                        Item(channel=item.channel,
                             action="play",
                             title='[[COLOR green]%s[/COLOR]][[COLOR orange]%s[/COLOR]] %s' % (resolution, server, item.title),
                             url=scrapedurl,
                             server=server,
                             fulltitle=item.fulltitle,
                             thumbnail=item.thumbnail,
                             show=item.show,
                             plot=item.plot,
                             quality=resolution,
                             contentType=item.contentType,
                             folder=False))

    # "Add to video library" entry for movies
    if item.contentType == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findservers", contentTitle=item.contentTitle))

    # Optionally verify that links are alive
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Needed by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Needed by AutoPlay
    autoplay.start(itemlist, item)

    return itemlist
|
||||
|
||||
|
||||
def play(item):
    """Turn the resolved embed URL into server play items."""
    return servertools.find_video_items(data=item.url)
|
||||
|
||||
def url_decode(url_enc):
    """Decode the site's obfuscated embed URL.

    Encoding scheme: the base64 payload is reversed and its two halves
    swapped. Odd-length inputs keep their final character aside and
    apply the same swap/reverse to the remainder.
    """
    length = len(url_enc)
    if length % 2 == 0:
        half = length // 2  # // keeps this an int on both Python 2 and 3
        swapped = url_enc[half:] + url_enc[:half]
        return base64.b64decode(swapped[::-1])

    # Odd length. BUGFIX: the original tried to blank the final character
    # via item assignment (url_enc[length - 1] = ' '), which raises
    # TypeError because strings are immutable; slice it off instead.
    last_car = url_enc[length - 1]
    body = url_enc[:length - 1].strip()
    half = len(body) // 2
    swapped = body[half:] + body[:half]
    return base64.b64decode(swapped[::-1] + last_car)
|
||||
|
||||
|
||||
|
||||
def newest(categoria):
    """Return the newest items for *categoria* ('peliculas' only).

    Called by the global "news" aggregation; any scraping error is logged
    and an empty list is returned so the aggregation keeps working.
    """
    # BUG FIX: the log tag said '[filmsenzalimiti.py]', a copy-paste left
    # over from another channel; this file is altadefinizione01.py.
    logger.info('[altadefinizione01.py] newest ' + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host
            itemlist = video(item)

            # Drop the trailing "next page" entry, if present.
            # BUG FIX: guard the [-1] access so an empty result list does
            # not raise IndexError (swallowed below as a spurious error).
            if itemlist and 'Successivo>>' in itemlist[-1].title:
                itemlist.pop()

    # Keep the global "news" search alive on any error (file convention).
    except:
        import sys
        for line in sys.exc_info():
            logger.error('{0}'.format(line))
        return []

    return itemlist
|
||||
69
plugin.video.alfa/channels/filmsenzalimiticc.json
Normal file
69
plugin.video.alfa/channels/filmsenzalimiticc.json
Normal file
@@ -0,0 +1,69 @@
|
||||
{
|
||||
"id": "filmsenzalimiticc",
|
||||
"name": "Filmsenzalimiti CC",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "filmsenzalimiticc.png",
|
||||
"banner": "",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"top channels"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "1", "3", "5", "10" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
322
plugin.video.alfa/channels/filmsenzalimiticc.py
Normal file
322
plugin.video.alfa/channels/filmsenzalimiticc.py
Normal file
@@ -0,0 +1,322 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per Filmsenzalimiti CC
|
||||
# Alhaziel
|
||||
# ------------------------------------------------------------
|
||||
import base64
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
from platformcode import config
|
||||
from core.tmdb import infoIca
|
||||
|
||||
# Necessario per Autoplay
|
||||
__channel__ = 'filmsenzalimiticc'
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'vidlox', 'youtube']
|
||||
list_quality = ['default']
|
||||
|
||||
# Necessario per Verifica Link
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'filmsenzalimiticc')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'filmsenzalimiticc')
|
||||
|
||||
host = 'https://filmsenzalimiti.pw'
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel's root menu (films, categories, TV series, search)."""
    logger.info('[filmsenzalimiticc.py] mainlist')

    # Required by AutoPlay.
    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         action='video',
                         title='Film',
                         url=host,
                         contentType='movie',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='sottomenu_film',
                         title='Categorie Film',
                         url=host,
                         contentType='movie',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='video',
                         title='Serie TV',
                         url=host + '/serie-tv/',
                         contentType='episode',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='sottomenu_serie',
                         title='[B]Categorie Serie TV[/B]',
                         thumbnail=''))
    itemlist.append(Item(channel=item.channel,
                         action='search',
                         extra='tvshow',
                         title='[B]Cerca... (non funziona)[/B]',
                         thumbnail=''))

    # Required by AutoPlay (adds its configuration entry to the menu).
    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; on any error log it and return []."""
    logger.info('[filmsenzalimiticc.py] search')
    item.url = '%s/?s=%s' % (host, texto)
    try:
        return video(item)
    except:
        # Keep the global search alive even if this channel breaks.
        import sys
        for line in sys.exc_info():
            logger.error('%s' % line)
        return []
|
||||
|
||||
def sottomenu_film(item):
    """List the film categories scraped from the site's genre menu."""
    logger.info('[filmsenzalimiticc.py] sottomenu_film')
    itemlist = []

    # Download the page.
    data = httptools.downloadpage(item.url).data

    # Extract the genre links.
    patron = "<li><a href='([^']+)'>(.*?)<"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=__channel__,
                 action='video',
                 contentType=item.contentType,
                 title=scrapedtitle,
                 url=scrapedurl))

    # Remove the TV-series entries from the film sub-menu.
    # NOTE(review): these positions are tied to the site's current menu
    # layout — TODO confirm against a live page.
    # BUG FIX: guard each pop so a shorter menu does not raise IndexError.
    for index in (3, 29, 29, 32):
        if index < len(itemlist):
            itemlist.pop(index)

    return itemlist
|
||||
|
||||
def sottomenu_serie(item):
    """Static sub-menu with the TV-series categories."""
    logger.info('[seriehd.py] sottomenu_serie')

    voci = [('Serie TV HD', '/watch-genre/serie-altadefinizione/'),
            ('Miniserie', '/watch-genre/miniserie/'),
            ('Programmi TV', '/watch-genre/programmi-tv/')]

    itemlist = []
    for titolo, percorso in voci:
        itemlist.append(Item(channel=item.channel,
                             action='video',
                             title=titolo,
                             url=host + percorso,
                             contentType='episode',
                             thumbnail=''))

    return itemlist
|
||||
|
||||
|
||||
def video(item):
    """Scrape a listing page and return movie / TV-show items.

    *movie* items are routed to findvideos, *episode* items to episodios.
    Appends a "next page" item when the site exposes one.
    """
    logger.info('[filmsenzalimiticc.py] video')
    itemlist = []

    # Download the page (flattened so the regex can span lines freely).
    data = httptools.downloadpage(item.url).data.replace('\n', '').replace('\t', '')

    # Extract url, thumbnail, title, year and quality of every entry.
    patron = r'<div class="mediaWrap mediaWrapAlt">.*?<a href="([^"]+)".*?src="([^"]+)".*?<p>([^"]+) (\(.*?)streaming<\/p>.*?<p>\s*(\S+).*?<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedyear = scrapertools.decodeHtmlentities(scrapedyear)
        scrapedquality = scrapertools.decodeHtmlentities(scrapedquality)

        year = scrapedyear.replace('(', '').replace(')', '')
        infolabels = {}
        if year:
            infolabels['year'] = year

        title = scrapedtitle + ' ' + scrapedyear + ' [' + scrapedquality + ']'

        # Route the click according to the content type.
        if item.contentType == 'movie':
            azione = 'findvideos'
            tipologia = 'movie'
        if item.contentType == 'episode':
            azione = 'episodios'
            tipologia = 'tv'

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action=azione,
                 contentType=item.contentType,
                 title=title,
                 fulltitle=scrapedtitle,
                 text_color='azure',
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 infoLabels=infolabels,
                 show=scrapedtitle), tipo=tipologia))

    # Next page
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">')

    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 # BUG FIX: was action='film', a function that does not
                 # exist in this channel; pagination must call this same
                 # function again.
                 action='video',
                 title='[COLOR lightgreen]' + config.get_localized_string(30992) + '[/COLOR]',
                 url=next_page,
                 contentType=item.contentType,
                 thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def findvideos(item):  # Questa def. deve sempre essere nominata findvideos
    """Collect the playable links of *item* and decorate them for the UI.

    Works around the site's "protectlink" indirection by base64-decoding
    the iframe targets, then lets servertools detect the actual servers.
    """
    logger.info('[filmsenzalimiticc.py] findvideos')
    itemlist = []

    # Download the page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # Bypass the protectlink redirector: real URLs are base64-encoded in
    # the iframe sources (Python 2 str.decode('base64')).
    if 'protectlink' in data:
        urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
        for url in urls:
            url = url.decode('base64')
            if '\t' in url:  # fix: some links carry a trailing tab.
                url = url[:-1]
            data += '\t' + url
            if 'nodmca' in data:  # fix: Openload player in the TV-series section.
                page = httptools.downloadpage(url, headers=headers).data
                data += '\t' + scrapertools.find_single_match(page, '<meta name="og:url" content="([^=]+)">')

    itemlist = servertools.find_video_items(data=data)

    # Propagate the parent item's metadata onto every found link.
    for videoitem in itemlist:
        videoitem.title = item.fulltitle + ' - [[COLOR limegreen]'+videoitem.title+'[/COLOR] ]'
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    # "Add to video library" entry (movies only).
    if item.contentType == 'movie':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findservers", contentTitle=item.contentTitle))

    # Optionally verify that the links are alive (channel setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required by FilterTools.
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay.
    autoplay.start(itemlist, item)

    return itemlist
|
||||
|
||||
|
||||
|
||||
def episodios(item):  # Questa def. deve sempre essere nominata episodios
    """Build the episode list of a TV show.

    The show page embeds a player iframe; the season and episode menus
    live inside it, so the routine follows the iframe, walks every season
    and collects the episode links.
    """
    logger.info('[filmsenzalimiticc.py] episodios')
    itemlist = []

    # Find the seasons.

    # Download the show page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # The real content lives inside the first iframe.
    patron = r'<iframe src="([^"]+)".*?>'
    url = scrapertools.find_single_match(data, patron)

    # Download the iframe page (flattened for the regexes below).
    data = httptools.downloadpage(url).data.replace('\t', '').replace('\n', '')

    # Extract the season menu entries.
    section_stagione = scrapertools.find_single_match(data, r'Stagioni<\/a>(.*?)<\/ul>')
    patron = r'<a href="([^"]+)" >.*?<\/i>\s(.*?)<\/a>'
    seasons = re.compile(patron, re.DOTALL).findall(section_stagione)

    for scrapedseason_url, scrapedseason in seasons:

        # Find the episodes of this season.

        season_url = urlparse.urljoin(url, scrapedseason_url)

        # Download the season page.
        data = httptools.downloadpage(season_url).data.replace('\t', '').replace('\n', '')

        # Extract the episode menu entries.
        section_episodio = scrapertools.find_single_match(data, r'Episodio<\/a>(.*?)<\/ul>')
        patron = r'<a href="([^"]+)" >.*?<\/i>\s(.*?)<\/a>'
        episodes = re.compile(patron, re.DOTALL).findall(section_episodio)

        for scrapedepisode_url, scrapedepisode in episodes:
            episode_url = urlparse.urljoin(url, scrapedepisode_url)

            # e.g. "2x05"
            title = scrapedseason + 'x' + scrapedepisode.zfill(2)

            itemlist.append(
                Item(channel=item.channel,
                     action='findvideos',
                     contentType='episode',
                     title=title,
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    # "Add series to video library" entry.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR lightblue][B]Aggiungi Serie alla videoteca[/B][/COLOR]',
                 url=item.url,
                 action='add_serie_to_library',
                 extra='episodios' + '###' + item.extra,
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
|
||||
60
plugin.video.alfa/channels/filmstreaminggratis.json
Normal file
60
plugin.video.alfa/channels/filmstreaminggratis.json
Normal file
@@ -0,0 +1,60 @@
|
||||
{
|
||||
"id": "filmstreaminggratis",
|
||||
"name": "FilmStreamingGratis",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "http:\/\/www.filmstreaminggratis.org\/wp-content\/uploads\/2016\/10\/filmstreaminglogo.png",
|
||||
"bannermenu": "http:\/\/www.filmstreaminggratis.org\/wp-content\/uploads\/2016\/10\/filmstreaminglogo.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "3", "5", "10", "15" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
230
plugin.video.alfa/channels/filmstreaminggratis.py
Normal file
230
plugin.video.alfa/channels/filmstreaminggratis.py
Normal file
@@ -0,0 +1,230 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per filmstreaminggratis
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from platformcode import logger, config
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'thevideome', 'okru', 'mailru']
|
||||
list_quality = ['default']
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'filmstreaminggratis')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'filmstreaminggratis')
|
||||
|
||||
host = "https://www.filmstreaminggratis.org"
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: latest films, categories and search."""
    logger.info("kod.filmstreaminggratis mainlist")
    logger.info("[FilmStreamingGratis.py]==> mainlist")

    popcorn = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         action="ultimifilm",
                         title=color("Ultimi Film", "azure"),
                         url=host,
                         thumbnail=popcorn))
    itemlist.append(Item(channel=item.channel,
                         action="categorie",
                         title=color("Categorie", "azure"),
                         url=host,
                         thumbnail=popcorn))
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title=color("Cerca film ...", "yellow"),
                         extra="movie",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    # AutoPlay configuration entry.
    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
|
||||
def newest(categoria):
    """Return the newest films for the global "news" section.

    Any scraping error is logged and an empty list is returned so the
    aggregation keeps working.
    """
    logger.info("[FilmStreamingGratis.py]==> newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "ultimifilm"
            itemlist = ultimifilm(item)

            # Drop a trailing pagination item, if any.
            # BUG FIX: guard the [-1] access so an empty result list does
            # not raise IndexError (swallowed below as a spurious error).
            if itemlist and itemlist[-1].action == "ultimifilm":
                itemlist.pop()

    # Keep the global "news" search alive on any error (file convention).
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; on failure log the error and return []."""
    logger.info("[FilmStreamingGratis.py]==> search")
    item.url = "%s/?s=%s" % (host, texto)
    try:
        return loadfilms(item)
    except:
        # Keep the global search alive even if this channel breaks.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
|
||||
def ultimifilm(item):
    """List the films shown in the home-page carousel."""
    logger.info("[FilmStreamingGratis.py]==> ultimifilm")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # get_match raises when the carousel is missing, aborting the listing.
    blocco = scrapertools.get_match(data, '<div class="es-carousel">(.*?)</div></li></ul>')

    for scrapedurl, scrapedtitle in re.findall('<h5><a href="([^"]+)"[^>]+>([^<]+)</a></h5>', blocco, re.DOTALL):
        titolo = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=titolo,
                 fulltitle=titolo,
                 url=scrapedurl,
                 extra="movie",
                 thumbnail=item.thumbnail,
                 folder=True), tipo="movie"))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def categorie(item):
    """List the film categories from the sidebar menu."""
    logger.info("[FilmStreamingGratis.py]==> categorie")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.get_match(data, '<div class="list styled custom-list"><ul>(.*?)</ul></div>')

    for scrapedurl, scrapedtitle in re.findall('<li><a href="([^"]+)" title="[^"]+" >([^<]+)</a></li>', blocco, re.DOTALL):
        # The site has no usable TV-series listing, so skip that entry.
        if "Serie TV" in scrapedtitle:
            continue
        itemlist.append(
            Item(channel=item.channel,
                 action="loadfilms",
                 title=scrapedtitle,
                 url=scrapedurl,
                 extra="movie",
                 thumbnail=item.thumbnail,
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def loadfilms(item):
    """List the films of a category / search-result page, with pagination."""
    logger.info("[FilmStreamingGratis.py]==> loadfilms")
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # url, title, thumbnail and plot of each post.
    patron = '<h2 class="post-title"><a href="([^"]+)" title="[^"]+">'
    patron += '([^<]+)</a></h2>[^>]+>[^>]+>[^>]+><.*?data-src="([^"]+)"'
    patron += '[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+?([^<]+)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot.strip())
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 plot=scrapedplot,
                 thumbnail=scrapedthumbnail,
                 folder=True), tipo=item.extra))

    # Pagination: the site exposes the next page via rel="next".
    patronvideos = '<link rel="next" href="([^"]+)"\s*/>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = matches[0]
        itemlist.append(
            Item(channel=item.channel,
                 action="loadfilms",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the playable links of a film page.

    The site hides the servers behind /go/ redirect URLs; each one is
    followed and the final URL is handed to servertools for detection.
    """
    logger.info("[FilmStreamingGratis.py]==> findvideos")

    data = httptools.downloadpage(item.url).data

    # Follow the /go/ redirector links and collect the final URLs.
    if '%s/go/' % host in data:
        urls = scrapertools.find_multiple_matches(data, r'%s/go/[0-9\-]{6}' % host)  # Multiple matches con go/9575-2/
        data = ""
        for url in urls:
            data += httptools.downloadpage(url).url + '\n'

    itemlist = servertools.find_video_items(data=data)

    # Decorate each link with its server name and the parent metadata.
    for videoitem in itemlist:
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = "".join(["[%s] " % color(server.capitalize(), 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Optionally verify that the links are alive (channel setting).

    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required by FilterTools.

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay.

    autoplay.start(itemlist, item)

    # "Add to video library" entry (movies only).
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
|
||||
|
||||
def color(text, color):
    """Wrap *text* in Kodi [COLOR] markup using the given colour name."""
    return "[COLOR {0}]{1}[/COLOR]".format(color, text)
|
||||
|
||||
67
plugin.video.alfa/channels/filmzstreaming.json
Normal file
67
plugin.video.alfa/channels/filmzstreaming.json
Normal file
@@ -0,0 +1,67 @@
|
||||
{
|
||||
"id": "filmzstreaming",
|
||||
"name": "Filmzstreaming",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/filmzstreaming.pw\/wp-content\/uploads\/2017\/10\/FilmZStreaming-2.png",
|
||||
"bannermenu": "https:\/\/filmzstreaming.pw\/wp-content\/uploads\/2017\/10\/FilmZStreaming-2.png",
|
||||
"categories": ["movie", "tvshow"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero de link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
372
plugin.video.alfa/channels/filmzstreaming.py
Normal file
372
plugin.video.alfa/channels/filmzstreaming.py
Normal file
@@ -0,0 +1,372 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per filmzstreaming
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
import re, urlparse, urllib
|
||||
|
||||
from platformcode import logger, config
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
|
||||
|
||||
host = "https://filmzstreaming.is"
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango', 'youtube']
|
||||
list_quality = ['default']
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'filmzstreaming')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'filmzstreaming')
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
def mainlist(item):
    """Root menu of the channel: films, categories, series and search."""
    logger.info("kod.filmzstreaming mainlist")

    # Required by AutoPlay.
    autoplay.init(item.channel, list_servers, list_quality)
    # NOTE(review): action="categorias" has no matching function in the
    # visible part of this file — confirm it exists elsewhere in the module.
    itemlist = [Item(channel=item.channel,
                     title="[COLOR azure]Ultimi film inseriti[/COLOR]",
                     action="peliculas",
                     extra="movie",
                     url="%s/film/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR azure]Categorie film[/COLOR]",
                     action="categorias",
                     url=host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR azure]Serie TV[/COLOR]",
                     action="peliculas_tv",
                     extra="tvshow",
                     url="%s/serietv/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca...[/COLOR]",
                     action="search",
                     extra="movie",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
                     action="search",
                     extra="tvshow",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    # Required by AutoPlay (adds its configuration entry to the menu).
    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
def peliculas(item):
    """List the films on the current page and append pagination."""
    logger.info("kod.filmzstreaming peliculas")
    itemlist = []

    # Download the page and isolate the listing area.
    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, '</h1>(.*?)<div class="sidebar scrolling">')

    # url/title of each film.
    for scrapedurl, scrapedtitle in re.findall(r'<h3><a href="([^"]+)">(.*?)</a></h3>', blocco, re.DOTALL):
        titolo = scrapertools.decodeHtmlentities(scrapedtitle).replace("Streaming ", "")
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=titolo,
                 fulltitle=titolo,
                 url=scrapedurl,
                 thumbnail=""), tipo="movie"))

    # Pagination.
    pages = re.findall('<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">', data, re.DOTALL)
    if pages:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, pages[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
def peliculas_tv(item):
    """List the TV shows on the current page and append pagination."""
    logger.info("kod.filmzstreaming peliculas")
    itemlist = []

    # Download the page and isolate the listing area.
    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, '</h1>(.*?)<div class="sidebar scrolling">')

    # url/title of each show.
    for scrapedurl, scrapedtitle in re.findall(r'<h3><a href="([^"]+)">(.*?)</a></h3>', blocco, re.DOTALL):
        titolo = scrapertools.decodeHtmlentities(scrapedtitle).replace(" Streaming", "")
        titolo = titolo.title()
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodios",
                 contentType="tv",
                 title=titolo,
                 fulltitle=titolo,
                 url=scrapedurl,
                 thumbnail=""), tipo="tv"))

    # Pagination.
    pages = re.findall('<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">', data, re.DOTALL)
    if pages:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, pages[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
def episodios(item):
    """List the episodes of a TV show and add the library entry."""
    logger.info("kod.filmzstreaming peliculas_tv")
    itemlist = []

    # Download the show page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # Each episode exposes its number and its title link.
    patron = '<div class="numerando">(.*?)</div><div class="episodiotitle"> <a href="([^"]+)">(.*?)</a> '
    for numero, scrapedurl, nome in re.findall(patron, data, re.DOTALL):
        titolo = numero + " " + nome
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 extra=item.extra,
                 title=titolo,
                 fulltitle=titolo,
                 url=scrapedurl,
                 thumbnail=""), tipo="tv"))

    # "Add series to video library" service entry.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
def peliculas_src_tv(item):
    """List TV-series search results, with pagination.

    Fix: removed the unused ``scrapedplot`` local and the always-empty
    ``scrapedthumbnail`` variable (the thumbnail was always ``""``).
    """
    logger.info("kod.filmzstreaming peliculas")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url, headers=headers).data

    # Estrae i contenuti: result url + title.
    patron = '<div class="title">\s*<a href="([^"]+)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # The site prefixes every result title with "Streaming "; strip it.
        scrapedtitle = scrapedtitle.replace("Streaming ", "")

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodios",
                 contentType="tv",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=""), tipo="tv"))

    # Paginazione: link that follows the "current" page marker.
    patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_src_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
def peliculas_src(item):
    """List movie search results, with pagination.

    Fix: removed the unused ``scrapedplot`` local and the always-empty
    ``scrapedthumbnail`` variable (the thumbnail was always ``""``).
    """
    logger.info("kod.filmzstreaming peliculas")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url, headers=headers).data

    # Estrae i contenuti: result url + title.
    patron = '<div class="title">\s*<a href="([^"]+)">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # The site prefixes every result title with "Streaming "; strip it.
        scrapedtitle = scrapedtitle.replace("Streaming ", "")

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=""), tipo="movie"))

    # Paginazione: link that follows the "current" page marker.
    patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_src",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
def categorias(item):
    """List the site's category menu entries."""
    logger.info("kod.filmzstreaming categorias")

    data = httptools.downloadpage(item.url, headers=headers).data

    # Restrict parsing to the dropdown sub-menu block only.
    bloque = scrapertools.get_match(data, '<ul class="sub-menu">(.*?)</ul>')

    # Each closed anchor inside the menu is one category.
    category_links = re.compile('<a href="([^"]+)">(.*?)</a></li>', re.DOTALL).findall(bloque)

    return [
        Item(channel=item.channel,
             action="peliculas",
             title="[COLOR azure]" + cat_title + "[/COLOR]",
             url=cat_url,
             thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
             folder=True)
        for cat_url, cat_title in category_links
    ]
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto*, dispatching on the content kind."""
    logger.info("[filmzstreaming.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    # Map content kind -> result parser (None for unknown kinds,
    # mirroring the original fall-through behaviour).
    handlers = {"movie": peliculas_src, "tvshow": peliculas_src_tv}
    try:
        handler = handlers.get(item.extra)
        if handler is not None:
            return handler(item)
    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
def _expand_player_sources(data, patron):
    """Resolve every (post, nume) dooplay player option matched by
    *patron* via the site's ajax endpoint, appending the returned embed
    HTML to *data* so the server detector can see it."""
    uri = "%s/wp-admin/admin-ajax.php" % host
    for posts, numes in re.compile(patron, re.DOTALL).findall(data):
        payload = urllib.urlencode({'action': 'doo_player_ajax', 'post': posts, 'nume': numes})
        data += httptools.downloadpage(uri, post=payload).data
    return data


def findvideos(item):
    """Collect the playable video links for a movie or episode page.

    Fix: the episode and movie branches duplicated the whole
    download-and-ajax loop; only the option pattern differs, so the
    loop is now shared via ``_expand_player_sources``.  Dead
    commented-out ``requests`` code removed.
    """
    logger.info("[filmzstreaming.py] findvideos")

    # Carica la pagina
    data = httptools.downloadpage(item.url).data

    # Episode and movie pages mark the player options with slightly
    # different markup; only the regex differs between the two cases.
    if item.contentType == 'episode':
        patron = '<li id=[^=]+="dooplay_player_option[^=]+="([^"]+)" data-nume="([^"]+)"'
    else:
        patron = '<span class="loader"></span></li><li id=[^=]+="dooplay_player_[^=]+="([^"]+)" data-nume="([^"]+)">'
    data = _expand_player_sources(data, patron)

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Requerido para Filtrar enlaces
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
33
plugin.video.alfa/channels/guardarefilm.json
Normal file
33
plugin.video.alfa/channels/guardarefilm.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"id": "guardarefilm",
|
||||
"name": "Guardarefilm",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/guardarefilm.png",
|
||||
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/guardarefilm.png",
|
||||
"categories": [
|
||||
"tvshow",
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
313
plugin.video.alfa/channels/guardarefilm.py
Normal file
313
plugin.video.alfa/channels/guardarefilm.py
Normal file
@@ -0,0 +1,313 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per guardarefilm
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from platformcode import logger, config
|
||||
|
||||
host = "https://www.guardarefilm.video"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the guardarefilm channel."""
    logger.info("kod.guardarefilm mainlist")

    # Shared artwork, hoisted so each menu entry stays readable.
    movie_icon = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    anime_icon = "http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"
    search_icon = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Novita'[/COLOR]",
                         action="peliculas",
                         url="%s/streaming-al-cinema/" % host,
                         thumbnail=movie_icon))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]HD[/COLOR]",
                         action="peliculas",
                         url="%s/film-streaming-hd/" % host,
                         thumbnail=movie_icon))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Popolari[/COLOR]",
                         action="pelis_top100",
                         url="%s/top100.html" % host,
                         thumbnail=movie_icon))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Categorie[/COLOR]",
                         action="categorias",
                         url=host,
                         thumbnail=movie_icon))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Animazione[/COLOR]",
                         action="peliculas",
                         url="%s/streaming-cartoni-animati/" % host,
                         thumbnail=anime_icon))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca...[/COLOR]",
                         action="search",
                         extra="movie",
                         thumbnail=search_icon))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Serie TV[/COLOR]",
                         action="peliculas_tv",
                         extra="tvshow",
                         url="%s/serie-tv-streaming/" % host,
                         thumbnail=movie_icon))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
                         action="search",
                         extra="tvshow",
                         thumbnail=search_icon))
    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """Return the newest entries for the global "news" menu.

    Only the "film" category is supported.  The trailing pagination
    pseudo-item emitted by ``peliculas()`` is stripped from the result.
    Fix: ``itemlist[-1]`` raised IndexError on an empty result, which
    the broad except silently turned into ``[]``; now guarded.
    """
    logger.info("kod.guardarefilm newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host + "/streaming-al-cinema/"
            item.action = "peliculas"
            itemlist = peliculas(item)

            # Drop the "next page" item if present (guard empty lists).
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List the site's genre/category menu."""
    logger.info("kod.guardarefilm categorias")

    data = httptools.downloadpage(item.url, headers=headers).data

    # Narrow search by selecting only the dropdown combo block.
    bloque = scrapertools.get_match(data, '<ul class="reset dropmenu">(.*?)</ul>')

    # Each <li><a> pair inside the combo is one category.
    matches = re.compile('<li><a href="([^"]+)">(.*?)</a></li>', re.DOTALL).findall(bloque)

    return [Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + cat_title + "[/COLOR]",
                 url=urlparse.urljoin(item.url, cat_url),
                 thumbnail="",
                 plot="")
            for cat_url, cat_title in matches]
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto*; section 0 = TV shows, 1 = movies.

    Fix: the query string contained ``§ion=`` — the literal
    ``&sect;`` HTML entity produced by encoding garbling of
    ``&section=`` — which broke the advanced-search request.
    """
    logger.info("[guardarefilm.py] " + item.url + " search " + texto)
    section = ""
    if item.extra == "tvshow":
        section = "0"
    elif item.extra == "movie":
        section = "1"
    item.url = '%s?do=search_advanced&q=%s&section=%s&director=&actor=&year_from=&year_to=' % (host, texto, section)
    try:
        if item.extra == "movie":
            return peliculas(item)
        if item.extra == "tvshow":
            return peliculas_tv(item)
    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def peliculas(item):
    """List movie posters on a catalogue page, with pagination."""
    logger.info("kod.guardarefilm peliculas")
    itemlist = []

    # Download the catalogue page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # Poster block: detail url, thumbnail, then the title anchor.
    patron = '<div class="poster"><a href="([^"]+)".*?><img src="([^"]+)".*?><span.*?</div>\s*'
    patron += '<div.*?><a.*?>(.*?)</a></div>'
    for movie_url, movie_thumb, raw_title in re.compile(patron, re.DOTALL).findall(data):
        clean_title = scrapertools.decodeHtmlentities(raw_title)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodios" if item.extra == "tvshow" else "findvideos",
                 contentType="movie",
                 fulltitle=clean_title,
                 show=clean_title,
                 title="[COLOR azure]" + clean_title + "[/COLOR]",
                 url=movie_url,
                 thumbnail=urlparse.urljoin(host, movie_thumb),
                 plot="",
                 folder=True), tipo='movie'))

    # Pagination: first link after the current-page marker.
    next_pages = re.compile('<div class="pages".*?<span>.*?<a href="([^"]+)">', re.DOTALL).findall(data)
    if next_pages:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, next_pages[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def peliculas_tv(item):
    """List TV-series posters on a catalogue page, with pagination."""
    logger.info("kod.guardarefilm peliculas")
    itemlist = []

    # Download the catalogue page.
    data = httptools.downloadpage(item.url, headers=headers).data

    # Poster block: detail url, thumbnail, then the title anchor.
    patron = '<div class="poster"><a href="([^"]+)".*?><img src="([^"]+)".*?><span.*?</div>\s*'
    patron += '<div.*?><a.*?>(.*?)</a></div>'
    for serie_url, serie_thumb, raw_title in re.compile(patron, re.DOTALL).findall(data):
        clean_title = scrapertools.decodeHtmlentities(raw_title)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodios" if item.extra == "tvshow" else "findvideos",
                 fulltitle=clean_title,
                 show=clean_title,
                 title="[COLOR azure]" + clean_title + "[/COLOR]",
                 url=serie_url,
                 thumbnail=urlparse.urljoin(host, serie_thumb),
                 plot="",
                 folder=True), tipo='tv'))

    # Pagination: first link after the current-page marker.
    next_pages = re.compile('<div class="pages".*?<span>.*?<a href="([^"]+)">', re.DOTALL).findall(data)
    if next_pages:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, next_pages[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
def pelis_top100(item):
    """List the Top-100 page (one extra request per entry for plot/poster).

    Fix: when the description <div> was missing, ``str.find`` returned
    -1 and the old slice ``html[-1:end]`` produced garbage for the
    plot; now guarded with an empty plot instead.
    """
    logger.info("kod.guardarefilm peliculas")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url, headers=headers).data

    # Each entry: detail url + "Title (year)".
    patron = r'<span class="top100_title"><a href="([^"]+)">(.*?\(\d+\))</a>'
    matches = re.compile(patron).findall(data)

    for scrapedurl, scrapedtitle in matches:
        # Fetch the detail page to recover plot and poster.
        html = httptools.downloadpage(scrapedurl, headers=headers).data
        start = html.find("<div class=\"textwrap\" itemprop=\"description\">")
        if start != -1:
            end = html.find("</div>", start)
            scrapedplot = html[start:end]
            scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
            scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        else:
            scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # NOTE(review): this grabs the anchor href inside "poster-wrapp",
        # not an <img> src — presumably a full-size poster link; confirm.
        scrapedthumbnail = scrapertools.find_single_match(html, r'class="poster-wrapp"><a href="([^"]+)"')
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios" if item.extra == "tvshow" else "findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=urlparse.urljoin(host, scrapedthumbnail),
                 plot=scrapedplot,
                 folder=True,
                 fanart=host + scrapedthumbnail))

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """List the episodes of a guardarefilm series page."""
    logger.info("kod.guardarefilm episodios")

    itemlist = []

    # Download the series page.
    data = httptools.downloadpage(item.url).data

    # Each episode row carries its title in data-title and its link
    # a few tags later, closed by </span>.
    episode_re = re.compile(
        r'<li id="serie-[^"]+" data-title="Stai guardando: ([^"]+)">'
        r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(.*?)</span>',
        re.DOTALL)
    for raw_title, episode_url in episode_re.findall(data):
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="episode",
                 title=scrapertools.decodeHtmlentities(raw_title),
                 url=episode_url,
                 thumbnail=item.thumbnail,
                 extra=item.extra,
                 fulltitle=item.fulltitle,
                 show=item.show))

    # Offer to add the whole series to the video library.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve hoster links for a movie page or an episode item.

    Episode items already carry the raw embed HTML in ``item.url``;
    movie items need their page downloaded first.
    """
    logger.info("kod.guardarefilm findvideos")

    if item.contentType == "episode":
        data = item.url
    else:
        data = httptools.downloadpage(item.url).data

    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    return itemlist
|
||||
24
plugin.video.alfa/channels/guardaseriecc.json
Normal file
24
plugin.video.alfa/channels/guardaseriecc.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "guardaseriecc",
|
||||
"name": "Guardaserie.cc",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/raw.githubusercontent.com\/costaplus\/images\/master\/posters\/guardaseriecc.png",
|
||||
"bannermenu": "https:\/\/raw.githubusercontent.com\/costaplus\/images\/master\/posters\/guardaseriecc.png",
|
||||
"categories": [
|
||||
"tvshow"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
260
plugin.video.alfa/channels/guardaseriecc.py
Normal file
260
plugin.video.alfa/channels/guardaseriecc.py
Normal file
@@ -0,0 +1,260 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per guardaseriecc
|
||||
#
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import httptools, scrapertools, servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from lib import unshortenit
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
host = 'https://guardaserie.site'
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the guardaserie channel."""
    logger.info("Kodi on Demand.leserietv mainlist")
    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         action="lista_serie",
                         title="[COLOR azure]Tutte le serie[/COLOR]",
                         url="%s/serietv/" % host,
                         thumbnail=thumbnail_lista,
                         fanart=FilmFanart))
    itemlist.append(Item(channel=item.channel,
                         title="[COLOR azure]Categorie[/COLOR]",
                         action="categoria",
                         url=host,
                         thumbnail=thumbnail_categoria,
                         fanart=FilmFanart))
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title="[COLOR orange]Cerca...[/COLOR]",
                         thumbnail=thumbnail_cerca,
                         fanart=FilmFanart))
    return itemlist
|
||||
|
||||
|
||||
def categoria(item):
    """List the WordPress category links of the site."""
    logger.info("[Kodi on Demand].[guardareseriecc] [categoria]")

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<li class="cat-item cat-item.*?"><a href="(.*?)".*?>(.*?)</a>'
    matches = scrapertools.find_multiple_matches(data, patron)

    return [Item(channel=item.channel,
                 action="lista_serie",
                 title="[COLOR azure]" + cat_name + "[/COLOR]",
                 url=cat_url,
                 thumbnail=item.thumbnail,
                 fulltitle=cat_name,
                 show=cat_name, viewmode="movie")
            for cat_url, cat_name in matches]
|
||||
|
||||
|
||||
def lista_serie(item):
    """List series posters on a catalogue page, with pagination."""
    logger.info("[Kodi on Demand].[guardareseriecc] [lista_serie]")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    # Poster block: thumbnail, alt-title, then the detail link.
    poster_re = re.compile(
        '<div.*?class="poster">[^<]+<img.*?src="(.*?)".*?alt="(.*?)"[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<[^<]+<a.*?href="(.*?)">',
        re.DOTALL)
    matches = poster_re.findall(data)
    scrapertools.printMatches(matches)

    for serie_thumb, raw_title, serie_url in matches:
        # Drop the trailing "(year)" part and decode HTML entities.
        clean_title = scrapertools.decodeHtmlentities(raw_title.split("(")[0]).strip()
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodios",
                 title="[COLOR azure]" + clean_title + "[/COLOR]",
                 url=serie_url,
                 thumbnail=serie_thumb,
                 fulltitle=clean_title,
                 show=clean_title, viewmode="movie"), tipo='tv'))

    # Paginazione
    # ===========================================================
    next_url = scrapertools.find_single_match(data, 'class="current">.*?</span><a href=\'(.*?)\'')
    logger.debug("pag " + next_url)

    # ===========================================================
    if len(next_url) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_url,
                 thumbnail=thumbnail_successivo,
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    # Builds the full episode list for one series.
    # Flow: series page -> embedded player iframe -> season links ->
    # per-season episode links.  Each step scrapes the page fetched by
    # the previous one, so the pattern/download order is load-bearing.
    logger.info("[Kodi on Demand].[guardareseriecc] [stagione]")
    itemlist = []

    # 1) The series page embeds the actual player site in an iframe.
    patron = '<iframe.*class="metaframe rptss".*?src="(.*?)".*?frameborder=".*?".*?scrolling=".*?".*?allowfullscreen>.*?</iframe>'
    data = httptools.downloadpage(item.url, headers=headers).data
    elenco = scrapertools.find_single_match(data, patron)

    # 2) Inside the iframe page, isolate the season-selector block.
    patron = '</i>.*?Stagioni</a>.*?</ul>[^<]+<select.*?name="sea_select"'
    data = httptools.downloadpage(elenco, headers=headers).data
    select = scrapertools.find_single_match(data, patron)

    # 3) Each anchor inside the selector is one season (url + label).
    patron = '<a.*?href="(.*?)".*?><i.*?<\/i>(.*?)</a></li>'
    stagione = scrapertools.find_multiple_matches(select, patron)
    scrapertools.printMatches(stagione)

    for stagioneurl, stagionetitle in stagione:
        # 4) Per season: isolate the episode block and list its links.
        patron = '</i>.*?Episodio</a>(.*?)<select name="ep_select"'
        data = httptools.downloadpage(stagioneurl, headers=headers).data
        elenco = scrapertools.find_single_match(data, patron, 0)
        patron = '<a href="(.*?)" ><i class="fa.*?"></i>(.*?)</a></li>'
        episodi = scrapertools.find_multiple_matches(elenco, patron)

        for scrapedurl, scrapedtitle in episodi:
            # Compose an "SxEE"-style label, e.g. "1x03".
            scrapedtitle = stagionetitle + "x" + scrapedtitle.replace(" ", "").zfill(2)
            itemlist.append(Item(channel=item.channel,
                                 action="findvideos",
                                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                                 url=scrapedurl,
                                 thumbnail=item.thumbnail,
                                 fanart=item.fanart,
                                 plot=item.plot,
                                 fulltitle=scrapedtitle,
                                 contentType="episode",
                                 show=scrapedtitle, viewmode="movie"))

    # Optional "add series to videolibrary" service entry.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 contentType="episode",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    # Resolves the mirror links for one episode.
    # The episode page offers a <select> of mirrors; each mirror page may
    # wrap the real hoster URL in a base64-encoded "protectlink" iframe,
    # which is decoded and redirect-resolved before server detection.
    logger.info("[Kodi on Demand].[guardareseriecc] [findvideos]")
    itemlist = []
    listurl = set()  # de-duplicated hoster URLs

    patron = r'<select.*?style="width:100px;" class="dynamic_select">(.*?)</select>'
    data = httptools.downloadpage(item.url, headers=headers).data
    elenco = scrapertools.find_single_match(data, patron, 0)

    patron = '<a class="" href="(.*?)">(.*?)</a>'
    elenco_link = scrapertools.find_multiple_matches(elenco, patron)

    for scrapedurl, scrapedtitle in elenco_link:
        data = httptools.downloadpage(scrapedurl, headers=headers).data
        if 'protectlink' in data:
            # The iframe src embeds the target after an "=" separator.
            urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
            for url in urls:
                # Python 2 idiom: base64-decode the wrapped hoster URL.
                url = url.decode('base64')
                # Drop the trailing stray character (it is not part of
                # the URL), then follow any 30x redirects.
                url = unshortenit.unwrap_30x_only(url[:-1])
                listurl.add(url)

    if listurl:
        # str(set) is fed to the server detector, which regex-scans it.
        itemlist = servertools.find_video_items(data=str(listurl))
        for videoitem in itemlist:
            videoitem.title = item.title + '[COLOR orange][B]' + videoitem.title + '[/B][/COLOR]'
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = item.channel
            videoitem.contentType = item.contentType

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and delegate parsing to ricerca()."""
    logger.info("[Kodi on Demand].[guardareseriecc][search] " + texto)

    item.url = host + "/?s=" + texto

    try:
        return ricerca(item)
    # Keep the global search alive on any scraping error.
    except:
        import sys
        for exc_part in sys.exc_info():
            logger.error("%s" % exc_part)
        return []
|
||||
|
||||
|
||||
def ricerca(item):
    """Parse the search-results page into series items."""
    logger.info("[Kodi on Demand].[guardareseriecc][ricerca] ")

    patron = '<div class="result-item">[^>]+>[^>]+>[^>]+>[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)" '
    data = httptools.downloadpage(item.url, headers=headers).data

    itemlist = []
    for serie_url, serie_thumb, raw_title in scrapertools.find_multiple_matches(data, patron):
        # Strip the trailing "(year)" part from the displayed title.
        clean_title = raw_title.split("(")[0]
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodios",
                 title="[COLOR azure]" + clean_title + "[/COLOR]",
                 url=serie_url,
                 thumbnail=serie_thumb,
                 fulltitle=clean_title,
                 show=clean_title, viewmode="movie"), tipo='tv'))

    return itemlist
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
# Artwork used by the menu entries above (module-level constants).
# Fix: removed a dead, commented-out (triple-quoted) duplicate of
# search() kept from a previous revision.
# ------------------------------------------------------------------
FilmFanart = "https://superrepo.org/static/images/fanart/original/script.artwork.downloader.jpg"
ThumbnailHome = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/81/Dynamic-blue-up.svg/580px-Dynamic-blue-up.svg.png"
thumbnail_novita = "http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"
thumbnail_lista = "http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"
thumbnail_categoria = "https://farm8.staticflickr.com/7562/15516589868_13689936d0_o.png"
thumbnail_top = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
thumbnail_cerca = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
thumbnail_successivo = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
|
||||
36
plugin.video.alfa/channels/guardaserieclick.json
Normal file
36
plugin.video.alfa/channels/guardaserieclick.json
Normal file
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"id": "guardaserieclick",
|
||||
"name": "GuardaSerie.click",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "http://www.guardaserie.click/wp-content/themes/guardaserie/images/logogd.png",
|
||||
"bannermenu": "http://www.guardaserie.click/wp-content/themes/guardaserie/images/logogd.png",
|
||||
"version": "1",
|
||||
"date": "24/05/2018",
|
||||
"changes": "Re Enabled Channel",
|
||||
"categories": [
|
||||
"tvshow",
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
303
plugin.video.alfa/channels/guardaserieclick.py
Normal file
303
plugin.video.alfa/channels/guardaserieclick.py
Normal file
@@ -0,0 +1,303 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per guardaserie.click
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re
|
||||
|
||||
from core import httptools, scrapertools, servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
|
||||
host = "http://www.guardaserie.watch"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def mainlist(item):
    """Build the channel's root menu entries."""
    logger.info("[GuardaSerieClick.py]==> mainlist")
    folder_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    # (action, title, url) triples for the browseable sections
    sections = [
        ("nuoveserie", color("Nuove serie TV", "orange"), "%s/lista-serie-tv" % host),
        ("serietvaggiornate", color("Serie TV Aggiornate", "azure"), "%s/lista-serie-tv" % host),
        ("lista_serie", color("Anime", "azure"), "%s/category/animazione/" % host),
        ("categorie", color("Categorie", "azure"), host),
    ]
    itemlist = [Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=folder_thumb)
                for action, title, url in sections]
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title=color("Cerca ...", "yellow"),
                         extra="serie",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def newest(categoria):
    """Entry point used by the addon's global "newest" menu.

    Only the "series" category is supported; any scraping failure is
    logged and an empty list is returned so the global scan keeps going.
    """
    logger.info("[GuardaSerieClick.py]==> newest " + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = "%s/lista-serie-tv" % host
            item.action = "serietvaggiornate"
            itemlist = serietvaggiornate(item)

            # Drop the trailing "next page" entry, if present
            if itemlist and itemlist[-1].action == "serietvaggiornate":
                itemlist.pop()

    # Keep the global scan alive on error (narrowed from a bare except)
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def search(item, texto):
    """Search the site; results are parsed by lista_serie.

    Errors are logged and an empty list returned so the addon's global
    search keeps going.
    """
    logger.info("[GuardaSerieClick.py]==> search")
    item.url = host + "/?s=" + texto
    try:
        return lista_serie(item)
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt pass through
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def nuoveserie(item):
    """List the "new series" strip from the series index page."""
    logger.info("[GuardaSerieClick.py]==> nuoveserie")
    itemlist = []

    page = httptools.downloadpage(item.url, headers=headers).data
    # Isolate the "new" carousel before matching individual cards
    section = scrapertools.get_match(page, '<div\s*class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div')

    card_re = r'<a\s*href="([^"]+)".*?>\s*<img\s*.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>'
    card_re += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p>'
    for url, thumb, raw_title in re.compile(card_re, re.DOTALL).findall(section):
        title = scrapertools.decodeHtmlentities(raw_title)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tv",
                 title=title,
                 fulltitle=title,
                 url=url,
                 extra="tv",
                 show=title,
                 thumbnail=thumb,
                 folder=True), tipo="tv"))

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def serietvaggiornate(item):
    """List recently updated episodes.

    Each resulting item carries in `extra` the regex needed later (by
    findepvideos) to pull its embed out of the show page.
    """
    logger.info("[GuardaSerieClick.py]==> serietvaggiornate")
    itemlist = []

    page = httptools.downloadpage(item.url, headers=headers).data
    section = scrapertools.get_match(page,
                                     r'<div\s*class="container container-title-serie-lastep container-scheda" meta-slug="lastep">(.*?)</div></div><div')

    card_re = r'<a\s*rel="nofollow" href="([^"]+)"[^>]+> <img\s*.*?src="([^"]+)"[^>]+>[^>]+>'
    card_re += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
    for url, thumb, ep_label, raw_title in re.compile(card_re, re.DOTALL).findall(section):
        # ep_label looks like "3x07": split it into season and episode
        season_ep = re.compile(r'^(\d+)x(\d+)', re.DOTALL).findall(ep_label)
        show_title = scrapertools.decodeHtmlentities(raw_title)
        full_label = "%s %s" % (show_title, ep_label)
        embed_re = r'<span\s*.*?meta-stag="%s" meta-ep="%s" meta-embed="([^"]+)"[^>]*>' % (
            season_ep[0][0], season_ep[0][1].lstrip("0"))
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findepvideos",
                 contentType="tv",
                 title=full_label,
                 show=full_label,
                 fulltitle=show_title,
                 url=url,
                 extra=embed_re,
                 thumbnail=thumb,
                 folder=True), tipo="tv"))
    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def categorie(item):
    """Expose the site's category dropdown as browseable folders."""
    logger.info("[GuardaSerieClick.py]==> categorie")
    itemlist = []

    page = httptools.downloadpage(item.url, headers=headers).data
    menu = scrapertools.get_match(page, r'<ul\s*class="dropdown-menu category">(.*?)</ul>')
    link_re = r'<li>\s*<a\s*href="([^"]+)"[^>]+>([^<]+)</a></li>'
    for path, label in re.compile(link_re, re.DOTALL).findall(menu):
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 title=label,
                 contentType="tv",
                 url="".join([host, path]),
                 thumbnail=item.thumbnail,
                 extra="tv",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def lista_serie(item):
    """Generic grid scraper shared by categories, anime and search results."""
    logger.info("[GuardaSerieClick.py]==> lista_serie")
    itemlist = []

    page = httptools.downloadpage(item.url, headers=headers).data

    grid = scrapertools.get_match(page,
                                  r'<div\s*class="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\s*class="container-fluid whitebg" style="">')
    card_re = r'<a\s*href="([^"]+)".*?>\s*<img\s*.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
    for url, thumb, raw_title in re.compile(card_re, re.DOTALL).findall(grid):
        title = scrapertools.decodeHtmlentities(raw_title).strip()
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodi",
                 title=title,
                 fulltitle=title,
                 url=url,
                 thumbnail=thumb,
                 extra=item.extra,
                 show=title,
                 folder=True), tipo="tv"))
    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def episodi(item):
    """List the episodes of a show; item.url points at the show page."""
    logger.info("[GuardaSerieClick.py]==> episodi")
    itemlist = []

    page = httptools.downloadpage(item.url, headers=headers).data

    # Episode label, embed payload and lazy-loaded thumbnail, in order
    episode_re = (r'<div\s*class="[^"]+">([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'
                  r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<span\s*.*?'
                  r'embed="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*'
                  r'<img\s*class="[^"]+" src="" data-original="([^"]+)"[^>]+>')
    for raw_title, embed, thumb in re.compile(episode_re, re.DOTALL).findall(page):
        title = scrapertools.decodeHtmlentities(raw_title).strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 fulltitle=title,
                 url=embed,
                 contentType="episode",
                 thumbnail=thumb,
                 folder=True))

    # Offer "add to library" when the videolibrary is enabled
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodi",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def findepvideos(item):
    """Resolve the embed for an updated-episode entry.

    item.extra holds the regex (built by serietvaggiornate) that extracts
    the embed payload from the show page.
    """
    logger.info("[GuardaSerieClick.py]==> findepvideos")

    page = httptools.downloadpage(item.url, headers=headers).data
    embed = scrapertools.find_single_match(page, item.extra)
    itemlist = servertools.find_video_items(data=embed)

    for videoitem in itemlist:
        # Server name is derived from the raw "[ server ]" title
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(["[%s] " % color(server, 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def findvideos(item):
    """Resolve the playable links; item.url already contains the embed data."""
    logger.info("[GuardaSerieClick.py]==> findvideos")

    itemlist = servertools.find_video_items(data=item.url)

    for videoitem in itemlist:
        # Server name is derived from the raw "[ server ]" title
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(["[%s] " % color(server, 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel

    return itemlist
|
||||
|
||||
|
||||
# ================================================================================================================
|
||||
|
||||
# ----------------------------------------------------------------------------------------------------------------
|
||||
def color(text, color):
    """Wrap *text* in Kodi [COLOR] markup using the given color name."""
    return "[COLOR {0}]{1}[/COLOR]".format(color, text)
|
||||
|
||||
# ================================================================================================================
|
||||
25
plugin.video.alfa/channels/guardogratis.json
Normal file
25
plugin.video.alfa/channels/guardogratis.json
Normal file
@@ -0,0 +1,25 @@
|
||||
{
|
||||
"id": "guardogratis",
|
||||
"name": "GuardoGratis",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/guardogratis.com\/wp-content\/uploads\/2018\/01\/Logo-4.png",
|
||||
"bannermenu": "https:\/\/guardogratis.com\/wp-content\/uploads\/2018\/01\/Logo-4.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
219
plugin.video.alfa/channels/guardogratis.py
Normal file
219
plugin.video.alfa/channels/guardogratis.py
Normal file
@@ -0,0 +1,219 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per guardogratis
|
||||
#
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from platformcode import logger, config
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
__channel__ = "guardogratis"
|
||||
|
||||
host = "https://guardogratis.it/"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
def mainlist(item):
    """Root menu: films, top IMDb, categories, series and the two searches."""
    logger.info("[guardogratis.py] mainlist")

    folder_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    search_thumb = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"

    # (action, title, url, extra) for the browseable sections
    browse = [
        ("list_titles", "[COLOR azure]Film[/COLOR]", "%s/movies/" % host, "movie"),
        ("list_titles", "[COLOR azure]Top Film[/COLOR]", "%s/top-imdb/" % host, "movie"),
        ("categorie", "[COLOR azure]Categorie[/COLOR]", "%s" % host, "categorie"),
        ("list_titles", "[COLOR azure]Serie Tv[/COLOR]", "%s/series/" % host, "tvshow"),
    ]
    itemlist = [Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     extra=extra,
                     thumbnail=folder_thumb)
                for action, title, url, extra in browse]
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title="[COLOR yellow]Cerca Film[/COLOR]",
                         extra="movie",
                         thumbnail=search_thumb))
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title="[COLOR yellow]Cerca SerieTV[/COLOR]",
                         extra="tvshow",
                         thumbnail=search_thumb))

    return itemlist
|
||||
|
||||
def list_titles(item):
    """Scrape one page of the title grid and append a pager entry.

    item.extra ("movie" / "tvshow" / "categorie") selects the follow-up
    action, the TMDB lookup type and the pagination pattern.
    """
    logger.info("[guardogratis.py] list_titles")
    itemlist = []

    tipo = 'tv' if 'tvshow' in item.extra else 'movie'

    if item.url == "":
        item.url = host
    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    patronvideos = '<div data-movie-id=.*?href="([^"]+)".*?data-original="([^"]+)".*?<h2>([^<]+)<\/h2>.*?[I,T]MDb:\s*([^<]+)<\/div>'

    for match in re.compile(patronvideos, re.DOTALL).finditer(data):
        scrapedurl = urlparse.urljoin(item.url, match.group(1))
        scrapedthumbnail = urlparse.urljoin(item.url, match.group(2))
        scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
        scrapedtitle = scrapertools.unescape(match.group(3))
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos" if not 'tvshow' in item.extra else 'serietv',
                 contentType="movie" if not 'tvshow' in item.extra else 'serie',
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot"), tipo=tipo))

    # Pagination. Exact membership test replaces the old fragile substring
    # check (item.extra in "movies,tvshow"), which also matched "".
    nextpage_regex = ''
    if item.extra in ("movie", "tvshow"):
        nextpage_regex = '<div id="pagination" style="margin: 0;">.*?active.*?href=\'([^\']+)\'.*?</div>'
    elif item.extra == "categorie":
        nextpage_regex = '<li class=\'active\'>.*?href=\'([^\']+)\'.*?</a></li>'

    if nextpage_regex:
        next_page = scrapertools.find_single_match(data, nextpage_regex)
        if next_page != "":
            itemlist.append(
                Item(channel=item.channel,
                     action="list_titles",
                     title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                     url="%s" % next_page,
                     extra=item.extra,
                     thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
def search(item, texto):
    """Run a site search; both kinds share the same listing scraper.

    The original duplicated the list_titles call for "movie" and "tvshow";
    the two branches are merged. Errors are logged and an empty list is
    returned so the global search keeps going.
    """
    logger.info("[guardogratis.py] search")
    item.url = host + "/?s=" + texto
    try:
        if item.extra in ("movie", "tvshow"):
            return list_titles(item)
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt pass through
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
def categorie(item):
    """List the category links found in the site's navigation menu."""
    logger.info("[guardogratis.py] categorie")
    itemlist = []

    if item.url == "":
        item.url = host

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    patronvideos = '<li id="menu-item-.*?category.*?href="([^"]+)">([^"]+)</a>'

    for match in re.compile(patronvideos, re.DOTALL).finditer(data):
        itemlist.append(
            Item(channel=item.channel,
                 action="list_titles",
                 title=match.group(2),
                 url=urlparse.urljoin(item.url, match.group(1)),
                 extra=item.extra,
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def serietv(item):
    """List every episode link found on a series page."""
    logger.info("[guardogratis.py] serietv")

    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    episode_re = '<a href="([^"]+)">Episode[^<]+</a>'
    for scrapedurl in re.compile(episode_re, re.DOTALL).findall(data):
        # Derive a readable title from the URL slug
        slug = scrapedurl.replace(host, "").replace("episode/", "").replace("/", "")
        scrapedtitle = slug.replace("-", " ").title()
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="",
                 plot="",
                 folder=True), tipo='tv'))

    # Offer "add to library" when the videolibrary is enabled
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="serietv",
                 show=item.show))

    return itemlist
|
||||
|
||||
def findvideos(item):
    """Extract server links and prefix each with its quality tab, if any."""
    logger.info("[guardogratis.py] findvideos")

    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data

    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        # Map the iframe URL back to its "tabN" container to recover the
        # quality label shown on the tab header
        scheme_less = videoitem.url.replace('http:', '').replace('https:', '')
        tab = re.compile('<div\s*id="(tab[^"]+)"[^>]+>[^>]+>[^>]+src="http[s]*:%s[^"]+"' % scheme_less, re.DOTALL).findall(data)
        qual = ''
        if tab:
            qual = re.compile('<a\s*href="#%s">([^<]+)<' % tab[0], re.DOTALL).findall(data)[0].replace("'", "")
            qual = "[COLOR orange]%s[/COLOR] - " % qual
        videoitem.title = '%s[COLOR green][B]%s[/B][/COLOR] - %s' % (qual, videoitem.title[2:], item.title)
        videoitem.channel = __channel__
        videoitem.fulltitle = item.title

    return itemlist
|
||||
|
||||
25
plugin.video.alfa/channels/hdblog.json
Normal file
25
plugin.video.alfa/channels/hdblog.json
Normal file
@@ -0,0 +1,25 @@
|
||||
{
|
||||
"id": "hdblog",
|
||||
"name": "Hdblog",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": "it",
|
||||
"thumbnail": "http://css.hd-cdn.it/new_files/templates/theme_darklight/img/logos_wt/logohdhardware.png",
|
||||
"banner": "http://css.hd-cdn.it/new_files/templates/theme_darklight/img/logos_wt/logohdhardware.png",
|
||||
"version": "7",
|
||||
"date": "26/05/2017",
|
||||
"changes": "re-added",
|
||||
"categories": ["documentary"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
95
plugin.video.alfa/channels/hdblog.py
Normal file
95
plugin.video.alfa/channels/hdblog.py
Normal file
@@ -0,0 +1,95 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale hdblog
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
|
||||
|
||||
|
||||
host = "https://www.hdblog.it"
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu: latest tech video reviews and the category browser."""
    logger.info("kod.hdblog mainlist")
    thumb = "http://www.crat-arct.org/uploads/images/tic%201.jpg"
    video_url = host + "/video/"
    itemlist = [Item(channel=item.channel,
                     title="[COLOR azure]Video recensioni tecnologiche[/COLOR]",
                     action="peliculas",
                     url=video_url,
                     thumbnail=thumb),
                Item(channel=item.channel,
                     title="[COLOR azure]Categorie[/COLOR]",
                     action="categorias",
                     url=video_url,
                     thumbnail=thumb)]

    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List the video categories found in the page's left toolbar.

    Removed a leftover debug statement (logger.info(data)) that dumped the
    entire downloaded HTML page into the log on every call.
    """
    logger.info("kod.hdblog categorias")
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # Narrow search by selecting only the left toolbar section
    start = data.find('<section class="left_toolbar" style="float: left;width: 125px;margin-right: 18px;">')
    end = data.find('</section>', start)
    bloque = data[start:end]

    # The categories are the links inside the toolbar
    patron = '<a href="([^"]+)"[^>]+><span>(.*?)</span>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl + "video/",
                 thumbnail="",
                 plot=""))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the video posts on the current page plus a pager entry."""
    logger.info("kod.hdblog peliculas")
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the post cards
    patron = '<a class="thumb_new_image" href="([^"]+)">\s*<img[^s]+src="([^"]+)"[^>]+>\s*</a>\s*[^>]+>\s*(.*?)\s*<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             fulltitle=scrapedtitle,
                             show=scrapedtitle,
                             title=scrapedtitle,
                             url=scrapedurl,
                             thumbnail=scrapedthumbnail,
                             plot="",
                             folder=True))

    # Pagination: the "next" link next to the active page marker
    patronvideos = '<span class="attiva">[^>]+>[^=]+="next" href="(.*?)" class="inattiva">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR orange]Avanti >>[/COLOR]",
                 url=urlparse.urljoin(item.url, matches[0]),
                 folder=True))

    return itemlist
|
||||
69
plugin.video.alfa/channels/ilgeniodellostreaming.json
Normal file
69
plugin.video.alfa/channels/ilgeniodellostreaming.json
Normal file
@@ -0,0 +1,69 @@
|
||||
{
|
||||
"id": "ilgeniodellostreaming",
|
||||
"name": "IlGenioDelloStreaming",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": ["ita"],
|
||||
"thumbnail": "https://i.imgur.com/Nsa81r0.png",
|
||||
"banner": "https://i.imgur.com/Nsa81r0.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "5", "10", "15", "20" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
389
plugin.video.alfa/channels/ilgeniodellostreaming.py
Normal file
389
plugin.video.alfa/channels/ilgeniodellostreaming.py
Normal file
@@ -0,0 +1,389 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per ilgeniodellostreaming
|
||||
# ------------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools, servertools, httptools
|
||||
from core.item import Item
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core.tmdb import infoIca
|
||||
|
||||
__channel__ = "ilgeniodellostreaming"
|
||||
|
||||
host = "https://ilgeniodellostreaming.red"
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'streamango', 'youtube']
|
||||
list_quality = ['default']
|
||||
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'ilgeniodellostreaming')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'ilgeniodellostreaming')
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
PERPAGE = 10
|
||||
|
||||
def mainlist(item):
    """Root menu of the channel; also registers the autoplay options."""
    logger.info("kod.ilgeniodellostreaming mainlist")

    autoplay.init(item.channel, list_servers, list_quality)

    movie_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    tv_thumb = "http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"
    anime_thumb = "http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"

    # (title, action, url, thumbnail) for the browseable sections
    browse = [
        ("[COLOR azure]Ultimi Film Inseriti[/COLOR]", "peliculas", "%s/film/" % host, movie_thumb),
        ("[COLOR azure]Film Per Categoria[/COLOR]", "categorias", host, movie_thumb),
        ("[COLOR azure]Serie TV[/COLOR]", "serie", "%s/serie/" % host, tv_thumb),
        ("[COLOR azure]Nuovi Episodi Serie TV[/COLOR]", "nuoviep", "%s/aggiornamenti-serie/" % host, tv_thumb),
        ("[COLOR azure]Anime[/COLOR]", "serie", "%s/anime/" % host, anime_thumb),
    ]
    itemlist = [Item(channel=__channel__,
                     title=title,
                     action=action,
                     url=url,
                     thumbnail=thumb)
                for title, action, url, thumb in browse]
    itemlist.append(Item(channel=__channel__,
                         title="[COLOR yellow]Cerca...[/COLOR]",
                         action="search",
                         extra="movie",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """Feed the addon's global "newest" menu with the latest films.

    Errors are logged and an empty list is returned so the global scan
    keeps going.
    """
    logger.info("kod.ilgeniodellostreaming newest " + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = "%s/film/" % host
            item.action = "peliculas"
            itemlist = peliculas(item)

            # Drop the trailing "next page" entry, if present
            if itemlist and itemlist[-1].action == "peliculas":
                itemlist.pop()
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt pass through
    except Exception:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """Build the list of movie genres scraped from the site's genre menu."""
    logger.info("kod.ilgeniodellostreaming categorias")
    itemlist = []

    # Fetch the page and isolate the genre <ul> block.
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.get_match(data, '<ul class="genres scrolling">(.*?)</ul>')

    # One item per genre link.
    patron = '<li[^>]+><a href="(.*?)"[^>]+>(.*?)</a>'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(bloque):
        logger.info("title=[" + scrapedtitle + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site-wide search for *texto* and list the results."""
    logger.info("[ilgeniodellostreaming.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto

    try:
        return peliculas_src(item)
    except:
        # Log the failure but return an empty list so the caller
        # (global search) is not interrupted.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def peliculas_src(item):
    """Parse a search-results page; routes TV hits to episodios, movies to findvideos."""
    logger.info("kod.ilgeniodellostreaming peliculas")
    itemlist = []

    # Download the results page.
    data = httptools.downloadpage(item.url).data

    patron = '<div class="thumbnail animation-2"><a href="(.*?)"><img src="(.*?)" alt="(.*?)" />[^>]+>(.*?)</span>'
    results = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtipo in results:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        # The "TV" badge decides whether the entry is a series or a movie.
        is_tv = (scrapedtipo == "TV")
        entry = Item(channel=__channel__,
                     action="episodios" if is_tv else "findvideos",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     folder=True)
        if not is_tv:
            entry.contentType = "movie"
        itemlist.append(infoIca(entry, tipo='tv' if is_tv else 'movie'))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the movies on a catalogue page and append a pagination item."""
    logger.info("kod.ilgeniodellostreaming peliculas")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url).data

    # Estrae i contenuti
    poster_re = re.compile(
        '<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"></a>',
        re.DOTALL)

    for scrapedurl, scrapedthumbnail, scrapedtitle in poster_re.findall(data):
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoIca(
            Item(channel=__channel__,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot="",
                 folder=True), tipo='movie'))

    # Paginazione
    next_links = re.compile(
        '<span class="current">[^<]+<[^>]+><a href=\'(.*?)\'',
        re.DOTALL).findall(data)

    if next_links:
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=urlparse.urljoin(item.url, next_links[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def nuoviep(item):
    """List newly released TV episodes, paginated client-side with PERPAGE.

    The page number is piggybacked on the URL after a '{}' separator so the
    same server page can be sliced across several Kodi listings.
    """
    logger.info("kod.ilgeniodellostreaming nuoviep")
    itemlist = []

    # Decode the optional "url{}page" marker.
    page = 1
    if '{}' in item.url:
        item.url, page = item.url.split('{}')
        page = int(page)

    # Carica la pagina
    data = httptools.downloadpage(item.url).data
    #blocco = scrapertools.get_match(data,
    #                                r'<div class="items" style="margin-bottom:0px!important">(.*?)<div class="items" style="margin-bottom:0px!important">')

    # Estrae i contenuti
    patron = r'<div class="poster"><img src="([^"]+)" alt="([^"]+)">[^>]+><a href="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    first = (page - 1) * PERPAGE
    last = page * PERPAGE
    for idx, (scrapedthumbnail, scrapedtitle, scrapedurl) in enumerate(matches):
        # Only render the slice that belongs to the current page.
        if idx < first:
            continue
        if idx >= last:
            break
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoIca(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot="",
                 folder=True), tipo='tv'))

    # More results remain -> emit a "next page" marker with page+1.
    if len(matches) >= last:
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="nuoviep",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=item.url + '{}' + str(page + 1),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def serie(item):
    """List TV series (or anime) from a listing page, with pagination."""
    logger.info("kod.ilgeniodellostreaming peliculas")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url).data

    # Estrae i contenuti
    patron = '<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoIca(
            Item(channel=__channel__,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    # Paginazione
    patronvideos = '<span class="current">[^<]+<[^>]+><a href=\'(.*?)\''
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 # BUG FIX: the original used action="peliculas" here, which made
                 # the "next page" of a series list be parsed as a movie list.
                 action="serie",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """List every episode of a series page as "SxE title" entries.

    Also appends the "add to video library" entry when the library is enabled.
    """
    logger.info("kod.ilgeniodellostreaming episodios")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    season_blocks = re.compile('<ul class="episodios">.*?</ul>', re.DOTALL).findall(data)

    ep_re = re.compile(
        '<li><div class="imagen"><a href="(.*?)">[^>]+>[^>]+>[^>]+><.*?numerando">(.*?)<[^>]+>[^>]+>[^>]+>(.*?)</a>',
        re.DOTALL)

    for block in season_blocks:
        for scrapedurl, scrapednumber, scrapedtitle in ep_re.findall(block):
            # Normalise "1 - 2" into the conventional "1x2" numbering.
            numbering = scrapednumber.replace(" ", "").replace("-", "x")

            itemlist.append(Item(channel=__channel__,
                                 action="findvideos",
                                 contentType="episode",
                                 fulltitle=numbering + " " + scrapedtitle,
                                 show=numbering + " " + scrapedtitle,
                                 title=numbering + " [COLOR orange] " + scrapedtitle + "[/COLOR]",
                                 url=scrapedurl,
                                 thumbnail=item.thumbnail,
                                 plot=item.plot,
                                 folder=True))

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title=config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    # Resolve the playable links on an entry's detail page and hand them to
    # the link-check / filter / autoplay pipeline (order matters here).
    logger.info()

    data = httptools.downloadpage(item.url).data

    # Follow each protected "link_a" redirect page and append the real target
    # URLs (taken from window.location.href) to the scraped blob.
    patron = '<td><a class="link_a" href="(.*?)" target="_blank">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url in matches:
        html = httptools.downloadpage(url).data
        data += str(scrapertools.find_multiple_matches(html, 'window.location.href=\'(.*?)\''))

    # Let the server scanner detect every known hoster inside the blob.
    itemlist = servertools.find_video_items(data=data)

    # Propagate the parent item's metadata onto each detected link.
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Requerido para Filtrar enlaces
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay
    autoplay.start(itemlist, item)

    # "Add to library" is offered for movies only; episodes are added through
    # the series-level entry in episodios().
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
25
plugin.video.alfa/channels/ilgiramondo.json
Normal file
25
plugin.video.alfa/channels/ilgiramondo.json
Normal file
@@ -0,0 +1,25 @@
|
||||
{
|
||||
"id": "ilgiramondo",
|
||||
"name": "IlGiramondo",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": "it",
|
||||
"thumbnail": "http://www.ilgiramondo.net/wp-content/uploads/2013/05/logo-fixed.jpg",
|
||||
"banner": "http://www.ilgiramondo.net/wp-content/uploads/2013/05/logo-fixed.jpg",
|
||||
"version": "7",
|
||||
"date": "26/05/2017",
|
||||
"changes": "re-added",
|
||||
"categories": ["documentary"],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
68
plugin.video.alfa/channels/ilgiramondo.py
Normal file
68
plugin.video.alfa/channels/ilgiramondo.py
Normal file
@@ -0,0 +1,68 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale ilgiramondo
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
import re, urlparse
|
||||
|
||||
from core import httptools, scrapertools
|
||||
from platformcode import logger, config
|
||||
from core.item import Item
|
||||
|
||||
|
||||
|
||||
host = "http://www.ilgiramondo.net"
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the ilgiramondo channel: a single travel-videos entry."""
    logger.info("kod.ilgiramondo mainlist")
    root = Item(channel=item.channel,
                title="[COLOR azure]Video di Viaggi[/COLOR]",
                action="peliculas",
                url=host + "/video-vacanze-viaggi/",
                thumbnail="http://hotelsjaisalmer.com/wp-content/uploads/2016/10/Travel1.jpg")
    return [root]
|
||||
|
||||
|
||||
def peliculas(item):
    """List travel videos from an ilgiramondo listing page, with pagination."""
    logger.info("kod.ilgiramondo peliculas")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url).data

    # Estrae i contenuti
    patron = '<article id=[^>]+><div class="space">\s*<a href="([^"]+)"><img[^s]+src="(.*?)"[^>]+><\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail in matches:
        # PERF FIX: the original downloaded the same detail page twice
        # (once for the plot, once for the title); fetch it once.
        html = httptools.downloadpage(scrapedurl).data

        start = html.find("</script></div>")
        end = html.find("</p>", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)

        start = html.find("<title>")
        end = html.find("</title>", start)
        scrapedtitle = html[start:end]
        scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle)
        scrapedtitle = scrapedtitle.replace(" | Video Di Viaggi E Vacanze", "")

        itemlist.append(Item(channel=item.channel, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle,
                             title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
                             folder=True))

    # Paginazione
    patronvideos = '<a class="next page-numbers" href="(.*?)">Successivo'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel, action="peliculas", title="[COLOR orange]Avanti >>[/COLOR]", url=scrapedurl,
                 folder=True))

    return itemlist
|
||||
28
plugin.video.alfa/channels/istitutoluce.json
Normal file
28
plugin.video.alfa/channels/istitutoluce.json
Normal file
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"id": "istitutoluce",
|
||||
"name": "Istituto Luce",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": "it",
|
||||
"thumbnail": "http://www.archivioluce.com/wp-content/themes/wpbootstrap/bootstrap/img/luce-logo.png",
|
||||
"banner": "http://www.archivioluce.com/wp-content/themes/wpbootstrap/bootstrap/img/luce-logo.png",
|
||||
"version": "7",
|
||||
"date": "26/05/2017",
|
||||
"changes": "re-added",
|
||||
"categories": [
|
||||
"cult", "documentary"
|
||||
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
289
plugin.video.alfa/channels/istitutoluce.py
Normal file
289
plugin.video.alfa/channels/istitutoluce.py
Normal file
@@ -0,0 +1,289 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale istitutoluce
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import re, urlparse
|
||||
|
||||
from core import httptools, scrapertools, servertools
|
||||
from core.item import Item
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
|
||||
host = "https://patrimonio.archivioluce.com"
|
||||
host2 = "https://www.archivioluce.com"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
PERPAGE = 7
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the Istituto Luce channel: full archive, themes, search."""
    logger.info("kod.istitutoluce mainlist")
    luce_logo = "http://www.archivioluce.com/wp-content/themes/wpbootstrap/bootstrap/img/luce-logo.png"
    entries = [
        Item(channel=item.channel,
             title="[COLOR azure]Archivio - Tutti i Filmati[/COLOR]",
             action="peliculas",
             url="%s/luce-web/search/result.html?query=&perPage=7" % host,
             thumbnail=luce_logo),
        Item(channel=item.channel,
             title="[COLOR azure]Categorie Tematiche[/COLOR]",
             action="categorie",
             url="%s/navigazione-tematica/" % host2,
             thumbnail=luce_logo),
        Item(channel=item.channel,
             title="[COLOR yellow]Cerca...[/COLOR]",
             action="search",
             thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
    ]
    return entries
|
||||
|
||||
|
||||
def categorie(item):
    """List the thematic categories scraped from the directory page."""
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # Restrict parsing to the directory section of the page.
    bloque = scrapertools.get_match(data, '<section class="container directory">(.*?)<footer class="main">')
    patron = '<a class="label label-white" href="(.*?)">\s*(.*?)</a>'

    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(bloque):
        pretty_title = scrapedtitle.title()
        itemlist.append(
            Item(channel=item.channel,
                 action="cat_results",
                 fulltitle=pretty_title,
                 title=pretty_title,
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 Folder=True))

    return itemlist
|
||||
|
||||
|
||||
def cat_results(item):
    """List the results of a thematic category page, with pagination."""
    logger.info("kod.istitutoluce cat_results")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url, headers=headers).data

    # Estrae i contenuti
    patron = '<a href="([^"]+)" class="thumbnail">\s*<h1>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:
        # Derive a readable title from the URL slug (no extra request).
        scrapedtitle = scrapedurl.rsplit('/', 1)[-1].rsplit(".", 1)[0].replace("-", " ").title()
        scrapedurl = host + scrapedurl
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Paginazione
    patron = r'</span></td>\s*<td>\s*<a href="([^"]+)" class="btn-pag-luce">'
    next_page = scrapertools.find_single_match(data, patron)

    # BUG FIX: the original tested `next_page > 0`, a str-vs-int comparison
    # that is always True in Python 2 -- a bogus pagination item was appended
    # even when there was no next page. Test truthiness instead.
    if next_page:
        scrapedurl = urlparse.urljoin(item.url, next_page)
        itemlist.append(
            Item(channel=item.channel,
                 action="cat_results",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List archive films from a search-result page, with pagination."""
    logger.info("kod.istitutoluce peliculas")
    itemlist = []

    # Carica la pagina
    data = httptools.downloadpage(item.url, headers=headers).data

    # Estrae i contenuti
    patron = '<a href="([^"]+)" class="thumbnail">\s*<h1>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl in matches:
        # Title comes from the URL slug.
        scrapedtitle = scrapedurl.rsplit('/', 1)[-1].rsplit(".", 1)[0].replace("-", " ").title()
        scrapedurl = host + scrapedurl

        # PERF FIX: the original fetched the same detail page twice (once via
        # scrapertools.cache_page for the plot, once via httptools for the
        # thumbnail); download it once with the channel headers.
        html = httptools.downloadpage(scrapedurl, headers=headers).data

        start = html.find('<p class="abstract">')
        end = html.find('</p>', start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)

        scrapedthumbnail = scrapertools.find_single_match(html, 'image: "(.*?)"')

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Paginazione
    patron = r'</span></td>\s*<td>\s*<a href="([^"]+)" class="btn-pag-luce">'
    next_page = scrapertools.find_single_match(data, patron)

    if next_page:
        scrapedurl = urlparse.urljoin(item.url, next_page)
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def peliculas_src(item):
    """List search results, sliced into PERPAGE-sized pages via a '{}' URL marker."""
    logger.info("kod.istitutoluce peliculas")
    itemlist = []

    # Decode the optional "url{}page" marker.
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    # Carica la pagina
    data = httptools.downloadpage(item.url, headers=headers).data

    # Estrae i contenuti
    patron = '<a href="([^"]+)" class="thumbnail">\s*<h1>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for i, scrapedurl in enumerate(matches):
        # Render only the slice that belongs to the current page.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        scrapedtitle = scrapedurl.rsplit('/', 1)[-1].rsplit(".", 1)[0].replace("-", " ").title()
        scrapedurl = urlparse.urljoin(host, scrapedurl)

        # PERF FIX: the original downloaded the same detail page twice
        # (plot and thumbnail); one request is enough.
        html = httptools.downloadpage(scrapedurl, headers=headers).data

        start = html.find('<p class="abstract">')
        end = html.find('</p>', start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)

        scrapedthumbnail = scrapertools.find_single_match(html, 'image: "(.*?)"')

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Paginazione
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="peliculas_src",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the Luce archive for *texto* across its three film collections."""
    logger.info("[istitutoluce.py] search")

    item.url = host + '/luce-web/search/result.html?archiveType_string="xDamsCineLuce"&archiveName_string="luceFondoCinegiornali"&archiveName_string="luceFondoDocumentari"&archiveName_string="luceFondoRepertori"&titoloADV=&descrizioneADV="' + texto + '"'

    try:
        return peliculas_src(item)
    # Continua la ricerca in caso di errore .
    except:
        # Keep the global search alive: log the traceback and return nothing.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def findvideos(item):
    """Collect playable links for an archive entry: known hosters plus raw
    rtsp streams embedded directly in the page."""
    logger.info("kod.istitutoluce findvideos")

    data = httptools.downloadpage(item.url, headers=headers).data

    # Known hosters detected by the server scanner.
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel

    # Direct rtsp streams declared in the player config.
    rtsp_paths = re.compile('file: "rtsp:([^"]+)"\s*}', re.DOTALL).findall(data)
    for path in rtsp_paths:
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
                 url="rtsp:" + path,
                 folder=False))

    return itemlist
|
||||
42
plugin.video.alfa/channels/italiafilm.json
Normal file
42
plugin.video.alfa/channels/italiafilm.json
Normal file
@@ -0,0 +1,42 @@
|
||||
{
|
||||
"id": "italiafilm",
|
||||
"name": "Italia-Film.co",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiafilm.png",
|
||||
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiafilm.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"top channels"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
494
plugin.video.alfa/channels/italiafilm.py
Normal file
494
plugin.video.alfa/channels/italiafilm.py
Normal file
@@ -0,0 +1,494 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per italiafilm
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
import time
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from lib.unshortenit import unshorten_only
|
||||
from platformcode import logger, config
|
||||
|
||||
host = "https://www.italia-film.pro"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the italiafilm channel: movie and TV-series sections."""
    logger.info("[italiafilm.py] mainlist")
    movie_thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    search_thumb = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"

    entries = [
        Item(channel=item.channel,
             title="[COLOR azure]Film - Novita'[/COLOR]",
             action="peliculas",
             extra="movie",
             url="%s/novita-streaming-1/" % host,
             thumbnail=movie_thumb),
        Item(channel=item.channel,
             title="[COLOR azure]Film HD[/COLOR]",
             action="peliculas",
             extra="movie",
             url="%s/category/film-hd/" % host,
             thumbnail="http://i.imgur.com/3ED6lOP.png"),
        Item(channel=item.channel,
             title="[COLOR azure]Categorie[/COLOR]",
             action="categorias",
             extra="movie",
             url="%s/" % host,
             thumbnail=movie_thumb),
        Item(channel=item.channel,
             title="[COLOR yellow]Cerca...[/COLOR]",
             action="search",
             extra="movie",
             thumbnail=search_thumb),
        Item(channel=item.channel,
             title="[COLOR azure]Serie TV[/COLOR]",
             action="peliculas_tv",
             extra="tvshow",
             url="%s/category/serie-tv/" % host,
             thumbnail=movie_thumb),
        Item(channel=item.channel,
             title="[COLOR azure]Ultime serie TV[/COLOR]",
             action="pel_tv",
             extra="tvshow",
             url="%s/ultimi-telefilm-streaming/" % host,
             thumbnail=movie_thumb),
        Item(channel=item.channel,
             title="[COLOR azure]Ultimi Episodi[/COLOR]",
             action="latestep",
             extra="tvshow",
             url="%s/ultime-serie-tv-streaming/" % host,
             thumbnail=movie_thumb),
        Item(channel=item.channel,
             title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
             action="search",
             extra="tvshow",
             thumbnail=search_thumb),
    ]
    return entries
|
||||
|
||||
|
||||
def newest(categoria):
    """Return the newest films or series episodes for the global news hub.

    Any scraping failure is logged and an empty list returned so the
    aggregated feature can continue with other channels.
    """
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = "%s/novita-streaming-1/" % host
            item.action = "peliculas"
            item.extra = "movie"
            itemlist = peliculas(item)
            # Strip the trailing pagination entry, if present.
            if itemlist[-1].action == "peliculas":
                itemlist.pop()
        elif categoria == "series":
            item.url = "%s/ultime-serie-tv-streaming/" % host
            item.action = "latestep"
            itemlist = latestep(item)
            if itemlist[-1].action == "series":
                itemlist.pop()
    # Continua la ricerca in caso di errore
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List the film genre categories from the site's "Categorie" menu."""
    logger.info("[italiafilm.py] categorias")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # Keep only the dropdown that follows the "Categorie" anchor.
    data = scrapertools.find_single_match(data, '<a href=".">Categorie</a>(.*?)</div>')

    patron = '<li[^>]+><a href="([^"]+)">Film([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for url, title in matches:
        # Skip the adult category entirely.
        if title.startswith((" Porno")):
            continue

        itemlist.append(
            Item(channel=item.channel,
                 action='peliculas',
                 extra=item.extra,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=urlparse.urljoin(item.url, url),
                 thumbnail="",
                 plot="",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site; item.extra routes results to movie or TV parsing."""
    logger.info("[italiafilm.py] search " + texto)
    item.url = host + "/?s=" + texto

    try:
        if item.extra == "movie":
            return peliculas(item)
        if item.extra == "tvshow":
            return peliculas_tv(item)
    # Continua la ricerca in caso di errore
    except:
        # Log and return nothing so the global search is not interrupted.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def latestep(item):
    """List the latest TV episodes from the "ultime serie" page.

    Complete seasons are routed to 'episodios'; single episodes go to
    'findvideos_single_ep' with item.extra set to a regex that isolates
    that episode's link block inside the show page.
    """
    logger.info("[italiafilm.py] latestep")
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # The updates list sits between two "section_date" headers.
    blocco = scrapertools.find_single_match(data, r'<li class="section_date">(.*?)<li class="section_date">')
    patron = r'<li class="[^"]+">\s*[^>]+>([^<|^(]+)[^>]+>\s*<a href="([^"]+)"'
    patron += r'[^>]+>[^>]+>[^>]+>(?:[^>]+>[^>]+>|)([^<]+)(?:[^>]+>[^>]+>|)</a>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    for scrapedtitle, scrapedurl, scrapedepisode in matches:
        scrapedepisode = scrapertools.decodeHtmlentities(scrapedepisode)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        completetitle = "%s - %s" % (scrapedtitle, scrapedepisode)

        # Episode ranges like "01-10" are not supported as single entries.
        unsupportedeps = re.compile(r'\d+\-\d+', re.DOTALL).findall(scrapedepisode)
        if len(unsupportedeps) > 0:
            continue

        if 'completa' in scrapedtitle.lower():
            # Whole season available: browse it episode by episode.
            itemlist.append(infoIca(
                Item(channel=item.channel,
                     action="episodios",
                     title=completetitle,
                     contentSerieName=completetitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     folder=True), tipo='tv'))
        else:
            if 'episodio' not in scrapedepisode:
                # Rewrite "1x05" as "1×05" (the multiplication sign used on
                # the show page) before building the extraction pattern.
                replace = re.compile(r'(\d+)x(\d+)')
                ep_pattern = r'%s(.*?(?:<br\s*/>|</p>))' % replace.sub(r'\g<1>×\g<2>', scrapedepisode)
            else:
                ep_pattern = r'%s(.*?(?:<br\s*/>|</p>))' % scrapedepisode

            itemlist.append(infoIca(
                Item(channel=item.channel,
                     action="findvideos_single_ep",
                     title=completetitle,
                     contentSerieName=completetitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     extra=ep_pattern,
                     folder=True), tipo='tv'))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the movies found on item.url plus a "next page" item, if any."""
    logger.info("[italiafilm.py] peliculas")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<article(.*?)</article>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        title = scrapertools.find_single_match(match, '<h3[^<]+<a href="[^"]+"[^<]+>([^<]+)</a>')
        title = title.replace("Streaming", "")
        title = scrapertools.decodeHtmlentities(title).strip()
        url = scrapertools.find_single_match(match, '<h3[^<]+<a href="([^"]+)"')
        # Skip adult content.
        if 'film-porno' in url: continue
        plot = ""
        # Thumbnails are lazy-loaded through the data-echo attribute.
        thumbnail = scrapertools.find_single_match(match, 'data-echo="([^"]+)"')

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 extra=item.extra,
                 action='findvideos',
                 contentType="movie",
                 fulltitle=title,
                 show=title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 viewmode="movie_with_plot",
                 folder=True), tipo='movie'))

    # Next page
    try:
        pagina_siguiente = scrapertools.get_match(data, '<a class="next page-numbers" href="([^"]+)"')
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 extra=item.extra,
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=pagina_siguiente,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    except:
        # Last page: no "next" link to follow.
        pass

    return itemlist
|
||||
|
||||
|
||||
def findvid(item):
    """Collect playable video links for a movie page.

    Downloads the movie page, follows every redirect anchor to harvest
    the canonical target URL, then runs server detection on the combined
    HTML, tagging each detected link with this item's metadata.
    """
    logger.info("kod.italiafilm findvid")

    results = []

    # Download the movie page.
    html = httptools.downloadpage(item.url, headers=headers).data

    # Follow each redirect anchor and append its resolved og:url target
    # so the server detector can pick those links up as well.
    redirect_links = scrapertools.find_multiple_matches(
        html, '<a href="([^"]+)" target="_blank" rel="noopener">')
    for link in redirect_links:
        redirect_page = httptools.downloadpage(link, headers=headers).data
        html += '\n' + scrapertools.find_single_match(
            redirect_page, '<meta name="og:url" content="([^=]+)">')

    for video in servertools.find_video_items(data=html):
        video.title = item.title + video.title
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.show = item.show
        video.plot = item.plot
        video.channel = item.channel
        video.contentType = item.contentType
        results.append(video)

    return results
|
||||
|
||||
|
||||
def peliculas_tv(item):
    """List TV-series cards (article markup) plus a pagination item."""
    logger.info("[italiafilm.py] peliculas")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<article(.*?)</article>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        title = scrapertools.find_single_match(match, '<h3[^<]+<a href="[^"]+"[^<]+>([^<]+)</a>')
        title = title.replace("Streaming", "")
        title = scrapertools.decodeHtmlentities(title).strip()
        # Strip "Serie TV" and any parenthesized suffix to get the bare show name.
        show_title = re.sub('\(.*?\)', '', title.replace('Serie TV', ''))
        url = scrapertools.find_single_match(match, '<h3[^<]+<a href="([^"]+)"')
        plot = ""
        # Thumbnails are lazy-loaded through the data-echo attribute.
        thumbnail = scrapertools.find_single_match(match, 'data-echo="([^"]+)"')

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 extra=item.extra,
                 action='episodios',
                 fulltitle=title,
                 show=show_title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 viewmode="movie_with_plot",
                 folder=True), tipo='tv'))

    # Next page
    try:
        pagina_siguiente = scrapertools.get_match(data, '<a class="next page-numbers" href="([^"]+)"')
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_tv",
                 extra=item.extra,
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=pagina_siguiente,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    except:
        # Last page: no "next" link.
        pass

    return itemlist
|
||||
|
||||
|
||||
def pel_tv(item):
    """List TV-series entries matched by the "tvseries_name" markup."""
    logger.info("[italiafilm.py] peliculas")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<span class="tvseries_name">(.*?)</span>\s*<a href="([^"]+)"[^>]+><i class="icon-link"></i>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraptitle1, scrapedurl, scraptitle2 in matches:
        # The display title is split across two markup fragments.
        title = scraptitle1 + scraptitle2
        plot = ""
        thumbnail = ""
        url = scrapedurl

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 extra=item.extra,
                 action='episodios',
                 fulltitle=title,
                 show=title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 viewmode="movie_with_plot",
                 folder=True), tipo='tv'))

    # Next page
    try:
        pagina_siguiente = scrapertools.get_match(data, '<a class="next page-numbers" href="([^"]+)"')
        itemlist.append(
            Item(channel=item.channel,
                 action="pel_tv",
                 extra=item.extra,
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=pagina_siguiente,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    except:
        # Last page: no "next" link.
        pass

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """List the episodes of a show page, grouped by language (ITA/SUB ITA).

    The page is sliced at each "STAGIONE..." header; each slice is scanned
    line by line and every episode line becomes a 'findvideos' Item whose
    url holds the raw HTML line (parsed later for its links).
    """
    def load_episodios(html, item, itemlist, lang_title):
        # One episode per line (<br> was replaced with '\n' below).
        for data in scrapertools.decodeHtmlentities(html).splitlines():
            # Prefer the text that precedes the first link as the label...
            end = data.find('<a ')
            if end > 0:
                scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip()
            else:
                scrapedtitle = ''
            # ...otherwise fall back to the first anchor's text.
            if scrapedtitle == '':
                patron = '<a.*?href="[^"]+".*?>([^<]+)</a>'
                scrapedtitle = scrapertools.find_single_match(data, patron).strip()
            title = scrapertools.find_single_match(scrapedtitle, '\d+[^\d]+\d+')
            if title == '':
                title = scrapedtitle
            if title != '':
                # Normalize any "1×05" / "1-05" numbering to "1x05".
                title = re.sub(r"(\d+)[^\d]+(\d+)", r"\1x\2", title)
                title += " (" + lang_title + ")"
                itemlist.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         contentType="episode",
                         title=title,
                         url=data,
                         thumbnail=item.thumbnail,
                         extra=item.extra,
                         fulltitle=title + ' - ' + item.show,
                         show=item.show))

    logger.info("[italiafilm.py] episodios")

    itemlist = []

    # Load the show page; <br> -> newline so each episode sits on its own line.
    data = httptools.downloadpage(item.url, headers=headers).data.replace('<br>','\n')

    # Keep only the region between the rating widget and the report form.
    start = data.find('id="pd_rating_holder')
    end = data.find('id="linkcorrotto-show"', start)

    data = data[start:end]

    # Locate every season header and remember its language and position.
    lang_titles = []
    starts = []
    patron = r"STAGION[I|E](.*?ITA)?"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        # if season_title != '':
        lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
        starts.append(match.end())

    i = 1
    len_lang_titles = len(lang_titles)

    # Scan each slice between consecutive season headers.
    while i <= len_lang_titles:
        inizio = starts[i - 1]
        fine = starts[i] if i < len_lang_titles else -1

        html = data[inizio:fine]
        lang_title = lang_titles[i - 1]

        load_episodios(html, item, itemlist, lang_title)

        i += 1

    # No season headers at all: treat the whole region as Italian.
    if len(itemlist) == 0:
        load_episodios(data, item, itemlist, 'ITA')

    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve playable links.

    Movies are delegated to findvid().  For episodes, item.url holds the
    raw HTML line of the episode (set by episodios()); its anchors are
    fetched to collect redirect targets before server detection.
    """
    logger.info("kod.italiafilm findvideos")

    if item.contentType == "movie":
        return findvid(item)

    # item.url is the episode's HTML fragment, not a real URL.
    data = item.url

    urls = scrapertools.find_multiple_matches(data, '<a.*?href="([^"]+)".*?>')
    for url in urls:
        # Follow each anchor and harvest the canonical URL it redirects to.
        page = httptools.downloadpage(url, headers=headers).data
        data += '\n' + scrapertools.find_single_match(page,'<meta name="og:url" content="([^=]+)">')

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    return itemlist
|
||||
|
||||
|
||||
def findvideos_single_ep(item):
    """Resolve links for a single episode.

    item.extra carries a regex (built by latestep()) that isolates this
    episode's link block inside the show page.
    """
    logger.info("[italiafilm.py] findvideos_single_ep")

    data = httptools.downloadpage(item.url).data

    data = scrapertools.find_single_match(data, item.extra)

    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        # Turn the detector's "[-server-]" suffix into a "[Server]" prefix.
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = "".join(["[[COLOR orange]%s[/COLOR]] " % server.capitalize(), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist
|
||||
64
plugin.video.alfa/channels/italiafilmhd.json
Normal file
64
plugin.video.alfa/channels/italiafilmhd.json
Normal file
@@ -0,0 +1,64 @@
|
||||
{
|
||||
"id": "italiafilmhd",
|
||||
"name": "ItaliaFilm HD",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https://italiafilm.network/wp-content/uploads/2018/06/ITALIAFILM-HD.png",
|
||||
"bannermenu": "https://italiafilm.network/wp-content/uploads/2018/06/ITALIAFILM-HD.png",
|
||||
"categories": [
|
||||
"movie"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi in ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in Novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
|
||||
},
|
||||
|
||||
|
||||
{
|
||||
"id": "comprueba_enlaces",
|
||||
"type": "bool",
|
||||
"label": "Verifica se i link esistono",
|
||||
"default": false,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "comprueba_enlaces_num",
|
||||
"type": "list",
|
||||
"label": "Numero di link da verificare",
|
||||
"default": 1,
|
||||
"enabled": true,
|
||||
"visible": "eq(-1,true)",
|
||||
"lvalues": [ "2", "5", "10", "15" ]
|
||||
},
|
||||
{
|
||||
"id": "filter_languages",
|
||||
"type": "list",
|
||||
"label": "Mostra link in lingua...",
|
||||
"default": 0,
|
||||
"enabled": true,
|
||||
"visible": true,
|
||||
"lvalues": [
|
||||
"Non filtrare",
|
||||
"IT"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
386
plugin.video.alfa/channels/italiafilmhd.py
Normal file
386
plugin.video.alfa/channels/italiafilmhd.py
Normal file
@@ -0,0 +1,386 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per italiafilmhd
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
import base64
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from channels import autoplay
|
||||
from channels import filtertools
|
||||
from core import scrapertools, servertools, httptools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from platformcode import logger, config
|
||||
|
||||
IDIOMAS = {'Italiano': 'IT'}
|
||||
list_language = IDIOMAS.values()
|
||||
list_servers = ['openload', 'youtube']
|
||||
list_quality = ['default']
|
||||
|
||||
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'italiafilmhd')
|
||||
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'italiafilmhd')
|
||||
|
||||
host = "https://italiafilm.network"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the ItaliaFilm HD channel."""
    logger.info("kod.italiafilmhd mainlist")

    # Register supported servers/qualities for the autoplay feature.
    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = [
        Item(channel=item.channel,
             title="[COLOR azure]Novita'[/COLOR]",
             action="fichas",
             url=host + "/cinema/",
             thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
        Item(channel=item.channel,
             title="[COLOR azure]Ultimi Film Inseriti[/COLOR]",
             action="fichas",
             url=host + "/film/",
             thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
        Item(channel=item.channel,
             title="[COLOR azure]Film per Genere[/COLOR]",
             action="genere",
             url=host,
             thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
        Item(channel=item.channel,
             title="Serie TV",
             text_color="azure",
             action="tv_series",
             url="%s/serie-tv-hd/" % host,
             thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
        Item(channel=item.channel,
             title="[COLOR orange]Cerca...[/COLOR]",
             action="search",
             extra="movie",
             thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    autoplay.show_option(item.channel, itemlist)

    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """News hook: return the freshest items for *categoria* ("film" only),
    dropping the trailing pagination pseudo-item appended by fichas()."""
    logger.info("[italiafilmvideohd.py] newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host + "/cinema/"
            item.action = "fichas"
            itemlist = fichas(item)

            # Drop the "next page" pseudo-item, if present.
            if itemlist[-1].action == "fichas":
                itemlist.pop()

    # Keep the global news scan going if this channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Search the site for *texto* and return matching film items.

    Returns [] on any error so the global search continues with the
    remaining channels.
    """
    logger.info("[italiafilmvideohd.py] " + item.url + " search " + texto)

    item.url = host + "/?s=" + texto

    try:
        return fichas(item)

    # Keep the global search going on error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def genere(item):
    """Build the genre menu scraped from the site's "Genere" block."""
    logger.info("[italiafilmvideohd.py] genere")

    page = httptools.downloadpage(item.url, headers=headers).data

    # Cut the HTML down to the genre submenu.
    menu_html = scrapertools.find_single_match(
        page, '<div class="sub_title">Genere</div>(.+?)</div>')

    entry_pattern = '<li>.*?href="([^"]+)".*?<i>([^"]+)</i>'
    entries = re.compile(entry_pattern, re.DOTALL).findall(menu_html)
    scrapertools.printMatches(entries)

    # One browsable item per genre; '&' is normalized to '-' in the label.
    itemlist = [
        Item(channel=item.channel,
             action="fichas",
             title=genre_name.replace('&', '-'),
             url=genre_url,
             folder=True)
        for genre_url, genre_name in entries
    ]

    return itemlist
|
||||
|
||||
|
||||
def fichas(item):
    """List film cards from item.url and append a pagination item when a
    "next" link is present."""
    logger.info("[italiafilmvideohd.py] fichas")

    itemlist = []

    # Download the listing page.
    data = httptools.downloadpage(item.url, headers=headers).data

    patron = '<li class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_2, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # title += " (" + scrapedcalidad + ")

        # Attach request headers to the thumb URL (hotlink protection).
        scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=scrapedtitle), tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"\s*><span aria-hidden="true">»')

    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 text_color="orange",
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
|
||||
def tv_series(item):
    """List TV-series cards from item.url and append a pagination item.

    Each series leads to seasons(); pagination re-enters tv_series so the
    next page is parsed with the series markup.
    """
    logger.info("[italiafilmvideohd.py] tv_series")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, r'<ul class="list_mt">(.*?)</ul>')
    patron = r'<a class="poster" href="([^"]+)" title="([^"]+)"[^>]*>\s*<img src="([^"]+)"[^>]+>'
    matches = re.findall(patron, blocco, re.DOTALL)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="seasons",
                 contentType="tv",
                 title=scrapedtitle,
                 text_color="azure",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle), tipo='tv'))

    # Pagination
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"\s*><span aria-hidden="true">»')

    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 # Was "fichas" (the movie-card parser) — a copy-paste slip:
                 # the next page of the series listing must be parsed by
                 # tv_series so the series markup is recognized.
                 action="tv_series",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 text_color="orange",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
|
||||
def seasons(item):
    """List the seasons of a series by following the hdpass player iframe."""
    logger.info("[italiafilmvideohd.py] seasons")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # The season menu lives inside the embedded hdpass player page.
    url = scrapertools.find_single_match(data,
                                         r'<div class="playerhdpass" id="playerhdpass">\s*[^>]+>\s*<iframe[^s]+src="([^"]+)"[^>]*></iframe>')
    data = httptools.downloadpage(url, headers=headers).data
    blocco = scrapertools.find_single_match(data, r'<h3>STAGIONE</h3>\s*<ul>(.*?)</ul>')
    seasons = re.findall(r'<li[^>]*><a href="([^"]+)">([^<]+)</a></li>', blocco, re.DOTALL)

    for scrapedurl, season in seasons:
        itemlist.append(
            Item(channel=item.channel,
                 action="episodes",
                 contentType=item.contentType,
                 title="Stagione: %s" % season,
                 text_color="azure",
                 # Season links are relative to the hdpass host.
                 url="https://hdpass.net/%s" % scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def episodes(item):
    """List the episodes of a season from the hdpass season page."""
    logger.info("[italiafilmvideohd.py] episodes")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, r'<section id="seasons">(.*?)</section>')
    episodes = re.findall(r'<li[^>]*><a href="([^"]+)">([^<]+)</a></li>', blocco, re.DOTALL)

    for scrapedurl, episode in episodes:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvid_series",
                 contentType=item.contentType,
                 title="Episodio: %s" % episode,
                 text_color="azure",
                 # Episode links are relative to the hdpass host.
                 url="https://hdpass.net/%s" % scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Collect playable links for a film, then run the link-check /
    language-filter / autoplay pipeline and offer the videolibrary shortcut."""
    logger.info("[italiafilmvideohd.py] findvideos")

    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<div class="playerhdpass" id="playerhdpass"><iframe width=".+?" height=".+?" src="([^"]+)"'
    url = scrapertools.find_single_match(data, patron)

    # Pull in the embedded player page too, so its links are detected.
    if url:
        data += httptools.downloadpage(url, headers=headers).data

    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType
        videoitem.language = IDIOMAS['Italiano']

    # Required to drop dead links (user setting)
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required for AutoPlay
    autoplay.start(itemlist, item)

    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))

    return itemlist
|
||||
|
||||
|
||||
def findvid_series(item):
    """Resolve an episode's mirrors from the hdpass embed.

    Walks every resolution option, then every mirror option, decodes the
    obfuscated embed URLs with url_decode() and hands them to the server
    detector.
    """
    logger.info("[italiafilmvideohd.py] findvideos")

    itemlist = []

    # Locate the player iframe on the episode page.
    data = httptools.downloadpage(item.url, headers=headers).data.replace('\n', '')
    patron = r'<iframe id="[^"]+" width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
    url = scrapertools.find_single_match(data, patron).replace("?alta", "")
    url = "https:" + url.replace("&download=1", "")

    data = httptools.downloadpage(url, headers=headers).data

    # Keep only the resolution/mirror selector region.
    start = data.find('<div class="row mobileRes">')
    end = data.find('<div id="playerFront">', start)
    data = data[start:end]

    patron_res = '<div class="row mobileRes">(.*?)</div>'
    patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
    patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)"\s*/>'

    res = scrapertools.find_single_match(data, patron_res)

    urls = []
    for res_url, res_video in scrapertools.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):

        data = httptools.downloadpage(urlparse.urljoin(url, res_url), headers=headers).data.replace('\n', '')
        mir = scrapertools.find_single_match(data, patron_mir)
        # NOTE(review): this pattern closes with "</value>" while the options
        # above close with "</option>" — looks like a typo; verify against the
        # live hdpass markup before changing it.
        for mir_url in scrapertools.find_multiple_matches(mir, '<option.*?value="([^"]+?)">[^<]+?</value>'):
            data = httptools.downloadpage(urlparse.urljoin(url, mir_url), headers=headers).data.replace('\n', '')

            for media_label, media_url in re.compile(patron_media).findall(data):
                # Embed URLs are obfuscated; url_decode() restores them.
                urls.append(url_decode(media_url))

    itemlist = servertools.find_video_items(data='\n'.join(urls))
    for videoitem in itemlist:
        # Turn the detector's "[-server-]" suffix into a "[Server]" prefix.
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.text_color = "azure"
        videoitem.title = "".join(["[%s] " % ("[COLOR orange]%s[/COLOR]" % server.capitalize()), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist
|
||||
|
||||
def url_decode(url_enc):
    """Decode the obfuscated embed URL used by the hdpass player.

    The cleartext is the base64 of the reversed, half-swapped string; for
    odd-length input the last character is held out and re-appended after
    the swap/reverse step.
    """
    lenght = len(url_enc)
    if lenght % 2 == 0:
        # Even length: swap the two halves, reverse, base64-decode.
        len2 = lenght // 2
        first = url_enc[0:len2]
        last = url_enc[len2:lenght]
        url_enc = last + first
        reverse = url_enc[::-1]
        return base64.b64decode(reverse)

    # Odd length: hold the last character aside.  The original code tried
    # to assign into the string (url_enc[lenght - 1] = ' '), which always
    # raises TypeError because Python strings are immutable; rebuild the
    # string via slicing instead, preserving the replace-then-strip intent.
    last_car = url_enc[lenght - 1]
    url_enc = (url_enc[:lenght - 1] + ' ').strip()
    len1 = len(url_enc)
    len2 = len1 // 2
    first = url_enc[0:len2]
    last = url_enc[len2:len1]
    url_enc = last + first
    reverse = url_enc[::-1]
    reverse = reverse + last_car
    return base64.b64decode(reverse)
|
||||
32
plugin.video.alfa/channels/italiaserie.json
Normal file
32
plugin.video.alfa/channels/italiaserie.json
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"id": "italiaserie",
|
||||
"name": "Italia Serie",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiaserie.png",
|
||||
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/italiaserie.png",
|
||||
"categories": [
|
||||
"tvshow"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_series",
|
||||
"type": "bool",
|
||||
"label": "Includi in novit\u00e0 - Serie TV",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
300
plugin.video.alfa/channels/italiaserie.py
Normal file
300
plugin.video.alfa/channels/italiaserie.py
Normal file
@@ -0,0 +1,300 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per italiaserie
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools, scrapertools, servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from lib import unshortenit
|
||||
from platformcode import config, logger
|
||||
|
||||
host = "https://italiaserie.org"
|
||||
|
||||
|
||||
def mainlist(item):
    """Root menu of the Italia Serie channel."""
    logger.info("kod.italiaserie mainlist")
    itemlist = [Item(channel=item.channel,
                     title="[COLOR azure]Aggiornamenti Serie TV[/COLOR]",
                     action="peliculas",
                     url=host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR azure]Ultimi Episodi[/COLOR]",
                     action="latestep",
                     url="%s/aggiornamento-episodi/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca...[/COLOR]",
                     action="search",
                     extra="tvshow",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
    return itemlist
|
||||
|
||||
|
||||
def newest(categoria):
    """News hook: return the latest episode items for *categoria*
    ("series" only), dropping the trailing pagination pseudo-item."""
    logger.info("[italiaserie.py]==> newest" + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "series":
            item.url = "%s/aggiornamento-episodi/" % host
            item.action = "latestep"
            itemlist = latestep(item)

            # Drop the "next page" pseudo-item, if present.
            if itemlist[-1].action == "latestep":
                itemlist.pop()

    # Keep the global news scan going on error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
||||
|
||||
|
||||
def latestep(item):
    """Scrape the 'Aggiornamento Episodi' page and return one Item per episode.

    A row may list several episodes at once (e.g. "1x01, 02"); those rows are
    expanded into one Item per episode.

    FIX: the original stripped the language tag from the title with
    ``scrapedtitle.replace(scrapedlang, scrapedlang)``, which is a no-op; the
    second argument is now '' so the tag is actually removed (it is re-appended
    in parentheses when the full title is built).
    """
    itemlist = []

    data = httptools.downloadpage(item.url).data
    blocco = scrapertools.find_single_match(
        data,
        r'<h1 class="entry-title">Aggiornamento Episodi</h1>\s*<div class="entry">(.*?)<p> </p>')
    patron = r'(?:<span[^>]+>|<strong>|)(<?[^<]*)<a href="([^"]+)"[^>]*>([^<]+)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    for scrapedtitle, scrapedurl, scraped_number_and_title in matches:
        # '' when the episode is not subtitled.
        scrapedlang = scrapertools.find_single_match(scraped_number_and_title, r'(SUB-ITA)')
        # Remove the language tag from the show title (was a no-op upstream).
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace(scrapedlang, '')
        scrapedtitle = scrapertools.htmlclean(scrapedtitle).strip()
        fulltitle = scrapedtitle.replace("–", "").replace(' ', '').strip()

        scraped_number_and_title = scrapertools.decodeHtmlentities(scraped_number_and_title.replace('×', 'x'))
        seasonandep = scrapertools.find_single_match(scraped_number_and_title, r'(\d+x[0-9\-?]+)')
        # Template later used by findepvideos to locate the episode block.
        extra = r'%s(.*?)(?:<br\s*/>|</p>)'

        # Row listing multiple episodes: expand into one Item per episode.
        if re.compile(r'[,-]\s*\d+', re.DOTALL).findall(scraped_number_and_title):
            season = scrapertools.find_single_match(scraped_number_and_title, r'(\d+x)')
            scraped_number_and_title = scraped_number_and_title.split(',')
            for ep in scraped_number_and_title:
                # "1x01, 02" -> second entry lacks the season prefix; add it.
                ep = (season + ep if season not in ep else ep).strip()
                seasonandep = scrapertools.find_single_match(ep, r'(\d+x[0-9\-?]+)')
                completetitle = "%s %s" % (scrapedtitle, ep)

                itemlist.append(infoIca(
                    Item(channel=item.channel,
                         action="findepvideos",
                         title=completetitle,
                         contentSerieName=completetitle,
                         fulltitle=fulltitle,
                         url=scrapedurl,
                         extra=extra % seasonandep.replace('x', '×'),
                         folder=True), tipo='tv'))
            continue

        # Single-episode row.
        correct_scraped_number = seasonandep.replace('x', '×')
        extra = extra % (correct_scraped_number)
        completetitle = ("%s %s %s" % (
            scrapedtitle, scraped_number_and_title, "(%s)" % scrapedlang if scrapedlang else scrapedlang)).strip()
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findepvideos",
                 title=completetitle,
                 contentSerieName=completetitle,
                 fulltitle=fulltitle,
                 url=scrapedurl,
                 extra=extra,
                 folder=True), tipo='tv'))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """List the TV shows on the current page, plus a 'next page' entry."""
    logger.info("kod.italiaserie peliculas")
    itemlist = []

    # Fetch the page.
    data = httptools.downloadpage(item.url).data

    # Extract the show cards.
    patron = '<div class="post-thumb">\s*<a href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"[^>]+>'
    for scrapedurl, scrapedtitle, scrapedthumbnail in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Point straight at the links page instead of the first-episode page.
        url = scrapedurl.replace("-1/", "-links/")

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="episodios",
                 fulltitle=title,
                 show=title,
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=scrapedthumbnail,
                 plot="",
                 folder=True), tipo='tv'))

    # Pagination.
    next_links = re.compile('<a class="next page-numbers" href="(.*?)">', re.DOTALL).findall(data)
    if next_links:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=urlparse.urljoin(item.url, next_links[0]),
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto* and list the results.

    On any scraping error, log the traceback and return [] so the global
    search can continue with other channels.
    """
    logger.info("[italiaserie.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def episodios(item):
    """List all episodes of a show, split per language section (ITA / SUB ITA).

    The page groups episodes under "Stagione ... ITA" / "... SUB-ITA" headers;
    the content between consecutive headers is parsed by the local helper.

    FIX: the original applied ``scrapedtitle.replace('×', 'x')`` twice in a
    row; the duplicated (idempotent) statement was removed.
    """

    def load_episodios(html, item, itemlist, lang_title):
        # One match per run of consecutive <a> episode links.
        patron = '((?:.*?<a[^h]+href="[^"]+"[^>]+>[^<][^<]+<(?:b|\/)[^>]+>)+)'
        matches = re.compile(patron).findall(html)
        for data in matches:
            # Episode label is the text before the first link.
            scrapedtitle = data.split('<a ')[0]
            scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
            if scrapedtitle != 'Categorie':
                scrapedtitle = scrapedtitle.replace('×', 'x')
                itemlist.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         contentType="episode",
                         title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
                         # url carries the raw HTML fragment; findvideos parses it.
                         url=data,
                         thumbnail=item.thumbnail,
                         extra=item.extra,
                         fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
                         show=item.show))

    logger.info("[italiaserie.py] episodios")

    itemlist = []

    # Download the page; follow the redirect banner to the "-links" page.
    data = httptools.downloadpage(item.url).data
    data = scrapertools.decodeHtmlentities(data)
    if 'CLICCA QUI PER GUARDARE TUTTI GLI EPISODI' in data:
        item.url = re.sub('\-\d+[^\d]+$', '-links', item.url)
        data = httptools.downloadpage(item.url).data
        data = scrapertools.decodeHtmlentities(data)
    data = scrapertools.get_match(data, '<div class="entry-content">(.*?)<div class="clear"></div>')

    # Collect language headers and where each section's content starts.
    lang_titles = []
    starts = []
    patron = r"Stagione.*?ITA"
    matches = re.compile(patron, re.IGNORECASE).finditer(data)
    for match in matches:
        season_title = match.group()
        if season_title != '':
            lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
            starts.append(match.end())

    # Parse each section: from its header to the next header (or the end;
    # -1 drops the final character — behavior preserved from the original).
    for idx in range(len(lang_titles)):
        inizio = starts[idx]
        fine = starts[idx + 1] if idx + 1 < len(lang_titles) else -1
        load_episodios(data[inizio:fine], item, itemlist, lang_titles[idx])

    # Optional 'add to video library' entry.
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve the playable links embedded in item.url.

    Note: item.url holds a raw HTML fragment (saved by episodios), not a URL.
    Shortened links are expanded first, then handed to servertools.
    """
    logger.info("kod.italiaserie findvideos")

    html = item.url

    shortened = re.findall(r'<a href="([^"]+)" target="_blank" rel="nofollow">', html, re.DOTALL)

    resolved = []
    for link in shortened:
        link, c = unshortenit.unshorten(link)
        resolved.append(link)

    itemlist = servertools.find_video_items(data=str(resolved))

    # Propagate the episode's metadata onto each server entry.
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    return itemlist
|
||||
|
||||
|
||||
def findepvideos(item):
    """Resolve playable links for a single latest-episode entry.

    Note: item.url holds a raw HTML fragment (saved by latestep), not a URL.
    """
    logger.info("kod.italiaserie findepvideos")

    html = item.url

    shortened = re.findall(r'<a href="([^"]+)"[^>]*>[^<]+</a>', html, re.DOTALL)

    resolved = []
    for link in shortened:
        link, c = unshortenit.unshorten(link)
        resolved.append(link)

    itemlist = servertools.find_video_items(data=str(resolved))

    # Propagate the episode's metadata onto each server entry.
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        videoitem.contentType = item.contentType

    return itemlist
|
||||
33
plugin.video.alfa/channels/itastreaming.json
Normal file
33
plugin.video.alfa/channels/itastreaming.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"id": "itastreaming",
|
||||
"name": "ItaStreaming",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/itastreaming.png",
|
||||
"bannermenu": "https:\/\/raw.githubusercontent.com\/Zanzibar82\/images\/master\/posters\/itastreaming.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"cult"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
363
plugin.video.alfa/channels/itastreaming.py
Normal file
363
plugin.video.alfa/channels/itastreaming.py
Normal file
@@ -0,0 +1,363 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per itastreaming
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
import base64
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import scrapertools, httptools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from platformcode import logger, config
|
||||
|
||||
|
||||
|
||||
host = "https://itastreaming.film"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: home, new releases, genre/quality/A-Z, search."""
    logger.info("[itastreaming.py] mainlist")

    thumb_movies = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"

    entries = []
    entries.append(Item(channel=item.channel,
                        title="[COLOR azure]Home[/COLOR]",
                        action="fichas",
                        url=host,
                        thumbnail=""))
    entries.append(Item(channel=item.channel,
                        title="[COLOR azure]Nuove uscite[/COLOR]",
                        action="fichas",
                        url=host + "/nuove-uscite/",
                        thumbnail=thumb_movies))
    entries.append(Item(channel=item.channel,
                        title="[COLOR azure]Film per Genere[/COLOR]",
                        action="genere",
                        url=host,
                        thumbnail=thumb_movies))
    entries.append(Item(channel=item.channel,
                        title="[COLOR azure]Film per Qualita'[/COLOR]",
                        action="quality",
                        url=host,
                        thumbnail="http://files.softicons.com/download/computer-icons/disks-icons-by-wil-nichols/png/256x256/Blu-Ray.png"))
    entries.append(Item(channel=item.channel,
                        title="[COLOR azure]Film A-Z[/COLOR]",
                        action="atoz",
                        url=host + "/tag/a/",
                        thumbnail="http://i.imgur.com/IjCmx5r.png"))
    entries.append(Item(channel=item.channel,
                        title="[COLOR orange]Cerca...[/COLOR]",
                        action="search",
                        extra="movie",
                        thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    return entries
|
||||
|
||||
|
||||
def newest(categoria):
    """Feed the global 'new releases' listing with this channel's films.

    Returns [] (and logs the traceback) on any scraping error so the global
    aggregation can continue with the other channels.
    """
    logger.info("[itastreaming.py] newest" + categoria)
    item = Item()
    itemlist = []
    try:
        if categoria == "film":
            item.url = host + "/nuove-uscite/"
            item.action = "fichas"
            itemlist = fichas(item)

            # Drop a trailing pagination entry, if the scraper produced one.
            if itemlist[-1].action == "fichas":
                itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Run a site search for *texto* and list the results.

    On any scraping error, log the traceback and return [] so the global
    search can continue with other channels.
    """
    logger.info("[itastreaming.py] " + item.url + " search " + texto)

    item.url = host + "/?s=" + texto

    try:
        return searchfilm(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
|
||||
|
||||
|
||||
def searchfilm(item):
    """Render search-result cards, plus a 'next page' entry."""
    logger.info("[itastreaming.py] fichas")

    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # Inject a default quality tag where the markup carries none.
    data = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a',
                  data)
    # Inject a default IMDB rating where the markup carries none.
    data = re.sub(r'<h5> </div>',
                  '<fix>IMDB: 0.0</fix>',
                  data)

    patron = ('<li class="s-item">.*?'
              'src="([^"]+)".*?'
              'alt="([^"]+)".*?'
              'href="([^"]+)".*?')

    for scrapedthumbnail, scrapedtitle, scrapedurl in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Thumbnail URL needs the referer headers appended.
        thumb = httptools.get_url_headers(scrapedthumbnail)

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 contentType="movie",
                 url=scrapedurl,
                 thumbnail=thumb,
                 fulltitle=title,
                 show=title), tipo='movie'))

    # Pagination.
    next_page = scrapertools.find_single_match(data, "href='([^']+)'>Seguente ›")
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="searchfilm",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
|
||||
def genere(item):
    """List the entries of the genre sub-menu."""
    logger.info("[itastreaming.py] genere")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.find_single_match(data, '<ul class="sub-menu">(.+?)</ul>')

    matches = re.compile('<li[^>]+><a href="([^"]+)">(.*?)</a></li>', re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=scrapedtitle.replace('&', '-'),
                 url=scrapedurl,
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def atoz(item):
    """List the alphabetical (A-Z) index entries."""
    logger.info("[itastreaming.py] genere")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.find_single_match(data, '<div class="generos">(.+?)</ul>')

    patron = '<li>.*?href="([^"]+)".*?>([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=scrapedtitle.replace('&', '-'),
                 url=scrapedurl,
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def quality(item):
    """List the video-quality filter entries."""
    logger.info("[itastreaming.py] genere")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.find_single_match(data, '<a>Qualità</a>(.+?)</ul>')

    patron = '<li id=".*?href="([^"]+)".*?>([^"]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=scrapedtitle.replace('&', '-'),
                 url=scrapedurl,
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def fichas(item):
    """List the film cards on the current page, plus a 'next page' entry."""
    logger.info("[itastreaming.py] fichas")

    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # Inject a default quality tag where the markup carries none.
    data = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a',
                  data)
    # Inject a default IMDB rating where the markup carries none.
    data = re.sub(r'<h5> </div>',
                  '<fix>IMDB: 0.0</fix>',
                  data)

    patron = ('<div class="item">.*?'
              'href="([^"]+)".*?'
              'title="([^"]+)".*?'
              '<img src="([^"]+)".*?')

    for scrapedurl, scrapedtitle, scrapedthumbnail in re.compile(patron, re.DOTALL).findall(data):
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Thumbnail URL needs the referer headers appended.
        thumb = httptools.get_url_headers(scrapedthumbnail)

        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 thumbnail=thumb,
                 fulltitle=title,
                 show=title), tipo='movie'))

    # Pagination.
    next_page = scrapertools.find_single_match(data, "href='([^']+)'>Seguente ›")
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
|
||||
|
||||
|
||||
def findvideos(item):
    """Resolve playable video links for a film page.

    When the embedded player is hosted on 'hdpass', the resolution and mirror
    drop-downs are crawled and every embedded media URL is decoded via
    url_decode() before being handed to servertools.
    """
    # NOTE(review): log tag says "italiafilmvideohd.py" — looks like a
    # copy/paste leftover from another channel; confirm before changing.
    logger.info("[italiafilmvideohd.py] findvideos")

    itemlist = []

    # Download the film page; flatten newlines so the regexes span lines.
    data = httptools.downloadpage(item.url, headers=headers).data.replace('\n', '')

    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    url = scrapertools.find_single_match(data, patron).replace("?ita", "")

    if 'hdpass' in url:
        data = httptools.downloadpage(url, headers=headers).data

        # Restrict parsing to the player block.
        start = data.find('<div class="row mobileRes">')
        end = data.find('<div id="playerFront">', start)
        data = data[start:end]

        patron_res = r'<div class="row mobileRes">([\s\S]*)<\/div>'
        patron_mir = r'<div class="row mobileMirrs">([\s\S]*)<\/div>'
        patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)"[^>]+>'

        res = scrapertools.find_single_match(data, patron_res)

        urls = []
        # One <option> per available resolution.
        for res_url, res_video in scrapertools.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):

            data = httptools.downloadpage(urlparse.urljoin(url, res_url), headers=headers).data.replace('\n', '')

            mir = scrapertools.find_single_match(data, patron_mir)

            # NOTE(review): this pattern closes with </value>, which is not
            # valid HTML; presumably </option> was intended — confirm against
            # the live page before changing.
            for mir_url in scrapertools.find_multiple_matches(mir, '<option.*?value="([^"]+?)">[^<]+?</value>'):

                data = httptools.downloadpage(urlparse.urljoin(url, mir_url), headers=headers).data.replace('\n', '')

                # Embedded URLs are obfuscated; url_decode() reverses it.
                for media_label, media_url in re.compile(patron_media).findall(data):
                    urls.append(url_decode(media_url))

        itemlist = servertools.find_video_items(data='\n'.join(urls))
        # Propagate the film's metadata onto each server entry.
        for videoitem in itemlist:
            videoitem.title = item.title + videoitem.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = item.channel

    return itemlist
|
||||
|
||||
|
||||
def url_decode(url_enc):
    """Reverse the site's URL obfuscation and return the base64-decoded URL.

    Even-length input: swap the two halves, reverse, base64-decode.
    Odd-length input: set the last character aside, swap/reverse the rest,
    re-append the saved character, then base64-decode.

    FIX: the original did ``url_enc[lenght - 1] = ' '`` — item assignment on a
    str, which raises TypeError, so the odd-length branch could never run.
    Division also uses ``//`` so the slice indices stay integers.
    """
    lenght = len(url_enc)
    if lenght % 2 == 0:
        len2 = lenght // 2
        first = url_enc[0:len2]
        last = url_enc[len2:lenght]
        url_enc = last + first
        reverse = url_enc[::-1]
        return base64.b64decode(reverse)

    # Odd length: drop the last character (kept aside) before swapping.
    last_car = url_enc[lenght - 1]
    url_enc = url_enc[:lenght - 1].strip()
    len1 = len(url_enc)
    len2 = len1 // 2
    first = url_enc[0:len2]
    last = url_enc[len2:len1]
    url_enc = last + first
    reverse = url_enc[::-1]
    reverse = reverse + last_car
    return base64.b64decode(reverse)
|
||||
463
plugin.video.alfa/channels/kodpreferiti.py
Normal file
463
plugin.video.alfa/channels/kodpreferiti.py
Normal file
@@ -0,0 +1,463 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand preferiti (I miei Link)
|
||||
# ============================
|
||||
# - Elenco di collegamenti salvati come preferiti, solo in Kodi on Demand, non in Kodi.
|
||||
# - I collegamenti sono organizzati in cartelle che l'utente può definire.
|
||||
# - Un singolo file viene utilizzato per salvare tutte le cartelle e i collegamenti: user_favorites.json
|
||||
# - È possibile copiare user_favorites.json su altri dispositivi poiché l'unica dipendenza locale è il thumbnail associato ai collegamenti,
|
||||
# ma viene rilevato dal codice e adattato al dispositivo corrente.
|
||||
|
||||
# Requisiti in altri moduli per eseguire questo canale:
|
||||
# - Aggiungere un collegamento a questo canale in channelselector.py
|
||||
# - Modificare platformtools.py per controllare il menu contestuale e aggiungere "Salva link" in set_context_commands
|
||||
# ------------------------------------------------------------
|
||||
|
||||
import os, re
|
||||
|
||||
from core import filetools
|
||||
from core import jsontools
|
||||
from core.item import Item
|
||||
from platformcode import config, logger
|
||||
from platformcode import platformtools
|
||||
|
||||
|
||||
# Classe da caricare e salvare nel file Kodpreferiti
|
||||
# --------------------------------------------------------
|
||||
class KodpreferitiData:
    """Load and persist the user's favourite links (user_favorites.json).

    self.user_favorites is a list of folders: {'title': str, 'items': [str]},
    where each item is an Item serialized with tourl().
    """

    def __init__(self):
        # The file lives in the addon's user-data directory.
        self.user_favorites_file = os.path.join(config.get_data_path(), 'user_favorites.json')

        self.user_favorites = []
        if os.path.exists(self.user_favorites_file):
            try:
                self.user_favorites = jsontools.load(filetools.read(self.user_favorites_file))
            except:
                # Corrupt/unreadable file: start over with an empty list.
                self.user_favorites = []

        # Guarantee at least one (default) folder exists.
        if len(self.user_favorites) == 0:
            self.user_favorites.append({'title': config.get_localized_string(70528), 'items': []})
            self.save()

    def save(self):
        """Write the in-memory folder list back to disk."""
        filetools.write(self.user_favorites_file, jsontools.dump(self.user_favorites))
|
||||
|
||||
|
||||
# ============================
|
||||
# Aggiungere dal menu contestuale
|
||||
# ============================
|
||||
|
||||
def addFavourite(item):
    """Save *item* as a favourite link (invoked from the context menu).

    Returns True on success, False when cancelled or the link already exists.
    """
    logger.info()
    icapref = KodpreferitiData()

    # When invoked from the context menu the real action/channel arrive in
    # from_action/from_channel; restore them onto the item.
    if item.from_action:
        item.__dict__["action"] = item.__dict__.pop("from_action")
    if item.from_channel:
        item.__dict__["channel"] = item.__dict__.pop("from_channel")

    # Clean the title: strip the [COLOR ...] markup and any color hint.
    item.title = re.sub(r'\[COLOR [^\]]*\]', '', item.title.replace('[/COLOR]', '')).strip()
    if item.text_color:
        item.__dict__.pop("text_color")

    # Dialog to choose (or create) a destination folder.
    i_perfil = _selecciona_perfil(icapref, config.get_localized_string(70546))
    if i_perfil == -1: return False

    # Detect duplicates: a link is considered the same when ALL these fields match.
    campos = ['channel','action','url','extra']
    for enlace in icapref.user_favorites[i_perfil]['items']:
        it = Item().fromurl(enlace)
        repe = True
        for prop in campos:
            if prop in it.__dict__ and prop in item.__dict__ and it.__dict__[prop] != item.__dict__[prop]:
                repe = False
                break
        if repe:
            platformtools.dialog_notification(config.get_localized_string(70529), config.get_localized_string(70530))
            return False

    # Persist the serialized item in the chosen folder.
    icapref.user_favorites[i_perfil]['items'].append(item.tourl())
    icapref.save()

    platformtools.dialog_notification(config.get_localized_string(70531), config.get_localized_string(70532) % icapref.user_favorites[i_perfil]['title'])

    return True
|
||||
|
||||
|
||||
# ====================
|
||||
# NAVIGAZIONE
|
||||
# ====================
|
||||
|
||||
def mainlist(item):
    """List the favourite folders, each with its context-menu actions."""
    logger.info()
    icapref = KodpreferitiData()

    itemlist = []
    n_folders = len(icapref.user_favorites)

    for i_perfil, perfil in enumerate(icapref.user_favorites):
        # Rename / delete are always available.
        context = [
            {'title': config.get_localized_string(70533), 'channel': item.channel,
             'action': 'editar_perfil_titulo', 'i_perfil': i_perfil},
            {'title': config.get_localized_string(70534), 'channel': item.channel,
             'action': 'eliminar_perfil', 'i_perfil': i_perfil},
        ]

        # Move up/top only when not already first; down/bottom only when not last.
        if i_perfil > 0:
            for label, direccion in ((70535, 'top'), (70536, 'arriba')):
                context.append({'title': config.get_localized_string(label), 'channel': item.channel,
                                'action': 'mover_perfil', 'i_perfil': i_perfil, 'direccion': direccion})
        if i_perfil < n_folders - 1:
            for label, direccion in ((70537, 'abajo'), (70538, 'bottom')):
                context.append({'title': config.get_localized_string(label), 'channel': item.channel,
                                'action': 'mover_perfil', 'i_perfil': i_perfil, 'direccion': direccion})

        plot = config.get_localized_string(70556) % len(perfil['items'])
        itemlist.append(Item(channel=item.channel, action='mostrar_perfil', title=perfil['title'],
                             plot=plot, i_perfil=i_perfil, context=context))

    # Trailing entry to create a new folder.
    plot = config.get_localized_string(70539)
    plot += config.get_localized_string(70540)
    plot += config.get_localized_string(70541)
    itemlist.append(item.clone(action='crear_perfil', title=config.get_localized_string(70542), plot=plot, folder=False))

    return itemlist
|
||||
|
||||
|
||||
def mostrar_perfil(item):
    """List the links stored inside one favourites folder (item.i_perfil)."""
    logger.info()
    icapref = KodpreferitiData()

    itemlist = []

    i_perfil = item.i_perfil
    if not icapref.user_favorites[i_perfil]: return itemlist
    last_i = len(icapref.user_favorites[i_perfil]['items']) - 1

    ruta_runtime = config.get_runtime_path()

    for i_enlace, enlace in enumerate(icapref.user_favorites[i_perfil]['items']):
        context = []

        # Reorder actions: up/top only when not first, down/bottom only when not last.
        if i_enlace > 0:
            context.append({'title': config.get_localized_string(70535), 'channel': item.channel, 'action': 'mover_enlace',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil, 'direccion': 'top'})
            context.append({'title': config.get_localized_string(70536), 'channel': item.channel, 'action': 'mover_enlace',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil, 'direccion': 'arriba'})
        if i_enlace < last_i:
            context.append({'title': config.get_localized_string(70537), 'channel': item.channel, 'action': 'mover_enlace',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil, 'direccion': 'abajo'})
            context.append({'title': config.get_localized_string(70538), 'channel': item.channel, 'action': 'mover_enlace',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil, 'direccion': 'bottom'})

        if len(icapref.user_favorites) > 1:  # with more than one folder, allow moving between them
            context.append({'title': config.get_localized_string(70543), 'channel': item.channel, 'action': 'editar_enlace_carpeta',
                            'i_enlace': i_enlace, 'i_perfil': i_perfil})

        context.append({'title': config.get_localized_string(70544), 'channel': item.channel, 'action': 'editar_enlace_titulo',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil})

        context.append({'title': config.get_localized_string(70545), 'channel': item.channel, 'action': 'editar_enlace_color',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil})

        context.append({'title': config.get_localized_string(70547), 'channel': item.channel, 'action': 'editar_enlace_thumbnail',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil})

        context.append({'title': config.get_localized_string(70548), 'channel': item.channel, 'action': 'eliminar_enlace',
                        'i_enlace': i_enlace, 'i_perfil': i_perfil})

        it = Item().fromurl(enlace)
        it.context = context
        it.plot = '[COLOR blue]Canal: ' + it.channel + '[/COLOR][CR]' + it.plot

        # If the thumbnail is a local path from another device (not a URL and
        # not under the current runtime path), remap it to this install.
        # A run-time conversion; an import step would be the cleaner place.
        if it.thumbnail and '://' not in it.thumbnail and not it.thumbnail.startswith(ruta_runtime):
            ruta, fichero = filetools.split(it.thumbnail)
            if ruta == '' and fichero == it.thumbnail:  # on Linux a Windows-style path does not split correctly
                ruta, fichero = filetools.split(it.thumbnail.replace('\\','/'))
            if 'channels' in ruta and 'thumb' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources', 'media', 'channels', 'thumb', fichero)
            elif 'themes' in ruta and 'default' in ruta:
                it.thumbnail = filetools.join(ruta_runtime, 'resources', 'media', 'themes', 'default', fichero)

        itemlist.append(it)

    return itemlist
|
||||
|
||||
|
||||
# Routine interne condivise
|
||||
# ----------------------------
|
||||
|
||||
# Finestra di dialogo per selezionare/creare una cartella. Restituisce l'indice della cartella in user_favorites (-1 se cancella)
|
||||
def _selecciona_perfil(icapref, titulo=None, i_actual=-1):
    """Dialog to select or create a folder; return its index, or -1 on cancel.

    titulo: dialog heading; defaults to localized string 70549.
    i_actual: index highlighted as the current folder (pink/italic).

    FIX: the original used ``titulo=config.get_localized_string(70549)`` as the
    default, which is evaluated once at module import; it is now resolved
    lazily via a None sentinel (backward compatible for all callers that pass
    an explicit heading).
    """
    if titulo is None:
        titulo = config.get_localized_string(70549)

    acciones = [(perfil['title'] if i_p != i_actual else '[I][COLOR pink]%s[/COLOR][/I]' % perfil['title'])
                for i_p, perfil in enumerate(icapref.user_favorites)]
    acciones.append(config.get_localized_string(70550))

    i_perfil = -1
    while i_perfil == -1:  # repeat until a folder is chosen or the user cancels
        ret = platformtools.dialog_select(titulo, acciones)
        if ret == -1: return -1  # dialog cancelled
        if ret < len(icapref.user_favorites):
            i_perfil = ret
        else:  # last entry: create a new folder
            if _crea_perfil(icapref):
                i_perfil = len(icapref.user_favorites) - 1

    return i_perfil
|
||||
|
||||
|
||||
# Finestra di dialogo per creare una cartella
|
||||
def _crea_perfil(icapref):
    """Prompt for a folder name and append a new empty folder.

    Returns True when the folder was created and persisted, False when
    the user cancelled or entered an empty name.
    """
    nombre = platformtools.dialog_input(default='', heading=config.get_localized_string(70551))
    if not nombre:
        return False

    icapref.user_favorites.append({'title': nombre, 'items': []})
    icapref.save()
    return True
|
||||
|
||||
|
||||
# Gestione dei profili e dei link
|
||||
# -----------------------------
|
||||
|
||||
def crear_perfil(item):
    """Create a new favourites folder via a user dialog and refresh the listing."""
    logger.info()
    icapref = KodpreferitiData()

    if not _crea_perfil(icapref): return False

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
def editar_perfil_titulo(item):
    """Rename the favourites folder at item.i_perfil via an input dialog.

    Returns False when the folder is missing, the dialog is cancelled,
    or the new title is empty/unchanged; True after saving and refreshing.
    """
    logger.info()
    icapref = KodpreferitiData()

    if not icapref.user_favorites[item.i_perfil]: return False

    titulo = platformtools.dialog_input(default=icapref.user_favorites[item.i_perfil]['title'], heading=config.get_localized_string(70551))
    if titulo is None or titulo == '' or titulo == icapref.user_favorites[item.i_perfil]['title']:
        return False

    icapref.user_favorites[item.i_perfil]['title'] = titulo
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
def eliminar_perfil(item):
    """Delete the favourites folder at item.i_perfil after user confirmation."""
    logger.info()
    icapref = KodpreferitiData()

    if not icapref.user_favorites[item.i_perfil]: return False

    # Ask for confirmation
    if not platformtools.dialog_yesno(config.get_localized_string(70534), config.get_localized_string(70552)): return False

    del icapref.user_favorites[item.i_perfil]
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
def editar_enlace_titulo(item):
    """Rename a saved link (folder item.i_perfil, position item.i_enlace).

    Links are stored as serialized Item urls; the entry is deserialized,
    retitled and re-serialized in place.
    """
    logger.info()
    icapref = KodpreferitiData()

    if not icapref.user_favorites[item.i_perfil]: return False
    if not icapref.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False

    it = Item().fromurl(icapref.user_favorites[item.i_perfil]['items'][item.i_enlace])

    titulo = platformtools.dialog_input(default=it.title, heading=config.get_localized_string(70553))
    if titulo is None or titulo == '' or titulo == it.title:
        return False

    it.title = titulo

    icapref.user_favorites[item.i_perfil]['items'][item.i_enlace] = it.tourl()
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
def editar_enlace_color(item):
    """Pick a display colour for a saved link from a fixed palette."""
    logger.info()
    icapref = KodpreferitiData()

    if not icapref.user_favorites[item.i_perfil]: return False
    if not icapref.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False

    it = Item().fromurl(icapref.user_favorites[item.i_perfil]['items'][item.i_enlace])

    # Fixed palette; each option previews itself in its own colour.
    colores = ['green','yellow','red','blue','white','orange','lime','aqua','pink','violet','purple','tomato','olive','antiquewhite','gold']
    opciones = ['[COLOR %s]%s[/COLOR]' % (col, col) for col in colores]

    ret = platformtools.dialog_select(config.get_localized_string(70558), opciones)

    if ret == -1: return False  # request cancelled
    it.text_color = colores[ret]

    icapref.user_favorites[item.i_perfil]['items'][item.i_enlace] = it.tourl()
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
def editar_enlace_thumbnail(item):
    """Choose a new thumbnail for a saved link.

    Candidates are the link's channel thumbnail (if any) plus the
    default theme icons on disk.  `opciones` (dialog entries) and `ids`
    (thumbnail paths/urls) are kept as parallel lists, so the selected
    dialog index maps directly to a path.
    """
    logger.info()
    icapref = KodpreferitiData()

    if not icapref.user_favorites[item.i_perfil]: return False
    if not icapref.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False

    it = Item().fromurl(icapref.user_favorites[item.i_perfil]['items'][item.i_enlace])

    # From Kodi 17 onwards xbmcgui.Dialog().select supports thumbnails (ListItem & useDetails=True)
    is_kodi17 = (config.get_platform(True)['num_version'] >= 17.0)
    if is_kodi17:
        import xbmcgui

    # Dialog to choose the thumbnail (the channel's own or the default icons)
    opciones = []
    ids = []
    try:
        from core import channeltools
        channel_parameters = channeltools.get_channel_parameters(it.channel)
        if channel_parameters['thumbnail'] != '':
            nombre = 'Canal %s' % it.channel
            if is_kodi17:
                it_thumb = xbmcgui.ListItem(nombre)
                it_thumb.setArt({ 'thumb': channel_parameters['thumbnail'] })
                opciones.append(it_thumb)
            else:
                opciones.append(nombre)
            ids.append(channel_parameters['thumbnail'])
    except:
        # Best-effort: a missing/broken channel simply contributes no option.
        pass

    resource_path = os.path.join(config.get_runtime_path(), 'resources', 'media', 'themes', 'default')
    for f in sorted(os.listdir(resource_path)):
        if f.startswith('thumb_') and not f.startswith('thumb_intervenido') and f != 'thumb_back.png':
            # Derive a human-readable name from the file name.
            nombre = f.replace('thumb_', '').replace('_', ' ').replace('.png', '')
            if is_kodi17:
                it_thumb = xbmcgui.ListItem(nombre)
                it_thumb.setArt({ 'thumb': os.path.join(resource_path, f) })
                opciones.append(it_thumb)
            else:
                opciones.append(nombre)
            ids.append(os.path.join(resource_path, f))

    if is_kodi17:
        ret = xbmcgui.Dialog().select(config.get_localized_string(70554), opciones, useDetails=True)
    else:
        ret = platformtools.dialog_select(config.get_localized_string(70554), opciones)

    if ret == -1: return False  # request cancelled

    it.thumbnail = ids[ret]

    icapref.user_favorites[item.i_perfil]['items'][item.i_enlace] = it.tourl()
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
def editar_enlace_carpeta(item):
    """Move a saved link from its current folder to another (or new) folder."""
    logger.info()
    icapref = KodpreferitiData()

    if not icapref.user_favorites[item.i_perfil]: return False
    if not icapref.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False

    # Dialog to choose/create a destination folder
    i_perfil = _selecciona_perfil(icapref, config.get_localized_string(70555), item.i_perfil)
    if i_perfil == -1 or i_perfil == item.i_perfil: return False

    # Append to the destination, then remove from the source folder.
    icapref.user_favorites[i_perfil]['items'].append(icapref.user_favorites[item.i_perfil]['items'][item.i_enlace])
    del icapref.user_favorites[item.i_perfil]['items'][item.i_enlace]
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
def eliminar_enlace(item):
    """Remove a saved link from its folder (no confirmation dialog)."""
    logger.info()
    icapref = KodpreferitiData()

    if not icapref.user_favorites[item.i_perfil]: return False
    if not icapref.user_favorites[item.i_perfil]['items'][item.i_enlace]: return False

    del icapref.user_favorites[item.i_perfil]['items'][item.i_enlace]
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
# Sposta profili e collegamenti (su, giù, in alto, in basso)
|
||||
# ------------------------
|
||||
def mover_perfil(item):
    """Reorder folders: move folder item.i_perfil in direction item.direccion."""
    logger.info()
    icapref = KodpreferitiData()

    icapref.user_favorites = _mover_item(icapref.user_favorites, item.i_perfil, item.direccion)
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
def mover_enlace(item):
    """Reorder links inside a folder: move link item.i_enlace in direction item.direccion."""
    logger.info()
    icapref = KodpreferitiData()

    if not icapref.user_favorites[item.i_perfil]: return False
    icapref.user_favorites[item.i_perfil]['items'] = _mover_item(icapref.user_favorites[item.i_perfil]['items'], item.i_enlace, item.direccion)
    icapref.save()

    platformtools.itemlist_refresh()
    return True
|
||||
|
||||
|
||||
# Sposta un oggetto (numerico) specifico da un elenco (su, giù, in alto, in basso) e restituisce l'elenco modificato
|
||||
def _mover_item(lista, i_selected, direccion):
|
||||
last_i = len(lista) - 1
|
||||
if i_selected > last_i or i_selected < 0: return lista # indice inesistente nella lista
|
||||
|
||||
if direccion == 'arriba':
|
||||
if i_selected == 0: # È già al di sopra di tutto
|
||||
return lista
|
||||
lista.insert(i_selected - 1, lista.pop(i_selected))
|
||||
|
||||
elif direccion == 'abajo':
|
||||
if i_selected == last_i: # È già al di sopra di tutto
|
||||
return lista
|
||||
lista.insert(i_selected + 1, lista.pop(i_selected))
|
||||
|
||||
elif direccion == 'top':
|
||||
if i_selected == 0: # È già al di sopra di tutto
|
||||
return lista
|
||||
lista.insert(0, lista.pop(i_selected))
|
||||
|
||||
elif direccion == 'bottom':
|
||||
if i_selected == last_i: # È già al di sopra di tutto
|
||||
return lista
|
||||
lista.insert(last_i, lista.pop(i_selected))
|
||||
|
||||
return lista
|
||||
32
plugin.video.alfa/channels/marapcana.json
Normal file
32
plugin.video.alfa/channels/marapcana.json
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"id": "marapcana",
|
||||
"name": "Marapcana",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://marapcana.site/maraplogo7.png",
|
||||
"banner": "http://marapcana.site/maraplogo7.png",
|
||||
"categories": [
|
||||
"movie",
|
||||
"tvshow",
|
||||
"top channels"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": false
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_film",
|
||||
"type": "bool",
|
||||
"label": "Includi in novità - Film",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
257
plugin.video.alfa/channels/marapcana.py
Normal file
257
plugin.video.alfa/channels/marapcana.py
Normal file
@@ -0,0 +1,257 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per marapcana
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
import re
|
||||
|
||||
from core import scrapertools, httptools, servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
from lib import unshortenit
|
||||
from platformcode import logger, config
|
||||
|
||||
host = "http://marapcana.live"
|
||||
# in caso di oscuramento verificare l'indirizzo http://marapcana.online/
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
PERPAGE = 12
|
||||
|
||||
|
||||
def mainlist(item):
    """Build the channel root menu: films, categories, search, TV series."""
    logger.info(" mainlist")

    # All entries share the same popcorn thumbnail.
    thumb = "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
    entradas = [
        ("peliculas", "[COLOR azure]Film[/COLOR]", "%s/film-categoria/dvdrip-bdrip/" % host, "movie"),
        ("categorie", "[COLOR azure]Categorie[/COLOR]", "%s/elenchi-film/" % host, "movie"),
        ("search", "[COLOR yellow]Cerca...[/COLOR]", None, "movie"),
        ("peliculas_tv", "[COLOR azure]Serie TV[/COLOR]", "%s/lista-serie-tv/" % host, "tvshow"),
        ("search", "[COLOR yellow]Cerca SerieTV...[/COLOR]", None, "tvshow"),
    ]

    itemlist = []
    for accion, titulo, url, extra in entradas:
        kwargs = dict(channel=item.channel, action=accion, title=titulo,
                      extra=extra, thumbnail=thumb)
        # Search entries carry no url (the url is built later from the query).
        if url is not None:
            kwargs['url'] = url
        itemlist.append(Item(**kwargs))

    return itemlist
|
||||
|
||||
|
||||
def peliculas(item):
    """Scrape the film grid at item.url and return one Item per film.

    Follows the "nextpostslink" anchor for pagination when present.
    """
    logger.info(" peliculas")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<a href="([^"]+)" title="([^"]+)" class="teaser-thumb">'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        # No thumbnail is scraped here; infoIca is expected to fill metadata in.
        scrapedthumbnail = ""
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot",
                 Folder=True), tipo='movie'))

    # Pagination: append a "next page" item when the site exposes one.
    nextpage_regex = '<a class="nextpostslink".*?href="([^"]+)".*?<\/a>'
    next_page = scrapertools.find_single_match(data, nextpage_regex)
    if next_page != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url="%s" % next_page,
                 extra=item.extra,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
    return itemlist
|
||||
|
||||
|
||||
def categorie(item):
    """List film genres parsed from the genre <select> box on the page."""
    itemlist = []

    if item.url == "":
        item.url = host

    data = httptools.downloadpage(item.url, headers=headers).data
    # Restrict parsing to the block between "Genere" and the closing </select>.
    bloque = scrapertools.get_match(data, 'Genere(.*?)</select>')
    patron = '<option value="([^"]+)">(.*?)</option>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, scrapedtitle in matches:
        # Skip options whose label contains "adesso".
        if "adesso" in scrapedtitle:
            continue
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 fulltitle=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 Folder=True))
    return itemlist
|
||||
|
||||
|
||||
def peliculas_tv(item):
    """Paginated alphabetical list of TV series.

    The page number is piggy-backed on item.url after a '{}' marker
    (url + '{}' + page); PERPAGE entries are emitted per page.
    """
    itemlist = []

    if item.url == "":
        item.url = host

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    data = httptools.downloadpage(item.url, headers=headers).data
    bloque = scrapertools.get_match(data, 'Lista Serie Tv</h2>(.*?)</section>')
    patron = '<a href=\'(/serie/[^\']+)\'>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Client-side pagination window: skip entries before this page, stop after it.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    # More entries left: emit a "next page" item carrying page p+1 in the url.
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def episodios(item):
    """List episode rows of a series page (rows of the first <table>)."""
    itemlist = []

    if item.url == "":
        item.url = host
    if host not in item.url:
        # Series links scraped by peliculas_tv are relative ('/serie/...').
        item.url = '%s%s' % (host, item.url)

    data = httptools.downloadpage(item.url, headers=headers).data

    bloque = scrapertools.find_single_match(data, '<table>(.*?)</table>')
    patron = '<tr><td>([^<]+)</td>.*?</tr>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedtitle in matches:
        # Each episode keeps the series page url; episodio() re-locates its row.
        itemlist.append(
            Item(channel=item.channel,
                 action="episodio",
                 fulltitle=scrapedtitle,
                 title=scrapedtitle,
                 url=item.url,
                 viewmode="movie_with_plot",
                 Folder=True))
    return itemlist
|
||||
|
||||
|
||||
def episodio(item):
    """Resolve the video links of a single episode row.

    Re-downloads the series page, isolates the <tr> whose first cell
    matches item.title, and lets servertools detect the embedded servers.

    Fix: item.title is now escaped with re.escape before being spliced
    into the row-matching regex -- titles containing regex
    metacharacters (e.g. '(', '+', '?') previously broke the match.
    """
    if item.url == "":
        item.url = host

    data = httptools.downloadpage(item.url, headers=headers).data
    patron = '<tr><td>' + re.escape(item.title) + '</td>.*?</tr>'
    data = scrapertools.find_single_match(data, patron)
    itemlist = servertools.find_video_items(data=data)
    for i in itemlist:
        # Try to recover the quality label from the tab that embeds this url.
        tab = re.compile(
            '<div\s*id="(tab[^"]+)"[^>]+>[^>]+>[^>]+src="http[s]*:%s[^"]+"' % i.url.replace('http:', '').replace(
                'https:', ''), re.DOTALL).findall(data)
        qual = ''
        if tab:
            qual = re.compile('<a\s*href="#%s">([^<]+)<' % tab[0], re.DOTALL).findall(data)[0].replace("'", "")
            qual = "[COLOR orange]%s[/COLOR] - " % qual
        # NOTE(review): title is sliced [2:] here but [1:] in findvideos() -- confirm which is intended.
        i.title = '%s[COLOR green][B]%s[/B][/COLOR] - %s' % (qual, i.title[2:], item.title)
        i.channel = item.channel
        i.fulltitle = item.title

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Global search entry point for the channel.

    Builds the site search URL from texto and dispatches to the movie or
    TV-show listing depending on item.extra.  Always returns a list:
    [] on scraping errors and for unrecognised item.extra values (the
    original fell through returning None in that case).
    """
    logger.info("[marapcana.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        if item.extra == "movie":
            return peliculas(item)
        if item.extra == "tvshow":
            return peliculas_tv(item)
    # Keep the global search running if this channel errors out.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    # Unknown extra value: return an empty, iterable result.
    return []
|
||||
|
||||
|
||||
def findvideos(item):
    """Collect video server links for a film page.

    Shortened outbound links are resolved with unshortenit and the
    resolved urls are appended to `data` so servertools can detect them
    alongside the servers already embedded in the page.
    """
    logger.info(" findvideos")

    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data

    urls = re.findall(r'href="([^"]+)" target="_blank" rel="noopener noreferrer">', data, re.DOTALL)

    if urls:
        for url in urls:
            url, c = unshortenit.unshorten(url)
            data += url + '\n'

    itemlist = servertools.find_video_items(data=data)
    for i in itemlist:
        # Recover the quality label from the tab embedding this url, if present.
        tab = re.compile(
            '<div\s*id="(tab[^"]+)"[^>]+>[^>]+>[^>]+src="http[s]*:%s[^"]+"' % i.url.replace('http:', '').replace(
                'https:', ''), re.DOTALL).findall(data)
        qual = ''
        if tab:
            qual = re.compile('<a\s*href="#%s">([^<]+)<' % tab[0], re.DOTALL).findall(data)[0].replace("'", "")
            qual = "[COLOR orange]%s[/COLOR] - " % qual
        # NOTE(review): title sliced [1:] here but [2:] in episodio() -- confirm which is intended.
        i.title = '%s[COLOR green][B]%s[/B][/COLOR] - %s' % (qual, i.title[1:], item.title)
        i.channel = item.channel
        i.fulltitle = item.title

    return itemlist
|
||||
24
plugin.video.alfa/channels/mmaiptv.json
Normal file
24
plugin.video.alfa/channels/mmaiptv.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"id": "mmaiptv",
|
||||
"name": "MmaIptv",
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"language": [
|
||||
"it"
|
||||
],
|
||||
"thumbnail": "https:\/\/imageshack.com\/a\/img924\/5981\/XN1yc1.png",
|
||||
"bannermenu": "https:\/\/imageshack.com\/a\/img924\/5981\/XN1yc1.png",
|
||||
"categories": [
|
||||
"anime"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Includi ricerca globale",
|
||||
"default": false,
|
||||
"enabled": false,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
108
plugin.video.alfa/channels/mmaiptv.py
Normal file
108
plugin.video.alfa/channels/mmaiptv.py
Normal file
@@ -0,0 +1,108 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale per mmaiptv
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ----------------------------------------------------------
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from platformcode import logger
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
|
||||
|
||||
|
||||
host = "http://mmaiptv.it"
|
||||
|
||||
headers = [['Referer', host]]
|
||||
|
||||
def mainlist(item):
    """Root menu for the channel: full catalogue plus search."""
    logger.info("[mmaiptv.py] mainlist")

    # Both entries point at the same listing endpoint.
    url_listado = "%s/b.php" % host

    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         action="list_titles",
                         title="[COLOR azure]Tutti[/COLOR]",
                         url=url_listado,
                         extra="anime",
                         thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"))
    itemlist.append(Item(channel=item.channel,
                         action="search",
                         title="[COLOR yellow]Cerca[/COLOR]",
                         url=url_listado,
                         extra="search",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))

    return itemlist
|
||||
|
||||
def list_titles(item):
    """List all titles from the catalogue page at item.url.

    When reached from search() (item.extra contains 'search') the scraped
    urls are relative and get prefixed with the host.
    """
    logger.info("[mmaiptv.py] list_titles")
    itemlist = []

    if item.url == "":
        item.url = host
    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # NOTE(review): same pattern is duplicated in episodes() -- keep the two in sync.
    patronvideos = '<div class="tab-pane active".*?<font color="#000000">([^<]+)<\/font>.*?<a href="([^"]+)"><img src="([^"]+)".*?<\/div>'

    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="episodes",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl if not 'search' in item.extra else (host + "/"+scrapedurl),
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot",
                 Folder=True))
    return itemlist
|
||||
|
||||
def episodes(item):
    """List the episodes of a title, in reverse page order (newest first)."""
    logger.info("[mmaiptv.py] serietv")
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data
    # NOTE(review): same pattern is duplicated in list_titles() -- keep the two in sync.
    patronvideos = '<div class="tab-pane active".*?<font color="#000000">([^<]+)<\/font>.*?<a href="([^"]+)"><img src="([^"]+)".*?<\/div>'

    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 extra=item.extra,
                 viewmode="movie_with_plot"))
    return list(reversed(itemlist))
|
||||
|
||||
def search(item, texto):
    """Run a catalogue search: point item.url at the search endpoint and reuse list_titles()."""
    logger.info("[mmaiptv.py] search")
    item.url = "%s/d.php?search=%s" % (host, texto)
    return list_titles(item)
|
||||
|
||||
def findvideos(item):
    """Extract direct stream URLs ("file: ...") from the episode page.

    Fix: the original executed `headers.append(['Referer', item.url])`
    after the download, mutating the module-level `headers` list -- it
    grew by one stale Referer entry on every call and the appended value
    was never used.  The dead mutation is removed; the request itself is
    unchanged.
    """
    logger.info("[mmaiptv.py] findvideos")
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    patron = "file: \"([^\"]+)\""
    matches = re.compile(patron, re.DOTALL).findall(data)
    for video in matches:
        itemlist.append(Item(channel=item.channel, action="play", title="[.mp4] [COLOR azure]%s[/COLOR]" % item.title,url=video, folder=False))

    return itemlist
|
||||
|
||||
23
plugin.video.alfa/channels/mondolunatico.json
Normal file
23
plugin.video.alfa/channels/mondolunatico.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"id": "mondolunatico",
|
||||
"name": "Mondo Lunatico",
|
||||
"language": ["it"],
|
||||
"active": true,
|
||||
"adult": false,
|
||||
"thumbnail": "http://mondolunatico.org/wp-content/uploads/2016/02/images-111.jpg",
|
||||
"banner": "http://mondolunatico.org/wp-content/uploads/2016/02/images-111.jpg",
|
||||
"categories": [
|
||||
"movie", "cult"
|
||||
],
|
||||
"settings": [
|
||||
{
|
||||
"id": "include_in_global_search",
|
||||
"type": "bool",
|
||||
"label": "Incluir en busqueda global",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
448
plugin.video.alfa/channels/mondolunatico.py
Normal file
448
plugin.video.alfa/channels/mondolunatico.py
Normal file
@@ -0,0 +1,448 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# ------------------------------------------------------------
|
||||
# Kodi on Demand - Kodi Addon
|
||||
# Canale mondolunatico
|
||||
# https://alfa-addon.com/categories/kod-addon.50/
|
||||
# ------------------------------------------------------------
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import urllib
|
||||
import urlparse
|
||||
|
||||
from core import httptools
|
||||
from platformcode import config
|
||||
from platformcode import logger
|
||||
from core import scrapertools
|
||||
from core import servertools
|
||||
from core.item import Item
|
||||
from core.tmdb import infoIca
|
||||
|
||||
__channel__ = "mondolunatico"
|
||||
|
||||
host = "http://mondolunatico.org"
|
||||
|
||||
captcha_url = '%s/pass/CaptchaSecurityImages.php?width=100&height=40&characters=5' % host
|
||||
|
||||
PERPAGE = 25
|
||||
|
||||
|
||||
def mainlist(item):
    """Channel root menu: latest additions, genre categories and movie search."""
    logger.info("kod.mondolunatico mainlist")
    itemlist = [Item(channel=item.channel,
                     title="[COLOR azure]Novità[/COLOR]",
                     extra="movie",
                     action="peliculas",
                     url=host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR azure]Categorie[/COLOR]",
                     extra="movie",
                     action="categorias",
                     url=host,
                     thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca...[/COLOR]",
                     extra="movie",
                     action="search",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    # TV-series entries intentionally left disabled by the author:
    #Item(channel=item.channel,
    #     title="[COLOR azure]Serie TV[/COLOR]",
    #     extra="tvshow",
    #     action="serietv",
    #     url="%s/serietv/lista-alfabetica/" % host,
    #     thumbnail="http://i.imgur.com/rO0ggX2.png"),
    #Item(channel=item.channel,
    #     title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
    #     extra="tvshow",
    #     action="search",
    #     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),]
    return itemlist
|
||||
|
||||
|
||||
def categorias(item):
    """List film genre categories scraped from the homepage combo box.

    The category url is rebuilt from the cleaned title, not taken from
    the option's value attribute.
    """
    logger.info("kod.mondolunatico categorias")
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # Narrow search by selecting only the combo
    bloque = scrapertools.get_match(data, '<option class="level-0" value="7">(.*?)<option class="level-0" value="8">')

    # The categories are the options for the combo
    patron = '<option class=[^=]+="([^"]+)">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, scrapedtitle in matches:
        # Strip spaces, parentheses and digits (e.g. "Drammatico (12)" -> "Drammatico"),
        # then decode HTML entities.  Replaces the original chain of 13 str.replace calls;
        # the character set and ordering (strip before decode) are preserved exactly.
        scrapedtitle = scrapertools.decodeHtmlentities(re.sub(r'[ ()0-9]', '', scrapedtitle))
        scrapedurl = host + "/category/film-per-genere/" + scrapedtitle
        scrapedthumbnail = ""
        scrapedplot = ""
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))

    return itemlist
|
||||
|
||||
|
||||
def search(item, texto):
    """Global search dispatcher for the channel.

    Always returns a list: [] on scraping errors and for unrecognised
    item.extra values (the original fell through returning None).
    """
    logger.info("[mondolunatico.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        if item.extra == "movie":
            return peliculas(item)
        if item.extra == "tvshow":
            item.url = "%s/serietv/lista-alfabetica/" % host
            return search_serietv(item, texto)
    # Keep the global search running if this channel errors out.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    # Unknown extra value: return an empty, iterable result.
    return []
|
||||
|
||||
|
||||
def peliculas(item):
    """Scrape the film grid ("boxentry" blocks) at item.url.

    Follows the "nextpostslink" anchor for pagination when present.
    """
    logger.info("kod.mondolunatico peliculas")

    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries
    patron = '<div class="boxentry">\s*<a href="([^"]+)"[^>]+>\s*<img src="([^"]+)" alt="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapedplot = ""
    for scrapedurl, scrapedthumbnail, scrapedtitle, in matches:
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))

    # Pagination
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def serietv(item):
    """Alphabetical TV-series list, paginated client-side.

    The page number is piggy-backed on item.url after a '{}' marker
    (url + '{}' + page); PERPAGE entries are emitted per page.
    """
    logger.info("kod.mondolunatico serietv")

    itemlist = []

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

    # Load the page
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<h1>Lista Alfabetica</h1>(.*?)</div>')

    # Extract the entries
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Client-side pagination window: skip entries before this page, stop after it.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    # More entries left: emit a "next page" item carrying page p+1 in the url.
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="serietv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
|
||||
|
||||
|
||||
def search_serietv(item, texto):
    """Search the alphabetical TV-series list for titles containing *texto*.

    Downloads the full alphabetical index, then keeps only the entries
    whose (HTML-entity-decoded) title contains the query, compared
    case-insensitively.  Returns a list of "episodios" Item objects.
    """
    # Fixed: previously logged "serietv" (copy-paste from the listing
    # function), which made the two code paths indistinguishable in logs.
    logger.info("kod.mondolunatico search_serietv")

    # The query arrives URL-encoded; normalize it for substring matching.
    texto = urllib.unquote_plus(texto).lower()

    itemlist = []

    # Download the page and keep only the alphabetical-list section.
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<h1>Lista Alfabetica</h1>(.*?)</div>')

    # Extract (url, title) pairs from the list entries.
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    scrapedplot = ""
    scrapedthumbnail = ""
    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Case-insensitive substring filter on the decoded title.
        if texto not in title.lower():
            continue
        itemlist.append(infoIca(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))

    return itemlist
def episodios(item):
    """List the episodes of a series: resolve keeplinks.eu link-protector
    pages and the site's own captcha-gated /pass/ pages, then scrape the
    episode links out of the combined HTML."""
    logger.info("kod.mondolunatico episodios")

    itemlist = []

    # Load the series page.
    data = httptools.downloadpage(item.url).data

    # Accumulates the HTML of every resolved protector page.
    html = []

    # NOTE(review): everything below runs twice on the same `data`.
    # Presumably the first request primes the protector's cookies so the
    # second one returns the real links — TODO confirm; as written it also
    # re-downloads each page (and may prompt for the captcha twice).
    for i in range(2):
        patron = 'href="(https?://www\.keeplinks\.eu/p92/([^"]+))"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for keeplinks, id in matches:
            # The flag[<id>]=1 / nopopatall cookies make keeplinks serve the
            # link list directly instead of its interstitial page.
            _headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                        ['Referer', keeplinks]]

            html.append(httptools.downloadpage(keeplinks, headers=_headers).data)

        # The site's own protected pages live under /pass/index.php?ID=...
        patron = r'="(%s/pass/index\.php\?ID=[^"]+)"' % host
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl in matches:
            tmp = httptools.downloadpage(scrapedurl).data

            if 'CaptchaSecurityImages.php' in tmp:
                # Download the captcha image.
                # NOTE(review): `captcha_url` is a module-level name not
                # visible here — presumably the fixed captcha-image URL.
                img_content = httptools.downloadpage(captcha_url).data

                # Persist it to disk so the on-screen keyboard can show it.
                captcha_fname = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
                with open(captcha_fname, 'wb') as ff:
                    ff.write(img_content)

                from platformcode import captcha

                # Ask the user to type the captcha text.
                keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
                keyb.doModal()
                if keyb.isConfirmed():
                    captcha_text = keyb.getText()
                    # Re-request the page with the solved captcha posted.
                    post_data = urllib.urlencode({'submit1': 'Invia', 'security_code': captcha_text})
                    tmp = httptools.downloadpage(scrapedurl, post=post_data).data

                # Best-effort cleanup of the temporary captcha image.
                try:
                    os.remove(captcha_fname)
                except:
                    pass

            html.append(tmp)

    # Scrape episode links from the concatenation of all resolved pages.
    data = '\n'.join(html)

    # Deduplicate episodes by title across both patterns below.
    encontrados = set()

    patron = '<p><a href="([^"]+?)">([^<]+?)</a></p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        # Keep only the last path segment as the display title.
        scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    # Second link layout used by some pages ("selecttext live" anchors).
    patron = '<a href="([^"]+)" target="_blank" class="selecttext live">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))

    return itemlist
def findvideos(item):
    """Collect playable video links for an episode/film page.

    ORDER MATTERS here: `data` is progressively augmented — each pattern
    pass may append the HTML of the pages it resolved, and later passes
    (and the final server scan) run over the enlarged `data`.
    """
    logger.info("kod.mondolunatico findvideos")

    itemlist = []

    # Load the page.  For "tvshow" extras the url already holds the HTML
    # to scan instead of an address to fetch.
    data = item.url if item.extra == "tvshow" else httptools.downloadpage(item.url).data

    # Captcha-protected /pass/ links: emit them as 'captcha' pseudo-server
    # items so play() can handle the captcha round-trip on demand.
    patron = r'noshade>(.*?)<br>.*?<a href="(%s/pass/index\.php\?ID=[^"]+)"' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('*', '').replace('Streaming', '').strip()
        title = '%s - [%s]' % (item.title, scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 server='captcha',
                 folder=False))

    # Inline /stream/links/ pages: pull their HTML into `data`.
    patron = 'href="(%s/stream/links/\d+/)"' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data += httptools.downloadpage(scrapedurl).data

    ### robalo fix obfuscator - start ####

    # keeplinks.co/.eu protector: the flag/nopopatall cookies skip the
    # interstitial; the extracted target urls are appended to `data`.
    patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p92/([^"]+))"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for keeplinks, id in matches:
        headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                   ['Referer', keeplinks]]

        html = httptools.downloadpage(keeplinks, headers=headers).data
        # ('lable' is the site's own typo in its markup, not ours.)
        data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))

    ### robalo fix obfuscator - end ####

    # Embedded iframes: fetch their contents into `data` too.
    patron = 'src="([^"]+)" frameborder="0"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data += httptools.downloadpage(scrapedurl).data

    # Let servertools recognise every hoster link in the accumulated HTML.
    for videoitem in servertools.find_video_items(data=data):
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        itemlist.append(videoitem)

    return itemlist
def play(item):
    """Resolve the final playable items for *item*.

    Items tagged with the 'captcha' pseudo-server point at the site's
    protected /pass/ pages: fetch the page, solve the captcha with the
    user's help if one is presented, and scan the resulting HTML for
    hoster links.  Any other item is already playable and is returned
    unchanged.
    """
    logger.info("kod.mondolunatico play")

    # Non-protected items pass straight through.
    if item.server != 'captcha':
        return [item]

    request_headers = [['Referer', item.url]]

    # Load the protected page.
    data = httptools.downloadpage(item.url, headers=request_headers).data

    if 'CaptchaSecurityImages.php' in data:
        # Download the captcha image and persist it so the on-screen
        # keyboard can display it.
        image_bytes = httptools.downloadpage(captcha_url, headers=request_headers).data
        image_path = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
        with open(image_path, 'wb') as out_file:
            out_file.write(image_bytes)

        from platformcode import captcha

        # Ask the user to transcribe the captcha.
        dialog = captcha.Keyboard(heading='', captcha=image_path)
        dialog.doModal()
        if dialog.isConfirmed():
            solution = dialog.getText()
            # Re-request the page with the solved captcha posted.
            form = urllib.urlencode({'submit1': 'Invia', 'security_code': solution})
            data = httptools.downloadpage(item.url, post=form, headers=request_headers).data

        # Best-effort cleanup of the temporary captcha image.
        try:
            os.remove(image_path)
        except:
            pass

    # Scan the (possibly captcha-unlocked) HTML for hoster links and
    # re-brand each result with this item's metadata.
    itemlist = list(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel

    return itemlist
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user