eurostreaming

Sostituiscono gli attuali che hanno i seguenti problemi:
1. Non aprono tutte le serie, in quanto nella pagina del sito si deve cliccare su una voce per aprire la lista degli episodi
2. Quando si aggiunge una serie alla videoteca e si hanno episodi in italiano e sottotitolati, vengono aggiunti correttamente i titoli in italiano ma i video sono sottotitolati.
This commit is contained in:
greko17
2019-04-30 13:00:29 +02:00
committed by GitHub
parent 048abf9a2d
commit 784e8c82a7
2 changed files with 388 additions and 185 deletions

View File

@@ -1,44 +1,82 @@
{
"id": "eurostreaming",
"name": "Eurostreaming",
"language": ["ita"],
"active": true,
"active": true,
"adult": false,
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/eurostreaming.png",
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/eurostreaming.png",
"language": ["ita"],
"thumbnail": "",
"bannermenu": "",
"categories": ["tvshow","anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Includi in Novità - Anime",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Non filtrare",
"ITA",
"SUB ITA"
]
},
{
"id": "perfil",
"type": "list",
"label": "profilo dei colori",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 5",
"Perfil 4",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -1,65 +1,115 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per eurostreaming
# ------------------------------------------------------------
import re, urlparse
# -*- Created or modificated for Alfa-Addon -*-
# -*- adpted for KOD -*-
# -*- By Greko -*-
from channels import autoplay
from core import scrapertools, httptools, servertools, tmdb, scrapertoolsV2
from core.item import Item
#import base64
import re
import urlparse
# gli import sopra sono da includere all'occorrenza
# per url con ad.fly
from lib import unshortenit
from platformcode import logger, config
from channelselector import thumb
host = "https://eurostreaming.cafe"
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertoolsV2
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
__channel__ = "eurostreaming" #stesso di id nel file json
#host = "https://eurostreaming.zone/"
#host = "https://eurostreaming.black/"
host = "https://eurostreaming.cafe/" #aggiornato al 30-04-2019
# ======== def per utility INIZIO =============================
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Fijar perfil de color
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]#,['Accept-Language','it-IT,it;q=0.8,en-US;q=0.5,en;q=0.3']]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Italiano': 'IT', 'VOSI':'SUB ITA'}
list_language = IDIOMAS.values()
# per l'autoplay
list_servers = ['openload', 'speedvideo', 'wstream', 'streamango' 'flashx', 'nowvideo']
list_quality = ['default']
list_quality = ['default']
# =========== home menu ===================
def mainlist(item):
logger.info("kod.eurostreaming mainlist")
autoplay.init(item.channel, list_servers, list_quality)
logger.info("icarus.eurostreaming mainlist")
itemlist = []
title = ''
itemlist = [
Item(
channel=item.channel,
title="[B]Serie TV[/B]",
action="serietv",
extra="tvshow",
Item(channel=__channel__, title="Serie TV",
contentTitle = __channel__, action="serietv",
#extra="tvshow",
text_color=color4,
url="%s/category/serie-tv-archive/" % host,
thumbnail=
"http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"
),
Item(
channel=item.channel,
title="[B]Anime / Cartoni[/B]",
infoLabels={'plot': item.category},
thumbnail = get_thumb(title, auto = True)
),
Item(channel=__channel__, title="Ultimi Aggiornamenti",
contentTitle = __channel__, action="elenco_aggiornamenti_serietv",
text_color=color4, url="%saggiornamento-episodi/" % host,
#category = __channel__,
extra="tvshow",
infoLabels={'plot': item.category},
thumbnail = get_thumb(title, auto = True)
),
Item(channel=__channel__,
title="Anime / Cartoni",
action="serietv",
extra="tvshow",
text_color=color4,
url="%s/category/anime-cartoni-animati/" % host,
thumbnail=
"http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"
),
Item(
channel=item.channel,
title="[COLOR blue]Cerca...[/COLOR]",
thumbnail= get_thumb(title, auto = True)
),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="tvshow",
thumbnail=
"http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")
text_color=color4,
thumbnail= get_thumb(title, auto = True)
),
]
autoplay.show_option(item.channel, itemlist)
itemlist = thumb(itemlist)
return itemlist
# ======== def in ordine di menu ===========================
def serietv(item):
logger.info("kod.eurostreaming peliculas")
logger.info("%s serietv log: %s" % (__channel__, item))
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
@@ -68,29 +118,30 @@ def serietv(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming", ""))
#scrapedplot = ""
scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)#.replace("Streaming", ""))
if scrapedtitle.startswith("Link to "):
scrapedtitle = scrapedtitle[8:]
# num = scrapertools.find_single_match(scrapedurl, '(-\d+/)')
# if num:
# scrapedurl = scrapedurl.replace(num, "-episodi/")
num = scrapertoolsV2.find_single_match(scrapedurl, '(-\d+/)')
if num:
scrapedurl = scrapedurl.replace(num, "-episodi/")
itemlist.append(
Item(
channel=item.channel,
Item(channel=item.channel,
action="episodios",
contentType="tvshow",
#contentType="tvshow",
contentSerieName = scrapedtitle,
title=scrapedtitle,
fulltitle=scrapedtitle,
text_color="azure",
#text_color="azure",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scrapedtitle,
#plot=scrapedplot,
show=item.show,
extra=item.extra,
folder=True))
folder=True
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# locandine e trama e altro da tmdb se presente l'anno migliora la ricerca
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')
# Paginazione
patronvideos = '<a class="next page-numbers" href="?([^>"]+)">Avanti &raquo;</a>'
@@ -102,17 +153,125 @@ def serietv(item):
Item(
channel=item.channel,
action="serietv",
title="[COLOR blue]" + config.get_localized_string(30992) + "[/COLOR]",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail=thumb(),
thumbnail=
"http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
extra=item.extra,
folder=True))
return itemlist
def episodios(item):
#logger.info("%s episodios log: %s" % (__channel__, item))
itemlist = []
if not(item.lang):
lang_season = {'ITA':0, 'SUB ITA' :0}
# Download pagina
data = httptools.downloadpage(item.url).data
#========
if 'clicca qui per aprire' in data.lower():
logger.info("%s CLICCA QUI PER APRIRE GLI EPISODI log: %s" % (__channel__, item))
item.url = scrapertoolsV2.find_single_match(data, '"go_to":"(.*?)"')
item.url = item.url.replace("\\","")
# Carica la pagina
data = httptools.downloadpage(item.url).data
#logger.info("%s FINE CLICCA QUI PER APRIRE GLI EPISODI log: %s" % (__channel__, item))
elif 'clicca qui</span>' in data.lower():
logger.info("%s inizio CLICCA QUI</span> log: %s" % (__channel__, item))
item.url = scrapertoolsV2.find_single_match(data, '<h2 style="text-align: center;"><a href="(.*?)">')
data = httptools.downloadpage(item.url).data
#logger.info("%s fine CLICCA QUI</span> log: %s" % (__channel__, item))
#=========
data = scrapertoolsV2.decodeHtmlentities(data)
bloque = scrapertoolsV2.find_single_match(data, '<div class="su-accordion">(.*?)<div class="clear"></div>')
patron = '<span class="su-spoiler-icon"></span>(.*?)</div>'
matches = scrapertoolsV2.find_multiple_matches(bloque, patron)
for scrapedseason in matches:
#logger.info("%s scrapedseason log: %s" % (__channel__, scrapedseason))
if "(SUB ITA)" in scrapedseason.upper():
lang = "SUB ITA"
lang_season['SUB ITA'] +=1
else:
lang = "ITA"
lang_season['ITA'] +=1
#logger.info("%s lang_dict log: %s" % (__channel__, lang_season))
for lang in sorted(lang_season):
if lang_season[lang] > 0:
itemlist.append(
Item(channel = item.channel,
action = "episodios",
#contentType = "episode",
contentSerieName = item.title,
title = '%s (%s)' % (item.title, lang),
url = item.url,
fulltitle = item.title,
data = data,
lang = lang,
show = item.show,
folder = True,
))
# locandine e trama e altro da tmdb se presente l'anno migliora la ricerca
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')
return itemlist
else:
# qui ci vanno le puntate delle stagioni
html = item.data
logger.info("%s else log: [%s]" % (__channel__, item))
if item.lang == 'SUB ITA':
item.lang = '\(SUB ITA\)'
logger.info("%s item.lang log: %s" % (__channel__, item.lang))
bloque = scrapertoolsV2.find_single_match(html, '<div class="su-accordion">(.*?)<div class="clear"></div>')
patron = '<span class="su-spoiler-icon"></span>.*?'+item.lang+'</div>(.*?)</div>' # leggo tutte le stagioni
#logger.info("%s patronpatron log: %s" % (__channel__, patron))
matches = scrapertoolsV2.find_multiple_matches(bloque, patron)
for scrapedseason in matches:
#logger.info("%s scrapedseasonscrapedseason log: %s" % (__channel__, scrapedseason))
scrapedseason = scrapedseason.replace('<strong>','').replace('</strong>','')
patron = '(\d+)×(\d+)(.*?)<(.*?)<br />' # stagione - puntanta - titolo - gruppo link
matches = scrapertoolsV2.find_multiple_matches(scrapedseason, patron)
for scrapedseason, scrapedpuntata, scrapedtitolo, scrapedgroupurl in matches:
#logger.info("%s finale log: %s" % (__channel__, patron))
scrapedtitolo = scrapedtitolo.replace('','')
itemlist.append(Item(channel = item.channel,
action = "findvideos",
contentType = "episode",
#contentSerieName = item.contentSerieName,
contentTitle = scrapedtitolo,
title = '%sx%s %s' % (scrapedseason, scrapedpuntata, scrapedtitolo),
url = scrapedgroupurl,
fulltitle = item.fulltitle,
#show = item.show,
#folder = True,
))
logger.info("%s itemlistitemlist log: %s" % (__channel__, itemlist))
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, title="%s" % config.get_localized_string(30161),
text_color="green", extra="episodios",
action="add_serie_to_library", url=item.url,
thumbnail= get_thumb('videolibrary', auto = True),
contentTitle=item.contentSerieName, lang = item.lang,
show=item.show, data = html
#, infoLabels = item.infoLabels
))
return itemlist
# =========== def ricerca =============
def search(item, texto):
logger.info("[eurostreaming.py] " + item.url + " search " + texto)
item.url = "%s/?s=%s" % (host, texto)
#logger.info("[eurostreaming.py] " + item.url + " search " + texto)
logger.info("%s search log: %s" % (__channel__, item))
item.url = "%s?s=%s" % (host, texto)
try:
return serietv(item)
# Continua la ricerca in caso di errore
@@ -122,116 +281,122 @@ def search(item, texto):
logger.error("%s" % line)
return []
def episodios(item):
def load_episodios(html, item, itemlist, lang_title):
patron = '((?:.*?<a[^h]+href="[^"]+"[^>]+>[^<][^<]+<(?:b|\/)[^>]+>)+)'
matches = re.compile(patron).findall(html)
for data in matches:
# Estrazione
scrapedtitle = data.split('<a ')[0]
scrapedtitle = re.sub(r'<[^>]*>', '', scrapedtitle).strip()
if scrapedtitle != 'Categorie':
scrapedtitle = scrapedtitle.replace('&#215;', 'x')
scrapedtitle = scrapedtitle.replace('×', 'x')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title="[COLOR azure]%s[/COLOR]" % (scrapedtitle + " (" + lang_title + ")"),
url=data,
thumbnail=item.thumbnail,
extra=item.extra,
fulltitle=scrapedtitle + " (" + lang_title + ")" + ' - ' + item.show,
show=item.show))
logger.info("[eurostreaming.py] episodios")
# =========== def novità in ricerca globale =============
def newest(categoria):
logger.info("%s newest log: %s" % (__channel__, categoria))
itemlist = []
item = Item()
try:
item.url = "%saggiornamento-episodi/" % host
item.action = "elenco_aggiornamenti_serietv"
itemlist = elenco_aggiornamenti_serietv(item)
# Download pagina
data = httptools.downloadpage(item.url).data
data = scrapertools.decodeHtmlentities(data)
link = False
if itemlist[-1].action == "elenco_aggiornamenti_serietv":
itemlist.pop()
if scrapertoolsV2.find_single_match(data, '<div class="nano_cp_container"><span.*?CLICCA QUI'):
item.url = scrapertoolsV2.find_single_match(data, '<script type="text\/javascript">.*?var nano_ajax_object =.*?"go_to":"(.*?)"').replace('\\', '')
link = True
else:
match = scrapertoolsV2.find_single_match(data, '<h3 style="text-align: center;">.*?<a href="(.*?)">.{0,5}<span.*?CLICCA QUI.*?</a></h3>')
if match != '':
item.url = match
link = True
if link:
data = httptools.downloadpage(item.url).data
data = scrapertools.decodeHtmlentities(data)
data = scrapertoolsV2.find_single_match(data, '<div class="su-accordion">(.+?)<div class="clear">')
lang_titles = []
starts = []
patron = r"STAGIONE.*?ITA"
matches = re.compile(patron, re.IGNORECASE).finditer(data)
for match in matches:
season_title = match.group()
if season_title != '':
lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
starts.append(match.end())
i = 1
len_lang_titles = len(lang_titles)
while i <= len_lang_titles:
inizio = starts[i - 1]
fine = starts[i] if i < len_lang_titles else -1
html = data[inizio:fine]
lang_title = lang_titles[i - 1]
load_episodios(html, item, itemlist, lang_title)
i += 1
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# =========== def pagina aggiornamenti =============
# ======== Ultimi Aggiornamenti ===========================
def elenco_aggiornamenti_serietv(item):
"""
def per la lista degli aggiornamenti
"""
logger.info("%s elenco_aggiornamenti_serietv log: %s" % (__channel__, item))
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
# Estrae i contenuti
#bloque = scrapertoolsV2.get_match(data, '<div class="entry">(.*?)<div class="clear"></div>')
bloque = scrapertoolsV2.find_single_match(data, '<div class="entry">(.*?)<div class="clear"></div>')
patron = '<span class="serieTitle".*?>(.*?)<.*?href="(.*?)".*?>(.*?)<'
matches = scrapertoolsV2.find_multiple_matches(bloque, patron)
for scrapedtitle, scrapedurl, scrapedepisodies in matches:
if "(SUB ITA)" in scrapedepisodies.upper():
lang = "SUB ITA"
scrapedepisodies = scrapedepisodies.replace('(SUB ITA)','')
else:
lang = "ITA"
scrapedepisodies = scrapedepisodies.replace(lang,'')
#num = scrapertoolsV2.find_single_match(scrapedepisodies, '(-\d+/)')
#if num:
# scrapedurl = scrapedurl.replace(num, "-episodi/")
scrapedtitle = scrapedtitle.replace("", "").replace('\xe2\x80\x93 ','').strip()
scrapedepisodies = scrapedepisodies.replace('\xe2\x80\x93 ','').strip()
itemlist.append(
Item(
channel=item.channel,
action="episodios",
contentType="tvshow",
title = "%s" % scrapedtitle, # %s" % (scrapedtitle, scrapedepisodies),
fulltitle = "%s %s" % (scrapedtitle, scrapedepisodies),
text_color = color5,
url = scrapedurl,
#show = "%s %s" % (scrapedtitle, scrapedepisodies),
extra=item.extra,
#lang = lang,
#data = data,
folder=True))
# locandine e trama e altro da tmdb se presente l'anno migliora la ricerca
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True, idioma_busqueda='it')
return itemlist
# =========== def per trovare i video =============
def findvideos(item):
logger.info("kod.eurostreaming findvideos")
logger.info("%s findvideos log: %s" % (__channel__, item))
itemlist = []
# Carica la pagina
data = item.url
matches = re.findall(r'<a href="([^"]+)"[^>]*>[^<]+</a>', data, re.DOTALL)
matches = re.findall(r'a href="([^"]+)"[^>]*>[^<]+</a>', data, re.DOTALL)
data = []
for url in matches:
url, c = unshortenit.unshorten(url)
data.append(url)
itemlist = servertools.find_video_items(data=str(data))
try:
itemlist = servertools.find_video_items(data=str(data))
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
for videoitem in itemlist:
logger.info("Videoitemlist2: %s" % videoitem)
videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)#"[%s] %s" % (videoitem.server, item.title) #"[%s]" % (videoitem.title)
videoitem.show = item.show
videoitem.contentTitle = item.contentTitle
videoitem.contentType = item.contentType
videoitem.channel = item.channel
videoitem.text_color = color5
#videoitem.language = item.language
videoitem.year = item.infoLabels['year']
videoitem.infoLabels['plot'] = item.infoLabels['plot']
except AttributeError:
logger.error("data doesn't contain expected URL")
# Controlla se i link sono validi
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist