Merge branch 'master' of https://github.com/kodiondemand/addon
@@ -4,15 +4,23 @@
    "active": true,
    "adult": false,
    "language": ["ita"],
    "fanart": "",
    "thumbnail": "",
    "banner": "http://altadefinizione01.link/templates/Dark/img/logonyy.png",
    "fanart": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
    "thumbnail": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
    "banner": "https://altadefinizione01.estate/templates/Dark/img/nlogo.png",
    "fix": "reimpostato url e modificato file per KOD",
    "change_date": "2019-04-30",
    "categories": [
        "movie"
    ],
    "settings": [
        {
            "id": "channel_host",
            "type": "text",
            "label": "Host del canale",
            "default": "https://altadefinizione01.estate/",
            "enabled": true,
            "visible": true
        },
        {
            "id": "modo_grafico",
            "type": "bool",
@@ -3,14 +3,13 @@
# -*- Created for Alfa-addon -*-
# -*- and adapted for KOD -*-
# -*- By Greko -*-
# -*- last change: 04/05/2019
# -*- last change: 26/05/2019


from channelselector import get_thumb
from core import httptools, scrapertools, servertools, tmdb, support
import channelselector
from specials import autoplay
from core import servertools, support, jsontools
from core.item import Item
from platformcode import config, logger
from specials import autoplay, filtertools


__channel__ = "altadefinizione01_link"


@@ -18,21 +17,19 @@ __channel__ = "altadefinizione01_link"
#host = "http://altadefinizione01.art/"  # updated 22 March 2019
#host = "https://altadefinizione01.network/"  # updated 22 March 2019
#host = "http://altadefinizione01.date/"  # updated 3 May 2019
host = "https://altadefinizione01.voto/"  # updated 3 May 2019
#host = "https://altadefinizione01.voto/"  # updated 3 May 2019
#host = "https://altadefinizione01.estate/"  # updated 23 May 2019

# ======== utility defs START ============================


list_servers = ['supervideo', 'streamcherry', 'rapidvideo', 'streamango', 'openload']
list_quality = ['default']

host = config.get_setting("channel_host", __channel__)
checklinks = config.get_setting('checklinks', __channel__)
checklinks_number = config.get_setting('checklinks_number', __channel__)

headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
           ['Referer', host]]  #,['Accept-Language','it-IT,it;q=0.8,en-US;q=0.5,en;q=0.3']]

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamcherry', 'rapidvideo', 'streamango', 'supervideo']
list_quality = ['default']

headers = [['Referer', host]]
# =========== home menu ===================

def mainlist(item):
@@ -41,164 +38,88 @@ def mainlist(item):
    :param item:
    :return: itemlist []
    """
    logger.info("%s mainlist log: %s" % (__channel__, item))
    support.log()
    itemlist = []

    autoplay.init(item.channel, list_servers, list_quality)
    # Main menu
    support.menu(itemlist, 'Film Ultimi Arrivi bold', 'peliculas', host)  #, args='film')
    support.menu(itemlist, 'Genere', 'categorie', host, args=['', 'genres'])
    support.menu(itemlist, 'Per anno submenu', 'categorie', host, args=['Film per Anno', 'years'])
    support.menu(itemlist, 'Per qualità submenu', 'categorie', host, args=['Film per qualità', 'quality'])
    support.menu(itemlist, 'Novità bold', 'peliculas', host)
    support.menu(itemlist, 'Film per Genere', 'genres', host, args='genres')
    support.menu(itemlist, 'Film per Anno submenu', 'genres', host, args='years')
    support.menu(itemlist, 'Film per Qualità submenu', 'genres', host, args='quality')
    support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + 'film-del-cinema')
    support.menu(itemlist, 'Popolari bold', 'categorie', host + 'piu-visti.html', args=['popular', ''])
    support.menu(itemlist, 'Mi sento fortunato bold', 'categorie', host, args=['fortunato', 'lucky'])
    support.menu(itemlist, 'Popolari bold', 'peliculas', host + 'piu-visti.html')
    support.menu(itemlist, 'Mi sento fortunato bold', 'genres', host, args='lucky')
    support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + 'film-sub-ita/')
    support.menu(itemlist, 'Cerca film submenu', 'search', host)

    # for autoplay
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

    itemlist.append(
        Item(channel='setting',
             action="channel_config",
             title=support.typo("Configurazione Canale color lime"),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png'))
    )

    return itemlist

# ======== defs in menu order ===========================
# ======== defs in order of menu action ===========================

def peliculas(item):
    logger.info("%s mainlist peliculas log: %s" % (__channel__, item))
    support.log()
    itemlist = []
    # download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    # make the appropriate changes from here on
    patron = 'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)".*?'\
             'class="ml-item-title">([^"]+)</.*?class="ml-item-label">'\
             '(.*?)<.*?class="ml-item-label">.*?class="ml-item-label">(.*?)</'
    matches = scrapertools.find_multiple_matches(data, patron)

    for scrapedurl, scrapedimg, scrapedtitle, scrapedyear, scrapedlang in matches:
        if 'italiano' in scrapedlang.lower():
            scrapedlang = 'ITA'
        else:
            scrapedlang = 'Sub-Ita'
        itemlist.append(Item(
            channel=item.channel,
            action="findvideos",
            contentTitle=scrapedtitle,
            fulltitle=scrapedtitle,
            url=scrapedurl,
            infoLabels={'year': scrapedyear},
            contentType="movie",
            thumbnail=scrapedimg,
            title="%s [%s]" % (scrapedtitle, scrapedlang),
            language=scrapedlang,
            context="buscar_trailer"
        ))
    patron = r'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)"'\
             '.*?class="ml-item-title">([^<]+)</.*?class="ml-item-label"> (\d{4}) <'\
             '.*?class="ml-item-label">.*?class="ml-item-label ml-item-label-.+?"> '\
             '(.+?) </div>.*?class="ml-item-label"> (.+?) </'
    listGroups = ['url', 'thumb', 'title', 'year', 'quality', 'lang']

    # since the site takes the film's year from TMDB, the title-year lookup is exact, so there is no point
    # scraping the posters and plots from the site, which sometimes get them wrong
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination
    support.nextPage(itemlist, item, data, '<span>\d</span> <a href="([^"]+)">')

    patronNext = '<span>\d</span> <a href="([^"]+)">'

    itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
                              headers=headers, patronNext=patronNext,
                              action='findvideos')

    return itemlist

# =========== categories page def ======================================

def categorie(item):
    logger.info("%s mainlist categorie log: %s" % (__channel__, item))
def genres(item):
    support.log()
    itemlist = []
    # download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    #data = httptools.downloadpage(item.url, headers=headers).data
    action = 'peliculas'
    if item.args == 'genres':
        bloque = r'<ul class="listSubCat" id="Film">(.*?)</ul>'
    elif item.args == 'years':
        bloque = r'<ul class="listSubCat" id="Anno">(.*?)</ul>'
    elif item.args == 'quality':
        bloque = r'<ul class="listSubCat" id="Qualita">(.*?)</ul>'
    elif item.args == 'lucky':  # random titles on the page, they change once a day
        bloque = r'FILM RANDOM.*?class="listSubCat">(.*?)</ul>'
        action = 'findvideos'

    patron = r'<li><a href="([^"]+)">(.*?)<'

    # make the appropriate changes from here on
    if item.args[1] == 'genres':
        bloque = scrapertools.find_single_match(data, '<ul class="listSubCat" id="Film">(.*?)</ul>')
    elif item.args[1] == 'years':
        bloque = scrapertools.find_single_match(data, '<ul class="listSubCat" id="Anno">(.*?)</ul>')
    elif item.args[1] == 'quality':
        bloque = scrapertools.find_single_match(data, '<ul class="listSubCat" id="Qualita">(.*?)</ul>')
    elif item.args[1] == 'lucky':  # random titles on the page, some lead only to paid servers
        bloque = scrapertools.find_single_match(data, 'FILM RANDOM.*?class="listSubCat">(.*?)</ul>')
    patron = '<li><a href="/(.*?)">(.*?)<'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    if item.args[1] == 'lucky':
        bloque = scrapertools.find_single_match(data, 'FILM RANDOM.*?class="listSubCat">(.*?)</ul>')
        patron = '<li><a href="(.*?)">(.*?)<'
        matches = scrapertools.find_multiple_matches(bloque, patron)

    for scrapurl, scraptitle in sorted(matches):
        if item.args[1] != 'lucky':
            url = host + scrapurl
            action = "peliculas"
        else:
            url = scrapurl
            action = "findvideos_film"
        itemlist.append(Item(
            channel=item.channel,
            action=action,
            title=scraptitle,
            url=url,
            thumbnail=get_thumb(scraptitle, auto=True),
            Folder=True,
        ))

    return itemlist


# =========== film page def, with the servers for watching it =============
# to fix: it only returns one result

def findvideos(item):
    logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
    itemlist = []
    # download the page
    #data = scrapertools.cache_page(item.url)  # no longer works?
    data = httptools.downloadpage(item.url, headers=headers).data
    # make the appropriate changes from here on
    patron = '<li.*?<a href="#" data-target="(.*?)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    #logger.info("altadefinizione01_linkMATCHES: %s " % matches)
    for scrapedurl in matches:

        try:
            itemlist = servertools.find_video_items(data=data)

            for videoitem in itemlist:
                logger.info("Videoitemlist2: %s" % videoitem)
                videoitem.title = "%s [%s]" % (item.contentTitle, videoitem.title)  #"[%s] %s" % (videoitem.server, item.title) #"[%s]" % (videoitem.title)
                videoitem.show = item.show
                videoitem.contentTitle = item.contentTitle
                videoitem.contentType = item.contentType
                videoitem.channel = item.channel
                videoitem.year = item.infoLabels['year']
                videoitem.infoLabels['plot'] = item.infoLabels['plot']
        except AttributeError:
            logger.error("data doesn't contain expected URL")

    # Check whether the links are valid
    if checklinks:
        itemlist = servertools.check_list_links(itemlist, checklinks_number)

    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    # Add to the video library
    if item.extra != 'findvideos' and item.extra != "library" and config.get_videolibrary_support() and len(itemlist) != 0:
        support.videolibrary(itemlist, item)
    listGroups = ['url', 'title']
    itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
                              headers=headers, patron_block=bloque,
                              action=action)

    return itemlist

# =========== def for searching films/TV series =============
#host+/index.php?do=search&story=avatar&subaction=search
def search(item, text):
    logger.info("%s mainlist search log: %s %s" % (__channel__, item, text))
    support.log()
    itemlist = []
    text = text.replace(" ", "+")
    item.url = host + "/index.php?do=search&story=%s&subaction=search" % (text)
    #item.extra = "search"
    try:
        return peliculas(item)
    # Catch the exception so that a failing channel does not interrupt the global search
@@ -211,20 +132,18 @@ def search(item, text):
# =========== def for the latest entries in the main menu =============

def newest(categoria):
    logger.info("%s mainlist search log: %s" % (__channel__, categoria))
    support.log(categoria)
    itemlist = []
    item = Item()
    #item.extra = 'film'
    try:
        if categoria == "film":
        if categoria == "peliculas":
            item.url = host
            item.action = "peliculas"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continue the search if an error occurs
    except:
        import sys
        for line in sys.exc_info():
@@ -232,3 +151,18 @@ def newest(categoria):
        return []

    return itemlist

def findvideos(item):
    support.log()

    itemlist = support.server(item, headers=headers)

    # Required by FilterTools
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
    autoplay.start(itemlist, item)

    support.videolibrary(itemlist, item, 'color kod')

    return itemlist
@@ -4,9 +4,18 @@
    "active": true,
    "adult": false,
    "language": ["ita"],
    "thumbnail": "https://cdn.animeworld.it/static/images/general/logoaw.png",
    "categories": ["anime"],
    "thumbnail": "animeworld.png",
    "banner": "animeworld.png",
    "categories": ["anime"],
    "settings": [
        {
            "id": "channel_host",
            "type": "text",
            "label": "Host del canale",
            "default": "https://www.animeworld.it",
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
@@ -3,16 +3,20 @@
# Channel for animeworld
# ----------------------------------------------------------
import re
import time
import urllib

import urlparse

import channelselector
from channelselector import thumb
from core import httptools, scrapertoolsV2, servertools, tmdb, support
from core import httptools, scrapertoolsV2, servertools, tmdb, support, jsontools
from core.item import Item
from platformcode import logger, config
from specials import autoplay, autorenumber

host = "https://www.animeworld.it"

__channel__ = 'animeworld'
host = config.get_setting("channel_host", __channel__)
headers = [['Referer', host]]

IDIOMAS = {'Italiano': 'Italiano'}
@@ -25,21 +29,51 @@ checklinks_number = config.get_setting('checklinks_number', 'animeworld')


def mainlist(item):
    logger.info("[animeworld.py] mainlist")
    logger.info(__channel__ + " mainlist")

    itemlist = []

    support.menu(itemlist, 'Anime ITA submenu bold', 'build_menu', host + '/filter?language[]=1')
    support.menu(itemlist, 'Anime SUB submenu bold', 'build_menu', host + '/filter?language[]=0')
    support.menu(itemlist, 'Anime A-Z sub', 'alfabetico', host + '/az-list')
    support.menu(itemlist, 'Anime - Ultimi Aggiunti', 'alfabetico', host + '/newest')
    support.menu(itemlist, 'Anime - Ultimi Episodi', 'alfabetico', host + '/newest')
    support.menu(itemlist, 'Anime bold', 'lista_anime', host + '/az-list')
    support.menu(itemlist, 'ITA submenu', 'build_menu', host + '/filter?language[]=1', args=["anime"])
    support.menu(itemlist, 'Sub-ITA submenu', 'build_menu', host + '/filter?language[]=0', args=["anime"])
    support.menu(itemlist, 'Archivio A-Z submenu', 'alfabetico', host + '/az-list', args=["tvshow","a-z"])
    support.menu(itemlist, 'In corso submenu', 'video', host + '/', args=["in sala"])
    support.menu(itemlist, 'Generi submenu', 'generi', host + '/')
    support.menu(itemlist, 'Ultimi Aggiunti bold', 'video', host + '/newest', args=["anime"])
    support.menu(itemlist, 'Ultimi Episodi bold', 'video', host + '/updated', args=["novita'"])
    support.menu(itemlist, 'Cerca...', 'search')


    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

    itemlist.append(
        Item(channel='setting',
             action="channel_config",
             title=support.typo("Configurazione Canale color lime"),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png'))
    )

    return itemlist

# Build the genres menu =================================================

def generi(item):
    support.log(item.channel + " generi")
    itemlist = []
    patron_block = r'</i>\sGeneri</a>\s*<ul class="sub">(.*?)</ul>'
    patron = r'<a href="([^"]+)"\stitle="([^"]+)">'
    matches = support.match(item, patron, patron_block, headers)[0]

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(Item(
            channel=item.channel,
            action="video",
            title=scrapedtitle,
            url="%s%s" % (host, scrapedurl)))

    return itemlist


@@ -103,7 +137,7 @@ def build_sub_menu(item):
# Latest additions ======================================================

def newest(categoria):
    logger.info("[animeworld.py] newest")
    logger.info(__channel__ + " newest")
    itemlist = []
    item = Item()
    try:
@@ -144,7 +178,7 @@ def search(item, texto):
# A-Z list ====================================================

def alfabetico(item):
    logger.info("[animeworld.py] alfabetico")
    logger.info(__channel__ + " alfabetico")
    itemlist = []

    data = httptools.downloadpage(item.url).data
@@ -170,7 +204,7 @@ def alfabetico(item):
    return itemlist

def lista_anime(item):
    logger.info("[animeworld.py] lista_anime")
    logger.info(__channel__ + " lista_anime")

    itemlist = []

@@ -202,7 +236,7 @@ def lista_anime(item):
    itemlist.append(
        Item(channel=item.channel,
             extra=item.extra,
             contentType="tvshow",
             contentType="episode",
             action="episodios",
             text_color="azure",
             title=title,
@@ -217,23 +251,24 @@ def lista_anime(item):
    autorenumber.renumber(itemlist)

    # Next page
    next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href="([^"]+)" rel="next"')

    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 action='lista_anime',
                 title='[B]' + config.get_localized_string(30992) + ' >[/B]',
                 url=next_page,
                 contentType=item.contentType,
                 thumbnail=thumb()))
    support.nextPage(itemlist, item, data, r'<a class="page-link" href="([^"]+)" rel="next"')
    # next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href="([^"]+)" rel="next"')
    #
    # if next_page != '':
    #     itemlist.append(
    #         Item(channel=item.channel,
    #              action='lista_anime',
    #              title='[B]' + config.get_localized_string(30992) + ' >[/B]',
    #              url=next_page,
    #              contentType=item.contentType,
    #              thumbnail=thumb()))


    return itemlist


def video(item):
    logger.info("[animeworld.py] video")
    logger.info(__channel__ + " video")
    itemlist = []

    data = httptools.downloadpage(item.url).data
@@ -296,7 +331,7 @@ def video(item):

    # Check whether these are Episodes or Films
    if movie == '':
        contentType = 'tvshow'
        contentType = 'episode'
        action = 'episodios'
    else:
        contentType = 'movie'
@@ -317,33 +352,38 @@ def video(item):
    autorenumber.renumber(itemlist)

    # Next page
    next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href=".*?page=([^"]+)" rel="next"')

    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 action='video',
                 title='[B]' + config.get_localized_string(30992) + ' >[/B]',
                 url=re.sub('&page=([^"]+)', '', item.url) + '&page=' + next_page,
                 contentType=item.contentType,
                 thumbnail=thumb()))
    support.nextPage(itemlist, item, data, r'<a\sclass="page-link"\shref="([^"]+)"\srel="next"\saria-label="Successiva')
    # next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href=".*?page=([^"]+)" rel="next"')
    #
    # if next_page != '':
    #     itemlist.append(
    #         Item(channel=item.channel,
    #              action='video',
    #              title='[B]' + config.get_localized_string(30992) + ' >[/B]',
    #              url=re.sub('&page=([^"]+)', '', item.url) + '&page=' + next_page,
    #              contentType=item.contentType,
    #              thumbnail=thumb()))

    return itemlist


def episodios(item):
    logger.info("[animeworld.py] episodios")
    logger.info(__channel__ + " episodios")
    itemlist = []

    data = httptools.downloadpage(item.url).data.replace('\n', '')
    data = re.sub(r'>\s*<', '><', data)
    block = scrapertoolsV2.find_single_match(data, r'<div class="widget servers".*?>(.*?)<div id="download"')
    block = scrapertoolsV2.find_single_match(block, r'<div class="server.*?>(.*?)<div class="server.*?>')
    block1 = scrapertoolsV2.find_single_match(data, r'<div class="widget servers".*?>(.*?)<div id="download"')
    block = scrapertoolsV2.find_single_match(block1, r'<div class="server.*?>(.*?)<div class="server.*?>')

    patron = r'<li><a.*?href="([^"]+)".*?>(.*?)<\/a>'
    matches = re.compile(patron, re.DOTALL).findall(block)


    extra = {}
    extra['data'] = block1.replace('<strong>Attenzione!</strong> Non ci sono episodi in questa sezione, torna indietro!.', '')

    for scrapedurl, scrapedtitle in matches:
        extra['episode'] = scrapedtitle
        scrapedtitle = '[B] Episodio ' + scrapedtitle + '[/B]'
        itemlist.append(
            Item(
@@ -356,46 +396,113 @@ def episodios(item):
                show=scrapedtitle,
                plot=item.plot,
                fanart=item.thumbnail,
                extra=extra,
                thumbnail=item.thumbnail))

    autorenumber.renumber(itemlist, item, 'bold')


    # Add to Library
    if config.get_videolibrary_support() and len(itemlist) != 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                show=item.show))
    # if config.get_videolibrary_support() and len(itemlist) != 0:
    #     itemlist.append(
    #         Item(
    #             channel=item.channel,
    #             title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
    #             url=item.url,
    #             action="add_serie_to_library",
    #             extra="episodios",
    #             show=item.show))
    support.videolibrary(itemlist, item)

    return itemlist


def findvideos(item):
    logger.info("[animeworld.py] findvideos")
    logger.info(__channel__ + " findvideos")

    itemlist = []

    # logger.debug(item.extra)
    episode = '1'
    # get the available servers
    if item.extra and item.extra['episode']:
        data = item.extra['data']
        episode = item.extra['episode']
    else:
        data = httptools.downloadpage(item.url, headers=headers).data
    block = scrapertoolsV2.find_single_match(data, r'data-target="\.widget\.servers.*?>(.*?)</div>')
    servers = scrapertoolsV2.find_multiple_matches(block, r'class="tab.*?data-name="([0-9]+)">([^<]+)</span')
    videolist = []
    videoData = ''
    for serverid, servername in servers:
        # get the video id for this server
        block = scrapertoolsV2.find_single_match(data, r'<div class="server.*?data-id="' + serverid + '">(.*?)</ul>')
        id = scrapertoolsV2.find_single_match(block, r'<a\sdata-id="([^"]+)"\sdata-base="' + episode + '"')

        dataJson = httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, id, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
        json = jsontools.load(dataJson)

        videoData += '\n' + json['grabber']

        if serverid == '33':
            post = urllib.urlencode({'r': '', 'd': 'www.animeworld.biz'})
            dataJson = httptools.downloadpage(json['grabber'].replace('/v/', '/api/source/'), headers=[['x-requested-with', 'XMLHttpRequest']], post=post).data
            json = jsontools.load(dataJson)
            logger.debug(json['data'])
            if json['data']:
                for file in json['data']:
                    itemlist.append(
                        Item(
                            channel=item.channel,
                            action="play",
                            title='diretto',
                            url=file['file'],
                            quality=file['label'],
                            server='directo',
                            show=item.show,
                            contentType=item.contentType,
                            folder=False))

        if serverid == '28':
            itemlist.append(
                Item(
                    channel=item.channel,
                    action="play",
                    title='diretto',
                    quality='',
                    url=json['grabber'],
                    server='directo',
                    show=item.show,
                    contentType=item.contentType,
                    folder=False))


    itemlist += servertools.find_video_items(item, videoData)

    return support.server(item, itemlist=itemlist)

    anime_id = scrapertoolsV2.find_single_match(item.url, r'.*\..*?\/(.*)')
    data = httptools.downloadpage(host + "/ajax/episode/serverPlayer?id=" + anime_id).data
    patron = '<source src="([^"]+)"'

    matches = re.compile(patron, re.DOTALL).findall(data)


    for video in matches:
        itemlist.append(
            Item(
                channel=item.channel,
                action="play",
                title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
                # title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
                title='diretto',
                quality='',
                url=video,
                server='directo',
                show=item.show,
                contentType=item.contentType,
                folder=False))

    return support.server(item, data, itemlist)

# Required for filtering links

if checklinks:
@@ -4,10 +4,18 @@
    "active": true,
    "adult": false,
    "language": ["ita"],
    "thumbnail": "http://www.guardaserie.click/wp-content/themes/guardaserie/images/logogd.png",
    "bannermenu": "http://www.guardaserie.click/wp-content/themes/guardaserie/images/logogd.png",
    "thumbnail": "guardaserieclick.png",
    "bannermenu": "guardaserieclick.png",
    "categories": ["tvshow", "anime"],
    "settings": [
        {
            "id": "channel_host",
            "type": "text",
            "label": "Host del canale",
            "default": "https://www.guardaserie.media",
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
@@ -31,6 +39,32 @@
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "checklinks",
            "type": "bool",
            "label": "Verifica se i link esistono",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "checklinks_number",
            "type": "list",
            "label": "Numero de link da verificare",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": [ "1", "3", "5", "10" ]
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostra link in lingua...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": ["Non filtrare","IT"]
        }
    ]
}
@@ -1,49 +1,56 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for guardaserie.click
# Thanks to Icarus crew & Alfa addon
# ------------------------------------------------------------

import re

import channelselector
from core import httptools, scrapertools, servertools, support
from core import tmdb
from core.item import Item
from platformcode import logger, config
from specials import autoplay

host = "http://www.guardaserie.watch"
__channel__ = 'guardaserieclick'
host = config.get_setting("channel_host", __channel__)
headers = [['Referer', host]]

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['speedvideo', 'openload']
list_quality = ['default']

headers = [['Referer', host]]



# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
    logger.info("[GuardaSerieClick.py]==> mainlist")
    itemlist = [Item(channel=item.channel,
                     action="nuoveserie",
                     title=support.color("Nuove serie TV", "orange"),
                     url="%s/lista-serie-tv" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="serietvaggiornate",
                     title=support.color("Serie TV Aggiornate", "azure"),
                     url="%s/lista-serie-tv" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="lista_serie",
                     title=support.color("Anime", "azure"),
                     url="%s/category/animazione/" % host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="categorie",
                     title=support.color("Categorie", "azure"),
                     url=host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     action="search",
                     title=support.color("Cerca ...", "yellow"),
                     extra="serie",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
    support.log(item.channel + " mainlist")

    itemlist = []
    # support.menu(itemlist, 'Serie TV bold')
    support.menu(itemlist, 'Novità bold', 'serietvaggiornate', "%s/lista-serie-tv" % host, 'tvshow')
    support.menu(itemlist, 'Nuove serie', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow')
    support.menu(itemlist, 'Serie inedite Sub-ITA', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['inedite'])
    support.menu(itemlist, 'Da non perdere bold', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'da non perdere'])
    support.menu(itemlist, 'Classiche bold', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'classiche'])
    support.menu(itemlist, 'Anime', 'lista_serie', "%s/category/animazione/" % host, 'tvshow')
    support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow', args=['serie'])
    support.menu(itemlist, 'Cerca', 'search', host, 'tvshow', args=['serie'])

    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

    itemlist.append(
        Item(channel='setting',
             action="channel_config",
             title=support.typo("Configurazione Canale color lime"),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png'))
    )

    return itemlist

@@ -52,7 +59,7 @@ def mainlist(item):

# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
    logger.info("[GuardaSerieClick.py]==> newest" + categoria)
    support.log(__channel__ + " newest" + categoria)
    itemlist = []
    item = Item()
    try:
@@ -78,7 +85,7 @@ def newest(categoria):

# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
    logger.info("[GuardaSerieClick.py]==> search")
    support.log(item.channel + " search")
    item.url = host + "/?s=" + texto
    try:
        return lista_serie(item)
@@ -91,29 +98,44 @@ def search(item, texto):


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()).replace('"', "'")

    return scrapedtitle.strip()


# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def nuoveserie(item):
    logger.info("[GuardaSerieClick.py]==> nuoveserie")
    support.log(item.channel + " nuoveserie")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, '<div\s*class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div')
    patron_block = ''
    if 'inedite' in item.args:
        patron_block = r'<div\s*class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
    elif 'da non perder' in item.args:
        patron_block = r'<div\s*class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
    elif 'classiche' in item.args:
        patron_block = r'<div\s*class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
    else:
        patron_block = r'<div\s*class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'

    patron = r'<a\s*href="([^"]+)".*?>\s*<img\s*.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>'
    patron += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    matches = support.match(item, patron, patron_block, headers)[0]

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = cleantitle(scrapedtitle)

        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 contentType="tv",
                 action="episodios",
                 contentType="episode",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 extra="tv",
                 show=scrapedtitle,
                 thumbnail=scrapedthumbnail,
                 folder=True))
@@ -126,35 +148,48 @@ def nuoveserie(item):

# ----------------------------------------------------------------------------------------------------------------
def serietvaggiornate(item):
    logger.info("[GuardaSerieClick.py]==> serietvaggiornate")
    support.log(item.channel + " serietvaggiornate")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data,
                                            r'<div\s*class="container container-title-serie-lastep container-scheda" meta-slug="lastep">(.*?)</div></div><div')

    patron_block = r'<div\s*class="container container-title-serie-lastep container-scheda" meta-slug="lastep">(.*?)</div></div><div'
    patron = r'<a\s*rel="nofollow" href="([^"]+)"[^>]+> <img\s*.*?src="([^"]+)"[^>]+>[^>]+>'
    patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    matches = support.match(item, patron, patron_block, headers)[0]

    for scrapedurl, scrapedthumbnail, scrapedep, scrapedtitle in matches:
        episode = re.compile(r'^(\d+)x(\d+)', re.DOTALL).findall(scrapedep)  # grab season and episode
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        title = "%s %s" % (scrapedtitle, scrapedep)
        extra = r'<span\s*.*?meta-stag="%s" meta-ep="%s" meta-embed="([^"]+)"[^>]*>' % (
        episode = re.compile(r'^(\d+)x(\d+)', re.DOTALL).findall(scrapedep)  # grab season and episode
        scrapedtitle = cleantitle(scrapedtitle)

        contentlanguage = ""
        if 'sub-ita' in scrapedep.strip().lower():
            contentlanguage = 'Sub-ITA'

        extra = r'<span\s*.*?meta-stag="%s" meta-ep="%s" meta-embed="([^"]+)"\s*.*?embed2="([^"]+)?"\s*.*?embed3="([^"]+)?"[^>]*>' % (
            episode[0][0], episode[0][1].lstrip("0"))

        infoLabels = {}
        infoLabels['episode'] = episode[0][1].lstrip("0")
        infoLabels['season'] = episode[0][0]

        title = str("%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], contentlanguage)).strip()

        itemlist.append(
            Item(channel=item.channel,
                 action="findepvideos",
                 contentType="tv",
                 contentType="episode",
                 title=title,
                 show=title,
                 show=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 extra=extra,
                 thumbnail=scrapedthumbnail,
                 contentLanguage=contentlanguage,
                 infoLabels=infoLabels,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


@@ -162,20 +197,17 @@ def serietvaggiornate(item):

# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
    logger.info("[GuardaSerieClick.py]==> categorie")
    support.log(item.channel + " categorie")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    blocco = scrapertools.find_single_match(data, r'<ul\s*class="dropdown-menu category">(.*?)</ul>')
    patron = r'<li>\s*<a\s*href="([^"]+)"[^>]+>([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    matches = support.match(item, r'<li>\s*<a\s*href="([^"]+)"[^>]+>([^<]+)</a></li>', r'<ul\s*class="dropdown-menu category">(.*?)</ul>', headers)[0]

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_serie",
                 title=scrapedtitle,
                 contentType="tv",
                 contentType="tvshow",
                 url="".join([host, scrapedurl]),
                 thumbnail=item.thumbnail,
                 extra="tv",
@@ -188,66 +220,93 @@ def categorie(item):

# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
    logger.info("[GuardaSerieClick.py]==> lista_serie")
    support.log(item.channel + " lista_serie")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # data = httptools.downloadpage(item.url, headers=headers).data
    #
    # patron = r'<a\s*href="([^"]+)".*?>\s*<img\s*.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
    # blocco = scrapertools.find_single_match(data,
    #                                         r'<div\s*class="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\s*class="container-fluid whitebg" style="">')
    # matches = re.compile(patron, re.DOTALL).findall(blocco)

    patron_block = r'<div\s*class="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\s*class="container-fluid whitebg" style="">'
    patron = r'<a\s*href="([^"]+)".*?>\s*<img\s*.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
    blocco = scrapertools.find_single_match(data,
                                            r'<div\s*class="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\s*class="container-fluid whitebg" style="">')
    matches = re.compile(patron, re.DOTALL).findall(blocco)

    matches, data = support.match(item, patron, patron_block, headers)


    for scrapedurl, scrapedimg, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="episodi",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedimg,
                 extra=item.extra,
                 show=scrapedtitle,
                 folder=True))
        scrapedtitle = cleantitle(scrapedtitle)

        if scrapedtitle not in ['DMCA', 'Contatti', 'Lista di tutte le serie tv']:
            itemlist.append(
                Item(channel=item.channel,
                     action="episodios",
                     contentType="episode",
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedimg,
                     extra=item.extra,
                     show=scrapedtitle,
                     folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    support.nextPage(itemlist, item, data, r"<link\s.*?rel='next'\shref='([^']*)'")

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def episodi(item):
    logger.info("[GuardaSerieClick.py]==> episodi")
def episodios(item):
    support.log(item.channel + " episodios")
    itemlist = []

    data = httptools.downloadpage(item.url, headers=headers).data
    # data = httptools.downloadpage(item.url, headers=headers).data

    patron = r'<div\s*class="[^"]+">([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'
    patron += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<span\s*.*?'
    patron += r'embed="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*'
    patron += r'<img\s*class="[^"]+" src="" data-original="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 contentType="episode",
                 thumbnail=scrapedthumbnail,
                 folder=True))
    patron = r'<div\s*class="[^"]+">\s*([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><p[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
    patron += r'[^<]+[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s*'
    patron += r'.*?embed="([^"]+)"\s*.*?embed2="([^"]+)?"\s*.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*'
    patron += r'(?:<img\s*class="[^"]+" meta-src="([^"]+)"[^>]+>|<img\s*class="[^"]+" src="" data-original="([^"]+)"[^>]+>)?'
    # matches = re.compile(patron, re.DOTALL).findall(data)

    if config.get_videolibrary_support() and len(itemlist) != 0:
    # logger.debug(matches)

    matches = support.match(item, patron, headers=headers)[0]


    for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2, scrapedurl3, scrapedthumbnail, scrapedthumbnail2 in matches:
        scrapedtitle = cleantitle(scrapedtitle)
        scrapedepisode = scrapedepisode.zfill(2)
        scrapedepisodetitle = cleantitle(scrapedepisodetitle)
        title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip()
        if 'SUB-ITA' in scrapedtitle:
            title += " Sub-ITA"

        infoLabels = {}
        infoLabels['season'] = scrapedseason
        infoLabels['episode'] = scrapedepisode
        itemlist.append(
            Item(channel=item.channel,
                 title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodi",
                 show=item.show))
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 fulltitle=scrapedtitle,
                 url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3,
                 contentType="episode",
                 plot=scrapedplot,
                 contentSerieName=scrapedserie,
                 contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
                 infoLabels=infoLabels,
                 thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    support.videolibrary(itemlist, item)

    return itemlist

@@ -256,20 +315,12 @@ def episodi(item):

# ----------------------------------------------------------------------------------------------------------------
def findepvideos(item):
    logger.info("[GuardaSerieClick.py]==> findepvideos")

    support.log(item.channel + " findepvideos")
    data = httptools.downloadpage(item.url, headers=headers).data
    data = scrapertools.find_single_match(data, item.extra)
    itemlist = servertools.find_video_items(data=data)

    for videoitem in itemlist:
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(["[%s] " % support.color(server.capitalize(), 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
    matches = scrapertools.find_multiple_matches(data, item.extra)
    data = "\r\n".join(matches[0])
    item.contentType = 'movie'
    itemlist = support.server(item, data=data)

    return itemlist

@@ -278,17 +329,8 @@ def findepvideos(item):

# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    logger.info("[GuardaSerieClick.py]==> findvideos")

    itemlist = servertools.find_video_items(data=item.url)

    for videoitem in itemlist:
        server = re.sub(r'[-\[\]\s]+', '', videoitem.title).capitalize()
        videoitem.title = "".join(["[%s] " % support.color(server.capitalize(), 'orange'), item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
    support.log(item.channel + " findvideos")
    logger.debug(item.url)
    itemlist = support.server(item, data=item.url)

    return itemlist
@@ -383,7 +383,11 @@ def thumb(itemlist=[]):
            item.thumbnail = get_thumb(thumb + '.png')
        else:
            thumb = item.thumbnails
        # REmove args from title

        if item.thumbnail != '':
            break

    # Remove args from title
    if item.args: item.title = item.title.replace(' || ' + str(item.args), '')
    return itemlist
else:
@@ -95,7 +95,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
           patronNext="", action="findvideos", addVideolibrary = True, type_content_dict={}, type_action_dict={}):
    # patron: the pattern to use for scraping the page; every capturing group must match an entry in listGroups
    # listGroups: a list naming, in order, the scraping info captured by your patron
    # accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating
    # accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating, episode, lang

    # headers: values to pass in the request header
    # blacklist: titles that you want to exclude (service articles, for example)
@@ -112,7 +112,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
    # patron = 'blablabla'
    # headers = [['Referer', host]]
    # blacklist = 'Request a TV serie!'
    # return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot'],
    # return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot', 'episode', 'lang'],
    #                       headers=headers, blacklist=blacklist)
    # 'type' is a check for typologies of content e.g. Film or TV Series
    # 'episode' is a key to grab episode numbers if it is separated from the title
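As a worked illustration of the parameters documented above, here is a minimal, hypothetical call in the keyword style the channels in this commit use; the patron is an invented pattern, not a real site's markup, and each capture group lines up positionally with listGroups:

    # Illustrative sketch only: an invented patron whose four capture groups
    # map, in order, to the listGroups entries below.
    patron = r'<a href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)".*?class="lang">([^<]+)<'
    itemlist = support.scrape(item, patron=patron,
                              listGroups=['url', 'title', 'thumb', 'lang'],
                              headers=[['Referer', host]],
                              blacklist='Request a TV serie!',
                              action='findvideos')
    # a 'lang' group containing 'sub' is rendered as Sub-ITA in the title, otherwise as ITA
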
@@ -136,7 +136,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
            blocks = scrapertoolsV2.find_multiple_matches(block, regex)
            block = ""
            for b in blocks:
                block += "\n" + b
                block += "\n" + str(b)
            log('BLOCK ', n, '=', block)
        else:
            block = data
@@ -144,7 +144,8 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
    matches = scrapertoolsV2.find_multiple_matches(block, patron)
    log('MATCHES =', matches)

    known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type']  # by greko: added episode
    known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang']  # by greko: added episode

    for match in matches:
        if len(listGroups) > len(match):  # to fix a bug
            match = list(match)
@@ -157,7 +158,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
                val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
            scraped[kk] = val

        title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
        title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).replace('"', "'").strip()  # fix by greko: " replaced with '
        plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))

        longtitle = typo(title, 'bold')
@@ -168,6 +169,12 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
        if scraped['title2']:
            title2 = scrapertoolsV2.decodeHtmlentities(scraped["title2"]).strip()
            longtitle = longtitle + typo(title2, 'bold _ -- _')
        if scraped["lang"]:
            if 'sub' in scraped["lang"].lower():
                lang = 'Sub-ITA'
            else:
                lang = 'ITA'
            longtitle += typo(lang, '_ [] color kod')

        if item.infoLabels["title"] or item.fulltitle:  # if title is set, this is probably a list of episodes or video sources
            infolabels = item.infoLabels
BIN resources/media/channels/banner/animeworld.png (new file, binary not shown, 15 KiB)
BIN resources/media/channels/banner/guardaserieclick.png (new file, binary not shown, 21 KiB)
BIN resources/media/channels/thumb/animeworld.png (new file, binary not shown, 6.9 KiB)
BIN resources/media/channels/thumb/guardaserieclick.png (new file, binary not shown, 9.7 KiB)