Alhaziel
2019-05-27 09:47:34 +02:00
29 changed files with 392 additions and 563 deletions


@@ -8,10 +8,10 @@ import re
import urlparse
import channelselector
from core import httptools, tmdb, scrapertools, support
from core import httptools, tmdb, support, scrapertools, jsontools
from core.item import Item
from platformcode import logger, config
from specials import autoplay
from specials import autoplay, autorenumber
__channel__ = "animesaturn"
host = config.get_setting("channel_host", __channel__)
@@ -22,45 +22,14 @@ list_language = IDIOMAS.values()
list_servers = ['openload','fembed']
list_quality = ['default']
# checklinks = config.get_setting('checklinks', __channel__)
# checklinks_number = config.get_setting('checklinks_number', __channel__)
def mainlist(item):
support.log(item.channel + 'mainlist')
itemlist = []
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host,'anime')
itemlist.append(
Item(channel=item.channel,
action="ultimiep",
url="%s/fetch_pages.php?request=episodios" % host,
title=support.typo("Novità submenu"),
extra="",
contentType='anime',
folder=True,
thumbnail=support.thumb())
)
# itemlist.append(
# Item(channel=item.channel,
# action="lista_anime",
# url="%s/animeincorso" % host,
# title=support.typo("In corso submenu"),
# extra="anime",
# contentType='anime',
# folder=True,
# thumbnail=channelselector.get_thumb('on_the_air.png'))
# )
itemlist.append(
Item(channel=item.channel,
action="list_az",
url="%s/animelist?load_all=1" % host,
title=support.typo("Archivio A-Z submenu"),
extra="anime",
contentType='anime',
folder=True,
thumbnail=channelselector.get_thumb('channels_tvshow_az.png'))
)
support.menu(itemlist, 'Cerca', 'search', host,'anime')
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host,args=['tvshow','alfabetico'])
support.menu(itemlist, 'Cerca', 'search', host)
autoplay.init(item.channel, list_servers, list_quality)
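For reference, each support.menu call above condenses one of the explicit Item blocks removed here; the 'Novità submenu' entry, for instance, is shorthand for roughly the following (adapted from the removed block, with the request renamed to episodes):
itemlist.append(
    Item(channel=item.channel,
         action="ultimiep",
         url="%s/fetch_pages.php?request=episodes" % host,
         title=support.typo("Novità submenu"),
         contentType="tvshow",
         thumbnail=support.thumb()))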
@@ -131,9 +100,14 @@ def lista_anime(item):
title += ' '+support.typo(' (ITA)')
infoLabels = {}
# if 'Akira' in title:
# movie = True
# infoLabels['year']= 1988
if 'Akira' in title:
movie = True
infoLabels['year']= 1988
if 'Dragon Ball Super Movie' in title:
movie = True
infoLabels['year'] = 2019
itemlist.append(
Item(channel=item.channel,
@@ -213,7 +187,7 @@ def episodios(item):
fanart=item.thumbnail,
thumbnail=item.thumbnail))
if((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie):
if(((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType!='movie'):
item.url = itemlist[0].url
item.contentType = 'movie'
return findvideos(item)
@@ -235,6 +209,7 @@ def findvideos(item):
if(item.contentType == 'movie'):
episodes = episodios(item)
if(len(episodes)>0):
item.url = episodes[0].url
@@ -245,23 +220,11 @@ def findvideos(item):
url = scrapertools.find_single_match(data, patron)
data = httptools.downloadpage(url).data
# patron = r"""<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
# matches = re.compile(patron, re.DOTALL).findall(data)
# for video in matches:
# itemlist.append(
# Item(
# channel=item.channel,
# action="play",
# fulltitle=item.fulltitle,
# title="".join([item.title, ' ', support.typo(video.title, 'color kod []')]),
# url=video,
# contentType=item.contentType,
# folder=False))
itemlist = support.server(item, data=data)
# itemlist = filtertools.get_links(itemlist, item, list_language)
if item.contentType == 'movie':
support.videolibrary(itemlist, item, 'color kod')
# Check that the links are valid
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
@@ -279,16 +242,13 @@ def ultimiep(item):
logger.info(item.channel + "ultimiep")
itemlist = []
post = "page=%s" % item.extra if item.extra else None
logger.debug(post)
logger.debug(item.url)
post = "page=%s" % item.args['page'] if item.args and item.args['page'] else None
data = httptools.downloadpage(
item.url, post=post, headers={
'X-Requested-With': 'XMLHttpRequest'
}).data
logger.debug(data)
patron = r"""<a href='[^']+'><div class="locandina"><img alt="[^"]+" src="([^"]+)" title="[^"]+" class="grandezza"></div></a>\s*"""
patron += r"""<a href='([^']+)'><div class="testo">(.+?)</div></a>\s*"""
patron += r"""<a href='[^']+'><div class="testo2">(.+?)</div></a>"""
@@ -298,6 +258,7 @@ def ultimiep(item):
scrapedtitle1 = cleantitle(scrapedtitle1)
scrapedtitle2 = cleantitle(scrapedtitle2)
scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + ''
itemlist.append(
Item(channel=item.channel,
contentType="tvshow",
@@ -313,16 +274,15 @@ def ultimiep(item):
# Pages
patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva'
next_page = scrapertools.find_single_match(data, patronvideos)
if next_page:
itemlist.append(
Item(
channel=item.channel,
action="ultimiep",
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=host + "/fetch_pages?request=episodios",
url=item.url,
thumbnail= support.thumb(),
extra=next_page,
args={'page':next_page},
folder=True))
return itemlist
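A quick sketch of the pagination hand-off introduced above: the next-page Item now carries the page number in args instead of extra, and the follow-up ultimiep call turns it back into POST data (values are illustrative):
next_item = Item(channel=item.channel, action="ultimiep", url=item.url,
                 args={'page': next_page})  # next_page scraped from 'Pagina Successiva'
post = "page=%s" % next_item.args['page'] if next_item.args and next_item.args['page'] else None
# post == "page=2" when next_page == "2"; sent with the X-Requested-With: XMLHttpRequest header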
@@ -362,42 +322,73 @@ def newest(categoria):
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search_anime(item):
logger.info(item.channel + " search_anime")
def search_anime(item, texto):
logger.info(item.channel + " search_anime: "+texto)
itemlist = []
data = httptools.downloadpage(host + "/animelist?load_all=1").data
data = scrapertools.decodeHtmlentities(data)
# data = httptools.downloadpage(host + "/animelist?load_all=1").data
# data = scrapertools.decodeHtmlentities(data)
#
# texto = texto.lower().split('+')
#
# patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
# matches = re.compile(patron, re.DOTALL).findall(data)
#
# for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle)
# for scrapedurl, scrapedtitle in matches
# if all(t in scrapedtitle.lower()
# for t in texto)]:
#
# title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
# showtitle = title
# if '(ITA)' in title:
# title = title.replace('(ITA)','').strip()
# showtitle = title
# title += ' '+support.typo(' [ITA] color kod')
#
# itemlist.append(
# Item(
# channel=item.channel,
# contentType="episode",
# action="episodios",
# title=title,
# url=scrapedurl,
# fulltitle=title,
# show=showtitle,
# thumbnail=""))
#
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
texto = item.url.lower().split('+')
data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data
jsondata = jsontools.load(data)
patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for title in jsondata:
data = str(httptools.downloadpage("%s/templates/header?check=1" % host, post="typeahead=%s" % title).data)
for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle)
for scrapedurl, scrapedtitle in matches
if all(t in scrapedtitle.lower()
for t in texto)]:
title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
showtitle = title
if '(ITA)' in title:
title = title.replace('(ITA)','').strip()
if 'Anime non esistente' in data:
continue
else:
title = title.replace('(ita)','(ITA)')
showtitle = title
title += ' '+support.typo(' [ITA] color kod')
if '(ITA)' in title:
title = title.replace('(ITA)', '').strip()
showtitle = title
title += ' ' + support.typo(' (ITA)')
itemlist.append(
Item(
channel=item.channel,
contentType="episode",
action="episodios",
title=title,
url=scrapedurl,
fulltitle=title,
show=showtitle,
thumbnail=""))
url = "%s/anime/%s" % (host, data)
logger.debug(title)
logger.debug(url)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist.append(
Item(
channel=item.channel,
contentType="episode",
action="episodios",
title=title,
url=url,
fulltitle=title,
show=showtitle,
thumbnail=""))
return itemlist
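In brief, the flow of the rewritten search_anime, as read from the code above:
# 1. GET  host/index.php?search=1&key=<texto>  ->  JSON list of matching titles
# 2. for each title, POST "typeahead=<title>" to host/templates/header?check=1;
#    a response containing 'Anime non esistente' means there is no page for it, so skip
# 3. otherwise build url = "%s/anime/%s" % (host, data), append an episodios Item,
#    and mark ITA/Sub-ITA from the '(ITA)' suffix in the title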
@@ -408,10 +399,9 @@ def search_anime(item):
def search(item, texto):
logger.info(item.channel + " search")
itemlist = []
item.url = texto
try:
return search_anime(item)
return search_anime(item, texto)
except:
import sys


@@ -25,7 +25,7 @@ def findhost():
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', 'wstream']
list_quality = ['HD', 'default']
list_quality = ['HD', 'SD', 'default']
checklinks = config.get_setting('checklinks', 'cineblog01')
checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
@@ -33,7 +33,7 @@ checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
# exclude the site's 'service' posts
blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO &#x25b6; TROVA L&#8217;INDIRIZZO UFFICIALE ',
'Aggiornamento Quotidiano Serie TV', 'OSCAR 2019 ▶ CB01.UNO: Vota il tuo film preferito! 🎬',
'Openload: la situazione. Benvenuto Verystream']
'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?']
def mainlist(item):
@@ -105,6 +105,7 @@ def newest(categoria):
findhost()
itemlist = []
item = Item()
item.contentType = 'movie'
item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
return support.scrape(item, r'<a href=([^>]+)>([^<([]+)(?:\[([A-Z]+)\])?\s\(([0-9]{4})\)<\/a>',
['url', 'title', 'quality', 'year'],
@@ -229,11 +230,10 @@ def findvideos(item):
matches = re.compile(patron, re.DOTALL).findall(streaming)
for scrapedurl, scrapedtitle in matches:
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
title = "[COLOR " + color + "]" + desc_txt + ":[/COLOR] " + item.fulltitle + " [COLOR grey]" + QualityStr + "[/COLOR] [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
title=scrapedtitle,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
@@ -264,37 +264,18 @@ def findvideos(item):
# Extract the content - Streaming HD
load_links(itemlist, '<strong>Streaming HD[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "yellow", "Streaming HD", "HD")
autoplay.start(itemlist, item)
# Extract the content - Streaming 3D
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "pink", "Streaming 3D")
support.videolibrary(itemlist, item)
return support.server(item, itemlist=itemlist)
# Extract the content - Download
# load_links(itemlist, '<strong>Download:</strong>(.*?)<tableclass=cbtable height=30>', "aqua", "Download")
# Extract the content - Download HD
# load_links(itemlist, '<strong>Download HD[^<]+</strong>(.*?)<tableclass=cbtable width=100% height=20>', "azure", "Download HD")
if len(itemlist) == 0:
itemlist = servertools.find_video_items(item=item)
# Required for link filtering
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# Required for FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item)
return itemlist
def findvid_serie(item):
def load_vid_series(html, item, itemlist, blktxt):
@@ -305,11 +286,11 @@ def findvid_serie(item):
for match in matches:
scrapedurl = match.group(1)
scrapedtitle = match.group(2)
title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
# title = item.title + " [COLOR blue][" + scrapedtitle + "][/COLOR]"
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
title=scrapedtitle,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
@@ -353,9 +334,7 @@ def findvid_serie(item):
else:
load_vid_series(data[lnkblkp[i]:lnkblkp[i + 1]], item, itemlist, lnkblk[i])
autoplay.start(itemlist, item)
return itemlist
return support.server(item, itemlist=itemlist)
def play(item):
@@ -389,4 +368,4 @@ def play(item):
else:
data = support.swzz_get_url(item)
return support.server(item, data, headers)
return servertools.find_video_items(data=data)


@@ -56,7 +56,7 @@ def serietv(item):
if item.args:
# the episode titles get folded into 'episode' but are not visible in newest!!!
patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"\s+target="_blank">(.*?)<\/a>'
listGroups = ['title', 'url', 'episode']
listGroups = ['title', 'url', 'title2']
patronNext = ''
else:
patron = r'<div class="post-thumb">.*?\s<img src="([^"]+)".*?><a href="([^"]+)".*?>(.*?(?:\((\d{4})\)|(\d{4}))?)<\/a><\/h2>'


@@ -4,8 +4,8 @@
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://fastsubita.com/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
"banner": "http://fastsubita.com/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
"thumbnail": "fastsubita.png",
"banner": "fastsubita.png",
"categories": ["tvshow", "vosi"],
"settings": [
{


@@ -6,7 +6,8 @@
import re
from core import scrapertools, httptools, tmdb
import channelselector
from core import scrapertools, httptools, tmdb, support
from core.item import Item
from platformcode import config, logger
from specials import autoplay
@@ -44,8 +45,8 @@ def mainlist(item):
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'pelicuals_tv', host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
@@ -113,24 +114,31 @@ def pelicuals_tv(item):
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
scrapedtitle = scrapedtitle.replace(scraped_1, "")
infoLabels = {}
infoLabels['season'] = episode[1]
infoLabels['episode'] = episode[2]
if "http:" in scrapedurl:
scrapedurl = scrapedurl
else:
scrapedurl = "http:" + scrapedurl
title = scraped_1+" - "+infoLabels['season']+"x"+infoLabels['episode']+" Sub-ITA"
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="tvshow",
title=scraped_1 + " " + scrapedtitle,
fulltitle=scraped_1 + " " + scrapedtitle,
title=title,
fulltitle=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scraped_1,
extra=item.extra,
contentSerieName=scraped_1+" ("+episode[0]+" Sub-Ita)",
contentSerieName=scraped_1,
contentLanguage='Sub-ITA',
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -380,7 +388,7 @@ def episodios(item,itemlist = []):
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode[2]
title = infoLabels['season']+'x'+infoLabels['episode']
title = infoLabels['season']+'x'+infoLabels['episode']+" Sub-ITA"
if "http:" not in scrapedurl:
scrapedurl = "http:" + scrapedurl


@@ -4,83 +4,51 @@
# Channel for ilgeniodellostreaming
# ------------------------------------------------------------
import re
import urlparse
from core import scrapertools, servertools, httptools
from core import tmdb
from platformcode import logger
from core import scrapertoolsV2, httptools, tmdb, support
from core.support import log, menu, aplay
from core.item import Item
from platformcode import config, logger
from specials import autoplay
__channel__ = "ilgeniodellostreaming"
host = "https://ilgeniodellostreaming.pw"
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'streamango', 'youtube']
list_servers = ['verystream', 'openload', 'streamango']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'ilgeniodellostreaming')
checklinks_number = config.get_setting('checklinks_number', 'ilgeniodellostreaming')
headers = [['Referer', host]]
PERPAGE = 10
def mainlist(item):
logger.info("kod.ilgeniodellostreaming mainlist")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Ultimi Film Inseriti[/COLOR]",
action="peliculas",
url="%s/film/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR azure]Film Per Categoria[/COLOR]",
action="categorias",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR azure]Serie TV[/COLOR]",
action="serie",
url="%s/serie/" % host,
thumbnail="http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"),
Item(channel=__channel__,
title="[COLOR azure]Nuovi Episodi Serie TV[/COLOR]",
action="nuoviep",
url="%s/aggiornamenti-serie/" % host,
thumbnail="http://www.ilmioprofessionista.it/wp-content/uploads/2015/04/TVSeries3.png"),
Item(channel=__channel__,
title="[COLOR azure]Anime[/COLOR]",
action="serie",
url="%s/anime/" % host,
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
autoplay.show_option(item.channel, itemlist)
log()
itemlist = []
menu(itemlist, 'Film', 'peliculas', host + '/film/')
menu(itemlist, 'Film Per Categoria', 'category', host, args='genres')
menu(itemlist, 'Film Per Anno', 'category', host, args='year')
menu(itemlist, 'Serie TV', 'peliculas', host + '/serie/', 'episode')
menu(itemlist, 'Nuovi Episodi Serie TV submenu', 'newep', host + '/aggiornamenti-serie/', 'episode')
menu(itemlist, 'Anime', 'peliculas', host + '/anime/', 'episode')
menu(itemlist, 'TV Show', 'peliculas', host + '/tv-show/', 'episode')
menu(itemlist, 'Cerca...', 'search', contentType='search')
aplay(item, itemlist, list_servers, list_quality)
return itemlist
def newest(categoria):
logger.info("kod.ilgeniodellostreaming newest" + categoria)
log(categoria)
itemlist = []
item = Item()
try:
if categoria == "film":
item.url = "%s/film/" % host
item.action = "peliculas"
itemlist = peliculas(item)
if categoria == "movie": item.url = host + '/film/'
elif categoria == "tvshow": item.url = host + '/serie/'
elif categoria == "anime": item.url = host + '/anime/'
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continue the search in case of error
except:
import sys
@@ -91,37 +59,16 @@ def newest(categoria):
return itemlist
def categorias(item):
logger.info("kod.ilgeniodellostreaming categorias")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="genres scrolling">(.*?)</ul>')
# Extract the content
patron = '<li[^>]+><a href="(.*?)"[^>]+>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
logger.info("title=[" + scrapedtitle + "]")
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
folder=True))
return itemlist
def category(item):
return support.scrape(item, r'<li.*?><a href="(.*?)"[^>]+>(.*?)<\/a>' ,['url', 'title'], action='peliculas', patron_block= r'<ul class="' + item.args + r' scrolling">(.*?)<\/ul>')
def search(item, texto):
logger.info("[ilgeniodellostreaming.py] " + item.url + " search " + texto)
log(texto)
item.url = host + "/?s=" + texto
try:
return peliculas_src(item)
return peliculas(item)
except:
import sys
@@ -132,262 +79,57 @@ def search(item, texto):
def peliculas_src(item):
logger.info("kod.ilgeniodellostreaming peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
patron = '<div class="thumbnail animation-2"><a href="(.*?)"><img src="(.*?)" alt="(.*?)" />[^>]+>(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtipo in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
if scrapedtipo == "TV":
itemlist.append(
Item(channel=__channel__,
action="episodios",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
folder=True))
else:
itemlist.append(
Item(channel=__channel__,
action="findvideos",
contentType="movie",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
patron = r'<div class="thumbnail animation-2"><a href="([^"]+)"><img src="([^"]+)" alt="[^"]+" \/>[^>]+>([^<]+)<\/span>.*?<a href.*?>([^<]+)<\/a>[^>]+>[^>]+>(?:<span class="rating">IMDb\s*([0-9.]+)<\/span>)?.*?(?:<span class="year">([0-9]+)<\/span>)?[^>]+>[^>]+><p>(.*?)<\/p>'
return support.scrape(item, patron, ['url', 'thumb', 'type', 'title', 'rating', 'year', 'plot'], headers, type_content_dict={'movie':['Film'], 'episode':['TV']}, type_action_dict={'findvideos':['Film'], 'episodios':['TV']})
def peliculas(item):
logger.info("kod.ilgeniodellostreaming peliculas")
if item.contentType == 'movie':
patron = r'<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="[^"]+"><\/a>[^>]+>[^>]+>[^>]+>\s*([0-9.]+)<\/div><span class="quality">([^<]+)<\/span>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<\/a>[^>]+>[^>]+>([^<]+)<\/span>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<div'
return support.scrape(item, patron, ['url', 'thumb', 'rating', 'quality', 'title', 'year', 'plot'], headers, patronNext='<span class="current">[^<]+<[^>]+><a href="([^"]+)"')
elif item.contentType == 'episode':
patron = r'<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="[^"]+"><\/a>[^>]+>[^>]+>[^>]+> ([0-9.]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>([^<]+)<.*?<div class="texto">([^<]+)'
return support.scrape(item, patron, ['url', 'thumb', 'rating', 'title', 'year', 'plot'], headers, action='episodios', patronNext='<span class="current">[^<]+<[^>]+><a href="([^"]+)"')
else:
patron = r'<div class="thumbnail animation-2"><a href="([^"]+)"><img src="([^"]+)" alt="[^"]+" \/>[^>]+>([^<]+)<\/span>.*?<a href.*?>([^<]+)<\/a>[^>]+>[^>]+>(?:<span class="rating">IMDb\s*([0-9.]+)<\/span>)?.*?(?:<span class="year">([0-9]+)<\/span>)?[^>]+>[^>]+><p>(.*?)<\/p>'
return support.scrape(item, patron, ['url', 'thumb', 'type', 'title', 'rating', 'year', 'plot'], headers, type_content_dict={'movie':['Film'], 'episode':['TV']}, type_action_dict={'findvideos':['Film'], 'episodios':['TV']})
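How the two type dictionaries above are presumably meant to work: the patron's type capture selects both contentType and action, so a row labelled 'TV' is routed to episodios as an episode, while 'Film' becomes a movie routed to findvideos:
# type capture -> contentType       type capture -> action
# 'Film'       -> 'movie'           'Film'       -> 'findvideos'
# 'TV'         -> 'episode'         'TV'         -> 'episodios'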
def newep(item):
log()
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
page = 1
if item.page:
page = item.page
# Extract the content
patron = '<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, r'<div class="poster"><img src="([^"]+)" alt="([^"]+)">[^>]+><a href="([^"]+)">')[0]
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
for i, (thumb, title, url) in enumerate(matches):
if (page - 1) * PERPAGE > i: continue
if i >= page * PERPAGE: break
title = scrapertoolsV2.decodeHtmlentities(title)
itemlist.append(
Item(channel=__channel__,
Item(channel=item.channel,
action="findvideos",
contentType="movie",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
# Pagination
patronvideos = '<span class="current">[^<]+<[^>]+><a href=\'(.*?)\''
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
fulltitle=title,
show=title,
title= support.typo(title,'bold'),
url=url,
thumbnail=thumb))
support.pagination(itemlist, item, page, PERPAGE)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def nuoviep(item):
logger.info("kod.ilgeniodellostreaming nuoviep")
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
# Load the page
data = httptools.downloadpage(item.url).data
#blocco = scrapertools.find_single_match(data,
# r'<div class="items" style="margin-bottom:0px!important">(.*?)<div class="items" style="margin-bottom:0px!important">')
# Extract the content
patron = r'<div class="poster"><img src="([^"]+)" alt="([^"]+)">[^>]+><a href="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for i, (scrapedthumbnail, scrapedtitle, scrapedurl) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=__channel__,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=__channel__,
extra=item.extra,
action="nuoviep",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def serie(item):
logger.info("kod.ilgeniodellostreaming peliculas")
itemlist = []
# Load the page
data = httptools.downloadpage(item.url).data
# Extract the content
patron = '<div class="poster">\s*<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=__channel__,
action="episodios",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
# Pagination
patronvideos = '<span class="current">[^<]+<[^>]+><a href=\'(.*?)\''
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
logger.info("kod.ilgeniodellostreaming episodios")
itemlist = []
patron = '<ul class="episodios">.*?</ul>'
data = httptools.downloadpage(item.url).data
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
patron = '<li><div class="imagen"><a href="(.*?)">[^>]+>[^>]+>[^>]+><.*?numerando">(.*?)<[^>]+>[^>]+>[^>]+>(.*?)</a>'
episodi = re.compile(patron, re.DOTALL).findall(match)
for scrapedurl, scrapednumber, scrapedtitle in episodi:
n0 = scrapednumber.replace(" ", "")
n1 = n0.replace("-", "x")
itemlist.append(Item(channel=__channel__,
action="findvideos",
contentType="episode",
fulltitle=n1 + " " + scrapedtitle,
show=n1 + " " + scrapedtitle,
title=n1 + " [COLOR orange] " + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=item.thumbnail,
plot=item.plot,
folder=True))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=__channel__,
title=config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
return support.scrape(item, r'<a href="([^"]+)"><img src="([^"]+)">.*?<div class="numerando">([^<]+).*?<div class="episodiotitle">[^>]+>([^<]+)<\/a>',['url', 'thumb', 'episode', 'title'], patron_block='<div id="seasons">(.*?)<div class="sbox')
def findvideos(item):
logger.info()
data = httptools.downloadpage(item.url).data
patron = '<td><a class="link_a" href="(.*?)" target="_blank">'
matches = re.compile(patron, re.DOTALL).findall(data)
log()
matches, data = support.match(item, '<iframe class="metaframe rptss" src="([^"]+)"[^>]+>',headers=headers)
for url in matches:
html = httptools.downloadpage(url).data
data += str(scrapertools.find_multiple_matches(html, 'window.location.href=\'(.*?)\''))
html = httptools.downloadpage(url, headers=headers).data
data += str(scrapertoolsV2.find_multiple_matches(html, '<meta name="og:url" content="([^"]+)">'))
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + " - " + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
videoitem.contentType = item.contentType
videoitem.language = IDIOMAS['Italiano']
# Required for link filtering
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# Required for FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
return support.server(item, data)


@@ -4,8 +4,8 @@
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/seriehd.png",
"banner": "https://raw.githubusercontent.com/Zanzibar82/images/master/posters/seriehd.png",
"thumbnail": "seriehd.png",
"banner": "seriehd.png",
"categories": ["tvshow"],
"settings": [
{


@@ -4,8 +4,8 @@
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
"banner": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
"thumbnail": "serietvsubita.png",
"banner": "serietvsubita.png",
"categories": ["tvshow"],
"settings": [
{


@@ -52,7 +52,7 @@ def mainlist(item):
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones ','')
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones ','').replace('In The Dark 2019','In The Dark (2019)').strip()
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
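A self-contained re-creation of this normalisation, runnable outside the addon (decodeHtmlentities omitted), showing the effect of the new In The Dark replace:
import re

def cleantitle(scrapedtitle):
    scrapedtitle = (scrapedtitle.strip()
                    .replace('[HD]', '').replace('’', '\'').replace('×', 'x')
                    .replace('Game of Thrones ', '')
                    .replace('In The Dark 2019', 'In The Dark (2019)').strip())
    year = re.search(r'\((\d{4})\)', scrapedtitle)
    if year:
        scrapedtitle = scrapedtitle.replace('(%s)' % year.group(1), '')
    return scrapedtitle.strip()

print(cleantitle('In The Dark 2019 1×03 [HD]'))  # -> 'In The Dark  1x03'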
@@ -195,7 +195,7 @@ def episodios(item, itemlist=[]):
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode
fullepisode+=' Sub-ITA'
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
@@ -293,23 +293,27 @@ def peliculas_tv(item):
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
infoLabels = {}
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
infoLabels['season'] = episode[1]
infoLabels['episode'] = episode[2]
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
title=title+" - "+episode[0]+" Sub-ITA",
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title+" ("+episode[0]+" Sub-Ita)",
contentSerieName=title,
contentLanguage='Sub-ITA',
plot=scrapedplot,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)


@@ -4,8 +4,8 @@
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
"banner": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
"thumbnail": "serietvu.png",
"banner": "serietvu.png",
"categories": ["tvshow"],
"settings": [
{


@@ -52,7 +52,7 @@ def mainlist(item):
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones ','').replace('Flash 2014','Flash')
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace(' Il Trono di Spade','').replace('Flash 2014','Flash')
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
@@ -121,10 +121,14 @@ def episodios(item):
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches:
number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip()
title = value + "x" + number.zfill(2)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=value + "x" + number.zfill(2),
title=title,
fulltitle=scrapedtitle,
contentType="episode",
url=scrapedurl,
@@ -145,6 +149,7 @@ def findvideos(item):
support.log(item.channel + " findvideos")
itemlist = support.server(item, data=item.url)
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Check that the links are valid
@@ -192,6 +197,45 @@ def findepisodevideo(item):
def latestep(item):
support.log(item.channel + " latestep")
itemlist = []
titles = []
# fetch the episodes listed on the home page in the 'Ultimi episodi aggiunti' section
data = httptools.downloadpage(host, headers=headers).data
block = scrapertools.find_single_match(data,r"Ultimi episodi aggiunti.*?<h2>")
regex = r'<a href="([^"]*)"\sdata-src="([^"]*)"\sclass="owl-lazy.*?".*?class="title">(.*?)<small>\((\d*?)x(\d*?)\s(Sub-Ita|Ita)'
matches = re.compile(regex, re.DOTALL).findall(block)
for scrapedurl, scrapedimg, scrapedtitle, scrapedseason, scrapedepisode, scrapedlanguage in matches:
infoLabels = {}
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
infoLabels['year'] = year
infoLabels['episode'] = scrapedepisode
infoLabels['season'] = scrapedseason
episode = scrapedseason+"x"+scrapedepisode
scrapedtitle = cleantitle(scrapedtitle)
title = scrapedtitle+" - "+episode
contentlanguage = ""
if scrapedlanguage.strip().lower() != 'ita':
title +=" Sub-ITA"
contentlanguage = 'Sub-ITA'
titles.append(title)
itemlist.append(
Item(channel=item.channel,
action="findepisodevideo",
title=title,
fulltitle=title,
url=scrapedurl,
extra=[[scrapedseason,scrapedepisode]],
thumbnail=scrapedimg,
contentSerieName=scrapedtitle,
contentLanguage=contentlanguage,
contentType='episode',
infoLabels=infoLabels,
folder=True))
data = httptools.downloadpage(item.url, headers=headers).data
@@ -209,23 +253,38 @@ def latestep(item):
infoLabels['tvshowtitle'] = scrapedtitle
episodio = re.compile(r'(\d+)x(\d+)', re.DOTALL).findall(scrapedinfo)
title = "%s %s" % (scrapedtitle, scrapedinfo)
infoLabels['episode'] = episodio[0][1]
infoLabels['season'] = episodio[0][0]
episode = infoLabels['season'] + "x" + infoLabels['episode']
title = "%s - %s" % (scrapedtitle, episode)
title = title.strip()
contentlanguage = ""
if 'sub-ita' in scrapedinfo.lower():
title+=" Sub-ITA"
contentlanguage = 'Sub-ITA'
if title in titles: continue
itemlist.append(
Item(channel=item.channel,
action="findepisodevideo",
title=title,
fulltitle=scrapedtitle,
fulltitle=title,
url=scrapedurl,
extra=episodio,
thumbnail=scrapedimg,
show=scrapedtitle,
contentTitle=scrapedtitle,
contentSerieName=title,
contentSerieName=scrapedtitle,
contentLanguage=contentlanguage,
infoLabels=infoLabels,
contentType='episode',
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# logger.debug("".join(map(str,itemlist)))
return itemlist
@@ -290,7 +349,7 @@ def categorie(item):
Item(channel=item.channel,
action="lista_serie",
title=scrapedtitle,
contentType="tv",
contentType="tvshow",
url="%s%s" % (host, scrapedurl),
thumbnail=item.thumbnail,
folder=True))


@@ -78,18 +78,18 @@ def get_url_headers(url):
def load_cookies(alfa_s=False):
cookies_lock.acquire()
if os.path.isfile(ficherocookies):
if not alfa_s: logger.info("Leyendo fichero cookies")
if not alfa_s: logger.info("Reading cookies File")
try:
cj.load(ficherocookies, ignore_discard=True)
except:
if not alfa_s: logger.info("El fichero de cookies existe pero es ilegible, se borra")
if not alfa_s: logger.info("The cookie file exists but is illegible. Deleted")
os.remove(ficherocookies)
cookies_lock.release()
def save_cookies(alfa_s=False):
cookies_lock.acquire()
if not alfa_s: logger.info("Guardando cookies...")
if not alfa_s: logger.info("Saving cookies...")
cj.save(ficherocookies, ignore_discard=True)
cookies_lock.release()
@@ -243,18 +243,18 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if not alfa_s:
logger.info("----------------------------------------------")
logger.info("downloadpage Alfa: %s" %__version)
logger.info("downloadpage KOD: %s" %__version)
logger.info("----------------------------------------------")
logger.info("Timeout: %s" % timeout)
logger.info("URL: " + url)
logger.info("Dominio: " + urlparse.urlparse(url)[1])
logger.info("Domain: " + urlparse.urlparse(url)[1])
if post:
logger.info("Peticion: POST" + proxy_stat)
logger.info("Request: POST" + proxy_stat)
else:
logger.info("Peticion: GET" + proxy_stat)
logger.info("Usar Cookies: %s" % cookies)
logger.info("Descargar Pagina: %s" % (not only_headers))
logger.info("Fichero de Cookies: " + ficherocookies)
logger.info("Request: GET" + proxy_stat)
logger.info("Use Cookies: %s" % cookies)
logger.info("Download Page: %s" % (not only_headers))
logger.info("Cookie File: " + ficherocookies)
logger.info("Headers:")
for header in request_headers:
logger.info("- %s: %s" % (header, request_headers[header]))
@@ -269,7 +269,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
opener = urllib2.build_opener(*handlers)
if not alfa_s:
logger.info("Realizando Peticion")
logger.info("Making Request")
# Counter
inicio = time.time()
@@ -321,7 +321,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
response["url"] = handle.geturl()
if not alfa_s:
logger.info("Terminado en %.2f segundos" % (response["time"]))
logger.info("Finished in %.2f seconds" % (response["time"]))
logger.info("Response sucess: %s" % (response["sucess"]))
logger.info("Response code: %s" % (response["code"]))
logger.info("Response error: %s" % (response["error"]))
@@ -350,22 +350,22 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if response["headers"].get('content-encoding') == 'gzip':
if not alfa_s:
logger.info("Descomprimiendo...")
logger.info("Decompressing...")
data_alt = response["data"]
try:
response["data"] = gzip.GzipFile(fileobj=StringIO(response["data"])).read()
if not alfa_s:
logger.info("Descomprimido")
logger.info("Decompressed")
except:
if not alfa_s:
logger.info("No se ha podido descomprimir con gzip. Intentando con zlib")
logger.info("Could not decompress with gzip. Trying with zlib")
response["data"] = data_alt
try:
import zlib
response["data"] = zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(response["data"])
except:
if not alfa_s:
logger.info("No se ha podido descomprimir con zlib")
logger.info("Could not decompress with zlib")
response["data"] = data_alt
# Anti Cloudflare
@@ -375,16 +375,16 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
if cf.is_cloudflare:
count_retries += 1
if not alfa_s:
logger.info("cloudflare detectado, esperando %s segundos..." % cf.wait_time)
logger.info("CloudFlare detected, waiting %s seconds..." % cf.wait_time)
auth_url = cf.get_url()
if not alfa_s:
logger.info("Autorizando... intento %d url: %s" % (count_retries, auth_url))
logger.info("Authorizing... attempt %d url: %s" % (count_retries, auth_url))
tt = downloadpage(auth_url, headers=request_headers, replace_headers=True, count_retries=count_retries, ignore_response_code=True, count_retries_tot=count_retries_tot, proxy=proxy, proxy_web=proxy_web, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
if tt.code == 403:
tt = downloadpage(url, headers=request_headers, replace_headers=True, count_retries=count_retries, ignore_response_code=True, count_retries_tot=count_retries_tot, proxy=proxy, proxy_web=proxy_web, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
if tt.sucess:
if not alfa_s:
logger.info("Autorización correcta, descargando página")
logger.info("Correct authorization, downloading page")
resp = downloadpage(url=response["url"], post=post, headers=headers, timeout=timeout,
follow_redirects=follow_redirects, count_retries=count_retries,
cookies=cookies, replace_headers=replace_headers, add_referer=add_referer, proxy=proxy, proxy_web=proxy_web, count_retries_tot=count_retries_tot, forced_proxy=forced_proxy, proxy_addr_forced=proxy_addr_forced, alfa_s=alfa_s)
@@ -397,7 +397,7 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
response["url"] = resp.url
else:
if not alfa_s:
logger.info("No se ha podido autorizar")
logger.info("Unable to authorize")
# If errors occur while using a proxy, refresh the proxy and retry as many times as proxy_retries indicates
try:
@@ -456,7 +456,7 @@ def channel_proxy_list(url, forced_proxy=None):
proxy_channel_bloqued = dict()
proxy_channel_bloqued = ast.literal_eval(proxy_channel_bloqued_str)
except:
logger.debug('Proxytools no inicializado correctamente')
logger.debug('Proxytools not initialized correctly')
return False
if not url.endswith('/'):


@@ -365,6 +365,9 @@ def get_season_and_episode(title):
@return: Season and episode number in "1x01" format, or an empty string if none were found
"""
filename = ""
# 4l3x87 - fix for series example 9-1-1
# original_title = title
# title = title.replace('9-1-1','')
patrons = ["(\d+)\s*[x-]\s*(\d+)", "(\d+)\s*×\s*(\d+)", "(?:s|t)(\d+)e(\d+)",
"(?:season|temp|stagione\w*)\s*(\d+)\s*(?:capitulo|epi|episode|episodio\w*)\s*(\d+)"]
@@ -372,6 +375,7 @@ def get_season_and_episode(title):
for patron in patrons:
try:
matches = re.compile(patron, re.I).search(title)
if matches:
if len(matches.group(1)) == 1:
filename = matches.group(1) + "x" + matches.group(2).zfill(2)
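A standalone demonstration of the patterns above; simplified, since the real get_season_and_episode also normalises the numbers it returns:
import re

patrons = [r"(\d+)\s*[x-]\s*(\d+)", r"(\d+)\s*×\s*(\d+)", r"(?:s|t)(\d+)e(\d+)",
           r"(?:season|temp|stagione\w*)\s*(\d+)\s*(?:capitulo|epi|episode|episodio\w*)\s*(\d+)"]

def season_episode(title):
    for patron in patrons:
        matches = re.compile(patron, re.I).search(title)
        if matches:
            return matches.group(1) + "x" + matches.group(2).zfill(2)
    return ""

print(season_episode("Gomorra 3x7"))            # -> 3x07
print(season_episode("Stagione 2 Episodio 5"))  # -> 2x05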


@@ -141,7 +141,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES =', matches)
known_keys = ['url', 'title', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating'] #by greko aggiunto episode
known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type'] #by greko aggiunto episode
for match in matches:
if len(listGroups) > len(match): # to fix a bug
match = list(match)
@@ -157,12 +157,14 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
if scraped["quality"] and scraped["episode"]: # by greko aggiunto episode
longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B][COLOR blue][' + scraped["quality"] + '][/COLOR]' # by greko aggiunto episode
elif scraped["episode"]: # by greko aggiunto episode
longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B]' # by greko aggiunto episode
else:
longtitle = '[B]' + title + '[/B]'
longtitle = typo(title, 'bold')
if scraped['quality']: longtitle = longtitle + typo(scraped['quality'], '_ [] color kod')
if scraped['episode']:
scraped['episode'] = re.sub(r'\s-\s|-|x|&#8211', 'x' , scraped['episode'])
longtitle = typo(scraped['episode'] + ' - ', 'bold') + longtitle
if scraped['title2']:
title2 = scrapertoolsV2.decodeHtmlentities(scraped["title2"]).strip()
longtitle = longtitle + typo(title2, 'bold _ -- _')
if item.infoLabels["title"] or item.fulltitle: # if title is set, probably this is a list of episodes or video sources
infolabels = item.infoLabels
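Putting the new branches together, the assembled longtitle grows like this (markup shown schematically; the exact separators and colors come from the typo() format strings and are an assumption here):
# with title='Gomorra', quality='HD', episode='3x07', title2='Episodio 7':
#   start:     [B]Gomorra[/B]
#   + quality: [B]Gomorra[/B] [HD]
#   + episode: [B]3x07 - [/B][B]Gomorra[/B] [HD]
#   + title2:  [B]3x07 - [/B][B]Gomorra[/B] [HD] -- [B]Episodio 7[/B]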
@@ -492,15 +494,29 @@ def nextPage(itemlist, item, data, patron, function_level=1):
return itemlist
def server(item, data='', headers='', AutoPlay=True, CheckLinks=True):
def pagination(itemlist, item, page, perpage, function_level=1):
if len(itemlist) >= page * perpage:
itemlist.append(
Item(channel=item.channel,
action=inspect.stack()[function_level][3],
contentType=item.contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
url=item.url,
args=item.args,
page=page + 1,
thumbnail=thumb()))
return itemlist
def server(item, data='', itemlist='', headers='', AutoPlay=True, CheckLinks=True):
if not data:
data = httptools.downloadpage(item.url, headers=headers).data
itemlist = servertools.find_video_items(data=str(data))
if not itemlist:
itemlist = servertools.find_video_items(data=str(data))
for videoitem in itemlist:
videoitem.title = "".join([item.title, ' ', typo(videoitem.title, 'color kod []')])
videoitem.title = "".join([item.title, ' ', typo(videoitem.title, 'color kod []'), typo(videoitem.quality, 'color kod []') if videoitem.quality else ""])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
@@ -530,6 +546,7 @@ def controls(itemlist, item, AutoPlay=True, CheckLinks=True):
if AutoPlay == True:
autoplay.start(itemlist, item)
videolibrary(itemlist, item)
return itemlist
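A usage sketch for the new pagination helper, modelled on newep in ilgeniodellostreaming above; PERPAGE and the sample matches list are illustrative, not part of this commit:
from core import support
from core.item import Item

PERPAGE = 10

def lista(item):
    itemlist = []
    page = item.page if item.page else 1
    # illustrative scraped data; a real channel builds this from item.url
    matches = [('thumb%d.png' % i, 'Title %d' % i, 'https://example.com/%d' % i)
               for i in range(25)]
    for i, (thumb, title, url) in enumerate(matches):
        if (page - 1) * PERPAGE > i: continue  # skip entries before this page
        if i >= page * PERPAGE: break          # stop at the page boundary
        itemlist.append(Item(channel=item.channel, action='findvideos',
                             title=title, url=url, thumbnail=thumb))
    # pagination() appends a "next page" Item carrying page + 1, but only when
    # a full page was produced (len(itemlist) >= page * perpage)
    support.pagination(itemlist, item, page, PERPAGE)
    return itemlist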


@@ -324,8 +324,9 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
__leer_datos(otmdb_global)
if lock and lock.locked():
lock.release()
# 4l3x87 - fix for overlap infoLabels if there is episode or season
# if lock and lock.locked():
# lock.release()
if item.infoLabels['episode']:
try:
@@ -354,6 +355,10 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
item.infoLabels['rating'] = episodio['episodio_vote_average']
item.infoLabels['votes'] = episodio['episodio_vote_count']
# 4l3x87 - fix for overlap infoLabels if there is episode or season
if lock and lock.locked():
lock.release()
return len(item.infoLabels)
else:
@@ -374,8 +379,17 @@ def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda=def_lang, lock=None
if temporada['poster_path']:
item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path']
item.thumbnail = item.infoLabels['poster_path']
# 4l3x87 - fix for overlap infoLabels if there is episode or season
if lock and lock.locked():
lock.release()
return len(item.infoLabels)
# 4l3x87 - fix for overlap infoLabels if there is episode or season
if lock and lock.locked():
lock.release()
# Search...
else:
otmdb = copy.copy(otmdb_global)


@@ -65,7 +65,7 @@ def run(item=None):
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
if not config.get_setting('show_once'):
from platformcode import xbmc_videolibrary
xbmc_videolibrary.ask_set_content(1)
xbmc_videolibrary.ask_set_content(1, config.get_setting('videolibrary_kodi_force'))
config.set_setting('show_once', True)
logger.info(item.tostring())


@@ -23,51 +23,51 @@ def check_addon_init():
logger.info()
# Monitor subtask. Fires every X hours to check whether there are FIXES for the addon
def check_addon_monitor():
logger.info()
# Get the interval between updates and whether messages are wanted
try:
timer = int(config.get_setting('addon_update_timer')) # Interval between updates, in Alfa settings
if timer <= 0:
return # 0. No updates wanted
verbose = config.get_setting('addon_update_message')
except:
timer = 12 # Every 12 hours by default
verbose = False # No messages by default
timer = timer * 3600 # Convert to seconds
if config.get_platform(True)['num_version'] >= 14: # If this is Kodi, launch the monitor
import xbmc
monitor = xbmc.Monitor()
else: # Otherwise run a single update and exit
check_addon_updates(verbose) # Run the update
return
while not monitor.abortRequested(): # Infinite loop until Kodi shuts down
check_addon_updates(verbose) # Run the update
if monitor.waitForAbort(timer): # Wait the scheduled time or until Kodi aborts
break # Kodi is shutting down, exit
return
# Launch the FIXES update service
try:
threading.Thread(target=check_addon_monitor).start() # Create an independent thread that lives until Kodi exits
time.sleep(5) # Let the first check finish...
except: # If there are threading problems, call it just once
try:
timer = int(config.get_setting('addon_update_timer')) # Interval between updates, in Alfa settings
if timer <= 0:
return # 0. No updates wanted
verbose = config.get_setting('addon_update_message')
except:
verbose = False # No messages by default
pass
check_addon_updates(verbose) # Run the update, per Alfa settings
time.sleep(5) # Let the first check finish...
# def check_addon_monitor():
# logger.info()
#
# # Get the interval between updates and whether messages are wanted
# try:
# timer = int(config.get_setting('addon_update_timer')) # Interval between updates, in Alfa settings
# if timer <= 0:
# return # 0. No updates wanted
# verbose = config.get_setting('addon_update_message')
# except:
# timer = 12 # Every 12 hours by default
# verbose = False # No messages by default
# timer = timer * 3600 # Convert to seconds
#
# if config.get_platform(True)['num_version'] >= 14: # If this is Kodi, launch the monitor
# import xbmc
# monitor = xbmc.Monitor()
# else: # Otherwise run a single update and exit
# check_addon_updates(verbose) # Run the update
# return
#
# while not monitor.abortRequested(): # Infinite loop until Kodi shuts down
#
# check_addon_updates(verbose) # Run the update
#
# if monitor.waitForAbort(timer): # Wait the scheduled time or until Kodi aborts
# break # Kodi is shutting down, exit
#
# return
#
# # Launch the FIXES update service
# try:
# threading.Thread(target=check_addon_monitor).start() # Create an independent thread that lives until Kodi exits
# time.sleep(5) # Let the first check finish...
# except: # If there are threading problems, call it just once
# try:
# timer = int(config.get_setting('addon_update_timer')) # Interval between updates, in Alfa settings
# if timer <= 0:
# return # 0. No updates wanted
# verbose = config.get_setting('addon_update_message')
# except:
# verbose = False # No messages by default
# pass
# check_addon_updates(verbose) # Run the update, per Alfa settings
# time.sleep(5) # Let the first check finish...
return


@@ -535,12 +535,17 @@ def set_content(content_type, silent=False):
continuar = True
msg_text = ""
videolibrarypath = config.get_setting("videolibrarypath")
forced = config.get_setting('videolibrary_kodi_force')
if content_type == 'movie':
scraper = [config.get_localized_string(70093), config.get_localized_string(70096)]
seleccion = platformtools.dialog_select(config.get_localized_string(70094), scraper)
if forced:
seleccion = 0 # tmdb
else:
seleccion = platformtools.dialog_select(config.get_localized_string(70094), scraper)
# Install The Movie Database
if seleccion == -1 or seleccion == 0:
if not xbmc.getCondVisibility('System.HasAddon(metadata.themoviedb.org)'):
if not silent:
@@ -560,7 +565,7 @@ def set_content(content_type, silent=False):
continuar = (install and xbmc.getCondVisibility('System.HasAddon(metadata.themoviedb.org)'))
if not continuar:
msg_text = config.get_localized_string(60047)
if continuar:
if continuar and not forced:
xbmc.executebuiltin('xbmc.addon.opensettings(metadata.themoviedb.org)', True)
# Install Universal Movie Scraper
@@ -584,12 +589,15 @@ def set_content(content_type, silent=False):
continuar = (install and continuar)
if not continuar:
msg_text = config.get_localized_string(70097)
if continuar:
if continuar and not forced:
xbmc.executebuiltin('xbmc.addon.opensettings(metadata.universal)', True)
else: # SERIES
scraper = [config.get_localized_string(70098), config.get_localized_string(70093)]
seleccion = platformtools.dialog_select(config.get_localized_string(70107), scraper)
if forced:
seleccion = 0 # tvdb
else:
seleccion = platformtools.dialog_select(config.get_localized_string(70107), scraper)
# Install The TVDB
if seleccion == -1 or seleccion == 0:
@@ -611,7 +619,7 @@ def set_content(content_type, silent=False):
continuar = (install and xbmc.getCondVisibility('System.HasAddon(metadata.tvdb.com)'))
if not continuar:
msg_text = config.get_localized_string(70099)
if continuar:
if continuar and not forced:
xbmc.executebuiltin('xbmc.addon.opensettings(metadata.tvdb.com)', True)
# Install The Movie Database
@@ -636,7 +644,7 @@ def set_content(content_type, silent=False):
continuar = (install and continuar)
if not continuar:
msg_text = config.get_localized_string(60047)
if continuar:
if continuar and not forced:
xbmc.executebuiltin('xbmc.addon.opensettings(metadata.tvshows.themoviedb.org)', True)
idPath = 0

[8 binary image files added (new channel thumbnails and banners, 4.1 KiB to 113 KiB); contents not shown]


@@ -40,6 +40,7 @@
<setting id="folder_tvshows" type="text" label="70118" default="SERIES"/>
<setting id="folder_movies" type="text" label="70119" default="CINE"/>
<setting id="videolibrary_kodi_flag" type="number" label="" default="0" visible="false"/>
<setting id="videolibrary_kodi_force" type="bool" label="" default="false" visible="false"/>
<setting id="videolibrary_kodi" type="bool" label="70120" enable="lt(-1,2)+eq(0,false)" default="false"/>
</category>
<category label="70121">
@@ -145,5 +146,4 @@
<setting label="70583" type="lsep"/>
<setting id="addon_quasar_update" type="bool" label="70584" default="false"/>
</category>
</settings>


@@ -143,7 +143,7 @@ def start(itemlist, item):
# 2: Servers only
# 3: Qualities only
# 4: Do not sort
if (settings_node['custom_servers'] and settings_node['custom_quality']):
if (settings_node['custom_servers'] and settings_node['custom_quality']) or get_setting('autoplay'):
priority = settings_node['priority'] # 0: Servers and qualities, or 1: Qualities and servers
elif settings_node['custom_servers']:
priority = 2 # Servers only


@@ -396,6 +396,10 @@ def get_title(item):
if not item.contentSeason:
item.contentSeason = '1'
title = "%s - %sx%s" % (title, item.contentSeason, str(item.contentEpisodeNumber).zfill(2))
#4l3x87 - fix to add Sub-ITA in newest
if item.contentLanguage:
title+=" "+item.contentLanguage
elif item.contentTitle: # Si es una pelicula con el canal adaptado
title = item.contentTitle
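For instance, with the fix above (values illustrative):
# contentSerieName='In The Dark', contentSeason=1, contentEpisodeNumber=3,
# contentLanguage='Sub-ITA'
#   -> title == 'In The Dark - 1x03 Sub-ITA'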