Merge pull request #19 from kodiondemand/master

Update June 1: various fixes
This commit is contained in:
greko
2019-06-01 15:53:28 +02:00
committed by GitHub
18 changed files with 469 additions and 827 deletions

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per AnimeSaturn
# Thanks to me
# Thanks to 4l3x87
# ----------------------------------------------------------
import re
@@ -10,6 +10,7 @@ import urlparse
import channelselector
from core import httptools, tmdb, support, scrapertools, jsontools
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay, autorenumber
@@ -19,30 +20,19 @@ headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload','fembed']
list_quality = ['default']
list_servers = ['openload', 'fembed', 'animeworld']
list_quality = ['default', '480p', '720p', '1080p']
def mainlist(item):
support.log(item.channel + 'mainlist')
log()
itemlist = []
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host,args=['tvshow','alfabetico'])
support.menu(itemlist, 'Novità bold', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host, 'tvshow')
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host, args=['tvshow', 'alfabetico'])
support.menu(itemlist, 'Cerca', 'search', host)
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
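Every channel in this PR drops the same two blocks of mainlist boilerplate — the autoplay.init/autoplay.show_option pair and the hand-built "Configurazione Canale" settings Item — in favour of support.aplay and support.channel_config. A minimal sketch of what those helpers plausibly centralize, reconstructed from the removed lines (the real implementations live in core/support.py and may differ):

import channelselector
from core.item import Item
from specials import autoplay

def aplay(item, itemlist, list_servers, list_quality):
    # Register the channel's servers/qualities with autoplay and add its menu toggle.
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

def channel_config(item, itemlist):
    # Append the per-channel settings entry each channel used to build by hand.
    # typo() is support's own markup helper (these sketches live inside core/support.py).
    itemlist.append(
        Item(channel='setting',
             action='channel_config',
             title=typo('Configurazione Canale color lime'),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png')))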
@@ -50,7 +40,7 @@ def mainlist(item):
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x')
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('"', "'")
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
@@ -62,7 +52,7 @@ def cleantitle(scrapedtitle):
# ----------------------------------------------------------------------------------------------------------------
def lista_anime(item):
support.log(item.channel + " lista_anime")
log()
itemlist = []
PERPAGE = 15
@@ -78,37 +68,33 @@ def lista_anime(item):
for i, serie in enumerate(series):
matches.append(serie.split('||'))
else:
# Carica la pagina
data = httptools.downloadpage(item.url).data
# Estrae i contenuti
patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
scrapedplot = ""
scrapedthumbnail = ""
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)')
movie = False
showtitle = title
if '(ITA)' in title:
title = title.replace('(ITA)','').strip()
title = title.replace('(ITA)', '').strip()
showtitle = title
title += ' '+support.typo(' (ITA)')
else:
title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
infoLabels = {}
if 'Akira' in title:
movie = True
infoLabels['year']= 1988
infoLabels['year'] = 1988
if 'Dragon Ball Super Movie' in title:
movie = True
infoLabels['year'] = 2019
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
@@ -130,15 +116,7 @@ def lista_anime(item):
# Paginazione
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
action='lista_anime',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=scrapedurl,
args=item.args,
thumbnail=support.thumb()))
support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))
return itemlist
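The eight-line "next page" Item removed above (and from every other paginated list in this commit) collapses into a single support.nextPage call; page state now travels inside the URL after a literal '{}' separator and is split back off at the top of each list function. A sketch of the helper as implied by the two call styles in this PR — an explicit next_page URL, or a patron scraped out of data (an assumption; the actual helper may handle more cases):

from core import scrapertools
from core.item import Item
from platformcode import config

def nextPage(itemlist, item, data='', patron='', next_page=''):
    # Either the caller passes next_page directly, or it is scraped from the page.
    if not next_page and patron:
        next_page = scrapertools.find_single_match(data, patron)
    if next_page:
        itemlist.append(
            Item(channel=item.channel,
                 action=item.action,  # re-enter the same list function
                 contentType=item.contentType,
                 title=typo(config.get_localized_string(30992), 'color kod bold'),
                 url=next_page,
                 args=item.args,
                 thumbnail=thumb()))  # typo()/thumb() are support's own helpers
    return itemlist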
@@ -148,17 +126,14 @@ def lista_anime(item):
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
support.log(item.channel + " episodios")
log()
itemlist = []
data = httptools.downloadpage(item.url).data
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')
#movie or series
# movie or series
movie = scrapertools.find_single_match(data, r'\Episodi:</b>\s(\d*)\sMovie')
data = httptools.downloadpage(
host + "/loading_anime?anime_id=" + anime_id,
headers={
@@ -167,7 +142,7 @@ def episodios(item):
patron = r'<td style="[^"]+"><b><strong" style="[^"]+">(.+?)</b></strong></td>\s*'
patron += r'<td style="[^"]+"><a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
scrapedtitle = cleantitle(scrapedtitle)
@@ -187,62 +162,57 @@ def episodios(item):
fanart=item.thumbnail,
thumbnail=item.thumbnail))
if(((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType!='movie'):
if ((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType != 'movie':
item.url = itemlist[0].url
item.contentType = 'movie'
return findvideos(item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist, item)
support.videolibrary(itemlist,item,'bold color kod')
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
log()
originalItem = item
if(item.contentType == 'movie'):
if item.contentType == 'movie':
episodes = episodios(item)
if(len(episodes)>0):
if len(episodes) > 0:
item.url = episodes[0].url
itemlist = []
data = httptools.downloadpage(item.url).data
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub(r'\n|\t|\s+', ' ', data)
patron = r'<a href="([^"]+)"><div class="downloadestreaming">'
url = scrapertools.find_single_match(data, patron)
data = httptools.downloadpage(url).data
data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data
data = re.sub(r'\n|\t|\s+', ' ', data)
itemlist = support.server(item, data=data)
if item.contentType == 'movie':
support.videolibrary(itemlist, item, 'color kod')
# Controlla se i link sono validi
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def ultimiep(item):
logger.info(item.channel + "ultimiep")
log()
itemlist = []
post = "page=%s" % item.args['page'] if item.args and item.args['page'] else None
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
post = "page=%s" % p if p > 1 else None
data = httptools.downloadpage(
item.url, post=post, headers={
@@ -259,14 +229,23 @@ def ultimiep(item):
scrapedtitle2 = cleantitle(scrapedtitle2)
scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + ''
title = scrapedtitle
showtitle = scrapedtitle
if '(ITA)' in title:
title = title.replace('(ITA)', '').strip()
showtitle = title
else:
title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
contentType="tvshow",
contentType="episode",
action="findvideos",
title=scrapedtitle,
title=title,
url=scrapedurl,
fulltitle=scrapedtitle1,
show=scrapedtitle1,
show=showtitle,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -275,27 +254,17 @@ def ultimiep(item):
patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva'
next_page = scrapertools.find_single_match(data, patronvideos)
if next_page:
itemlist.append(
Item(
channel=item.channel,
action="ultimiep",
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=item.url,
thumbnail= support.thumb(),
args={'page':next_page},
folder=True))
support.nextPage(itemlist, item, next_page=(item.url + '{}' + next_page))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info(__channel__ + " newest" + categoria)
log(categoria)
itemlist = []
item = Item()
item.url = host
@@ -323,42 +292,9 @@ def newest(categoria):
# ----------------------------------------------------------------------------------------------------------------
def search_anime(item, texto):
logger.info(item.channel + " search_anime: "+texto)
log(texto)
itemlist = []
# data = httptools.downloadpage(host + "/animelist?load_all=1").data
# data = scrapertools.decodeHtmlentities(data)
#
# texto = texto.lower().split('+')
#
# patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
# matches = re.compile(patron, re.DOTALL).findall(data)
#
# for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle)
# for scrapedurl, scrapedtitle in matches
# if all(t in scrapedtitle.lower()
# for t in texto)]:
#
# title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
# showtitle = title
# if '(ITA)' in title:
# title = title.replace('(ITA)','').strip()
# showtitle = title
# title += ' '+support.typo(' [ITA] color kod')
#
# itemlist.append(
# Item(
# channel=item.channel,
# contentType="episode",
# action="episodios",
# title=title,
# url=scrapedurl,
# fulltitle=title,
# show=showtitle,
# thumbnail=""))
#
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data
jsondata = jsontools.load(data)
@@ -368,16 +304,15 @@ def search_anime(item, texto):
if 'Anime non esistente' in data:
continue
else:
title = title.replace('(ita)','(ITA)')
title = title.replace('(ita)', '(ITA)')
showtitle = title
if '(ITA)' in title:
title = title.replace('(ITA)', '').strip()
showtitle = title
title += ' ' + support.typo(' (ITA)')
else:
title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
url = "%s/anime/%s" % (host, data)
logger.debug(title)
logger.debug(url)
itemlist.append(
Item(
@@ -397,7 +332,7 @@ def search_anime(item, texto):
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info(item.channel + " search")
log(texto)
itemlist = []
try:
@@ -416,23 +351,20 @@ def search(item, texto):
def list_az(item):
support.log(item.channel+" list_az")
log()
itemlist = []
alphabet = dict()
# Scarico la pagina
data = httptools.downloadpage(item.url).data
# Articoli
patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(scrapedurl+'||'+scrapedtitle)
alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
for letter in sorted(alphabet):
itemlist.append(
@@ -444,4 +376,4 @@ def list_az(item):
return itemlist
# ================================================================================================================
# ================================================================================================================
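Throughout this file the httptools.downloadpage + re.compile(...).findall pairs give way to support.match, which call sites use either as matches, data = support.match(...) or with a trailing [0] when only the matches are needed. A plausible reading of the helper, inferred purely from those call sites (core/support.py is the authority):

import re
from core import httptools, scrapertools

def match(item, patron='', patron_block='', headers=None, url=''):
    # One download, an optional narrowing block, then the pattern; returns (matches, data).
    data = httptools.downloadpage(url if url else item.url, headers=headers).data
    data = re.sub(r'\n|\t|\s{2,}', ' ', data)  # flatten whitespace before matching
    if patron_block:
        data = scrapertools.find_single_match(data, patron_block)
    matches = scrapertools.find_multiple_matches(data, patron)
    return matches, data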

View File

@@ -18,11 +18,9 @@ headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'Italiano'}
list_language = IDIOMAS.values()
list_servers = ['diretto']
list_quality = []
list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
list_quality = ['default', '480p', '720p', '1080p']
checklinks = config.get_setting('checklinks', 'animeworld')
checklinks_number = config.get_setting('checklinks_number', 'animeworld')
def mainlist(item):
@@ -30,7 +28,6 @@ def mainlist(item):
itemlist =[]
support.menu(itemlist, 'Anime bold', 'lista_anime', host+'/az-list')
support.menu(itemlist, 'ITA submenu', 'build_menu', host + '/filter?', args=["anime", 'language[]=1'])
support.menu(itemlist, 'Sub-ITA submenu', 'build_menu', host + '/filter?', args=["anime", 'language[]=0'])
support.menu(itemlist, 'Archivio A-Z submenu', 'alfabetico', host+'/az-list', args=["tvshow","a-z"])
@@ -47,19 +44,10 @@ def mainlist(item):
def generi(item):
log()
itemlist = []
patron_block = r'</i>\sGeneri</a>\s*<ul class="sub">(.*?)</ul>'
patron = r'<a href="([^"]+)"\stitle="([^"]+)">'
matches = support.match(item,patron, patron_block, headers)[0]
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(
channel=item.channel,
action="video",
title=scrapedtitle,
url="%s%s" % (host,scrapedurl)))
return itemlist
return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='video')
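generi shrinks to a single support.scrape call. Judging from its arguments here and in the guardaserieclick hunks below — a patron, a list of field names such as ['url', 'thumb', 'episode', 'lang', 'title'], optional patron_block/patronNext, and a target action — the helper maps regex groups positionally onto item fields and returns the finished itemlist. A hedged sketch of that contract, building on the match and nextPage sketches above:

def scrape(item, patron, listGroups, headers=None, patron_block='', patronNext='', action='findvideos'):
    # Each regex group is assigned to the field name at the same position in listGroups.
    itemlist = []
    matches, data = match(item, patron, patron_block, headers)
    for groups in matches:
        if not isinstance(groups, tuple):
            groups = (groups,)
        scraped = dict(zip(listGroups, groups))
        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 contentType=item.contentType,
                 title=scraped.get('title', ''),
                 url=scraped.get('url', ''),
                 thumbnail=scraped.get('thumb', '')))
    if patronNext:
        nextPage(itemlist, item, data, patronNext)
    return itemlist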
# Crea Menu Filtro ======================================================
@@ -154,14 +142,14 @@ def lista_anime(item):
title = scrapedtitle.replace(year,'').replace(lang,'').strip()
original = scrapedoriginal.replace(year,'').replace(lang,'').strip()
if lang: lang = support.typo(lang,'_ color kod')
title = '[B]' + title + '[/B]' + lang + original
longtitle = '[B]' + title + '[/B]' + lang + original
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
contentType="episode",
action="episodios",
title=title,
title=longtitle,
url=scrapedurl,
thumbnail=scrapedthumb,
fulltitle=title,
@@ -183,7 +171,7 @@ def video(item):
log()
itemlist = []
matches, data = support.match(item, r'<a href="([^"]+)" class="poster.*?> <img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>')
matches, data = support.match(item, r'<a href="([^"]+)" class[^>]+><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>', headers=headers)
for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches:
# Cerca Info come anno o lingua nel Titolo
@@ -231,6 +219,9 @@ def video(item):
# Concatena le informazioni
lang = support.typo('Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else ''
info = ep + lang + year + ova + ona + movie + special
# Crea il title da visualizzare
@@ -268,7 +259,6 @@ def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data.replace('\n', '')
data = re.sub(r'>\s*<', '><', data)
block1 = scrapertoolsV2.find_single_match(data, r'<div class="widget servers".*?>(.*?)<div id="download"')
block = scrapertoolsV2.find_single_match(block1,r'<div class="server.*?>(.*?)<div class="server.*?>')
@@ -305,7 +295,7 @@ def findvideos(item):
log()
itemlist = []
episode = '1'
episode = ''
if item.extra and item.extra['episode']:
data = item.extra['data']
@@ -329,25 +319,6 @@ def findvideos(item):
videoData +='\n'+json['grabber']
if serverid == '33':
post = urllib.urlencode({'r': '', 'd': 'www.animeworld.biz'})
dataJson = httptools.downloadpage(json['grabber'].replace('/v/','/api/source/'),headers=[['x-requested-with', 'XMLHttpRequest']],post=post).data
json = jsontools.load(dataJson)
log(json['data'])
if json['data']:
for file in json['data']:
itemlist.append(
Item(
channel=item.channel,
action="play",
title='diretto',
url=file['file'],
quality=file['label'],
server='directo',
show=item.show,
contentType=item.contentType,
folder=False))
if serverid == '28':
itemlist.append(
Item(

View File

@@ -122,7 +122,7 @@ def last(item):
if item.contentType == 'episode':
matches = support.match(item, r'<a href="([^">]+)".*?>([^(:(|[)]+)([^<]+)<\/a>', '<article class="sequex-post-content.*?</article>', headers)[0]
else:
matches = support.match(item, r'<ahref=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>', r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>', headers)[0]
matches = support.match(item, r'<a href=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>', r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>', headers)[0]
for url, title, info in matches:
add = True
@@ -267,7 +267,6 @@ def findvideos(item):
# Estrae i contenuti - Streaming 3D
load_links(itemlist, '<strong>Streaming 3D[^<]+</strong>(.*?)<tableclass=cbtable height=30>', "pink", "Streaming 3D")
support.videolibrary(itemlist, item)
return support.server(item, itemlist=itemlist)
# Estrae i contenuti - Download

View File

@@ -1,16 +1,13 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks Icarus crew & Alfa addon
# Canale per fastsubita
# Thanks Icarus crew & Alfa addon & 4l3x87
# ------------------------------------------------------------
import re
import channelselector
from core import scrapertools, httptools, tmdb, support
from core.item import Item
from core.support import log
from platformcode import config, logger
from specials import autoplay
__channel__ = 'fastsubita'
host = config.get_setting("channel_host", __channel__)
@@ -19,11 +16,8 @@ list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome']
list_quality = ['default']
# checklinks = config.get_setting('checklinks', 'fastsubita')
# checklinks_number = config.get_setting('checklinks_number', 'fastsubita')
headers = [
['Host', 'fastsubita.com'],
['Host', host.split("//")[-1].split("/")[0]],
['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'],
['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
['Accept-Language', 'en-US,en;q=0.5'],
@@ -39,32 +33,23 @@ PERPAGE = 15
def mainlist(item):
logger.info(item.channel+" mainlist")
log()
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'pelicuals_tv', host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
support.menu(itemlist, 'Novità bold', 'pelicuals_tv', host, 'tvshow')
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host, 'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host, 'tvshow', args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('’', '\'').replace('&#215;','x').replace('×','x')
scrapedtitle = scrapedtitle.replace('’', '\'').replace('&#215;', 'x').replace('×', 'x').replace('"', "'")
return scrapedtitle.strip()
@@ -73,7 +58,7 @@ def cleantitle(scrapedtitle):
def newest(categoria):
logger.info(__channel__+" newest" + categoria)
log()
itemlist = []
item = Item()
try:
@@ -96,15 +81,11 @@ def newest(categoria):
def pelicuals_tv(item):
logger.info(item.channel+" pelicuals_tv")
log()
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# Estrae i contenuti
patron = r'<h3 class="entry-title title-font"><a href="([^"]+)" rel="bookmark">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches, data = support.match(item, r'<h3 class="entry-title title-font"><a href="([^"]+)" rel="bookmark">(.*?)<',
headers=headers)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
@@ -123,7 +104,9 @@ def pelicuals_tv(item):
else:
scrapedurl = "http:" + scrapedurl
title = scraped_1+" - "+infoLabels['season']+"x"+infoLabels['episode']+" Sub-ITA"
serie = cleantitle(scraped_1)
title = serie + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
@@ -134,45 +117,30 @@ def pelicuals_tv(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scraped_1,
show=serie,
extra=item.extra,
contentSerieName=scraped_1,
contentSerieName=serie,
contentLanguage='Sub-ITA',
infoLabels=infoLabels,
folder=True))
support.checkHost(item, itemlist)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginazione
support.nextPage(itemlist,item,data,'<a class="next page-numbers" href="(.*?)">Successivi')
support.nextPage(itemlist, item, data, '<a class="next page-numbers" href="(.*?)">Successivi')
return itemlist
def serietv():
logger.info(__channel__+" serietv")
log()
itemlist = []
data = httptools.downloadpage("%s/" % host, headers=headers).data
# block = scrapertools.find_single_match(data, r'<div class="entry-content">(.*?)</div>')
block = scrapertools.find_single_match(data, r"<select\s*?name='cat'\s*?id='cat'\s*?class='postform'\s*?>(.*?)</select>")
# block = data
# Estrae i contenuti
# patron = r'<a style.*?href="([^"]+)">([^<]+)<\/a>'
# patron = r'<a.*?href="([^"]+)">([^<]+)<\/a>'
# matches = re.compile(patron, re.DOTALL).findall(block)
matches = re.compile(r'<option class="level-([0-9]?)" value="([^"]+)">([^<]+)</option>', re.DOTALL).findall(block)
matches = support.match(Item(), r'<option class="level-([0-9]?)" value="([^"]+)">([^<]+)</option>',
r'<select\s*?name="cat"\s*?id="cat"\s*?class="postform"\s*?>(.*?)</select>', headers,
url="%s/" % host)[0]
index = 0
# for scrapedurl, scrapedtitle in matches:
# scrapedtitle = cleantitle(scrapedtitle)
# if "http:" not in scrapedurl:
# scrapedurl = "http:" + scrapedurl
#
# if ('S' in scrapedtitle.strip().upper()[0] and len(scrapedtitle.strip()) == 3) or '02' == scrapedtitle:
# # itemlist[index -1][0]+='{|}'+scrapedurl
# continue
#
# itemlist.append([scrapedurl,scrapedtitle])
# index += 1
for level, cat, title in matches:
title = cleantitle(title)
url = '%s?cat=%s' % (host, cat)
@@ -183,12 +151,11 @@ def serietv():
itemlist.append([url, title])
index += 1
logger.debug(itemlist)
return itemlist
def lista_serie(item):
logger.info(item.channel+" lista_serie")
log()
itemlist = []
p = 1
@@ -196,16 +163,6 @@ def lista_serie(item):
item.url, p = item.url.split('{}')
p = int(p)
# logger.debug(p)
# Carica la pagina
# data = httptools.downloadpage(item.url, headers=headers).data
#
# block = scrapertools.find_single_match(data,r'<div class="entry-content">(.*?)</div>')
#
# # Estrae i contenuti
# # patron = r'<a style.*?href="([^"]+)">([^<]+)<\/a>'
# patron = r'<a.*?href="([^"]+)">([^<]+)<\/a>'
# matches = re.compile(patron, re.DOTALL).findall(block)
if '||' in item.url:
series = item.url.split('\n\n')
matches = []
@@ -235,76 +192,42 @@ def lista_serie(item):
contentType='episode',
originalUrl=scrapedurl,
folder=True))
# ii += 1
support.checkHost(item, itemlist)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if len(series) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
action='lista_serie',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=scrapedurl,
args=item.args,
extra=item.extra,
thumbnail=support.thumb()))
next_page = item.url + '{}' + str(p + 1)
support.nextPage(itemlist, item, next_page=next_page)
return itemlist
def findvideos(item):
logger.info(item.channel+" findvideos")
log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
bloque = scrapertools.find_single_match(data, '<div class="entry-content">(.*?)<footer class="entry-footer">')
# data = httptools.downloadpage(item.url, headers=headers).data
patron_block = '<div class="entry-content">(.*?)<footer class="entry-footer">'
# bloque = scrapertools.find_single_match(data, patron_block)
patron = r'<a href="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(bloque)
# matches = re.compile(patron, re.DOTALL).findall(bloque)
matches, data = support.match(item, patron, patron_block, headers)
for scrapedurl in matches:
if 'is.gd' in scrapedurl:
resp = httptools.downloadpage(
scrapedurl, follow_redirects=False)
scrapedurl, follow_redirects=False)
data += resp.headers.get("location", "") + '\n'
itemlist = support.server(item,data)
# itemlist = servertools.find_video_items(data=data)
#
# for videoitem in itemlist:
# videoitem.title = item.title + videoitem.title
# videoitem.fulltitle = item.fulltitle
# videoitem.thumbnail = item.thumbnail
# videoitem.show = item.show
# videoitem.plot = item.plot
# videoitem.channel = item.channel
# videoitem.contentType = item.contentType
# videoitem.language = IDIOMAS['Italiano']
#
# # Requerido para Filtrar enlaces
#
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# # Requerido para FilterTools
#
# # itemlist = filtertools.get_links(itemlist, item, list_language)
#
# # Requerido para AutoPlay
#
# autoplay.start(itemlist, item)
return itemlist
return support.server(item, data)
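findvideos in every channel now funnels into support.server, and the commented-out code removed here spells out exactly the pipeline the helper absorbs: servertools.find_video_items over the page data, copying the parent item's metadata onto each video item, the optional checklinks validation, and the final autoplay.start. A condensed sketch along those lines (the removed comments are the source; the real helper also accepts the itemlist= form seen in the third file above):

from core import servertools
from specials import autoplay

def server(item, data='', itemlist=None):
    # Identify known video hosts in the raw page data and build playable items.
    videoitems = servertools.find_video_items(data=data) if data else itemlist
    for videoitem in videoitems:
        # Carry the parent item's metadata over, as the removed loop did.
        videoitem.channel = item.channel
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.contentType = item.contentType
    autoplay.start(videoitems, item)
    return videoitems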
def search(item, texto):
logger.info(item.channel + " " + item.url + " search " + texto)
# item.url = "%s/?s=%s" % (host, texto)
# item.url = "%s/elenco-serie-tv/" % host
log(texto)
itemlist = []
try:
series = serietv()
@@ -333,15 +256,15 @@ def search(item, texto):
logger.error("%s" % line)
return []
# ----------------------------------------------------------------------------------------------------------------
def list_az(item):
support.log(item.channel + " list_az")
log()
itemlist = []
alphabet = dict()
for i, (scrapedurl, scrapedtitle) in enumerate(serietv()):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
@@ -358,37 +281,29 @@ def list_az(item):
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item,itemlist = []):
support.log(item.channel + " episodios")
def episodios(item, itemlist=[]):
log()
urls = item.url.split('{|}')
# logger.debug(urls)
# Carica la pagina
data = httptools.downloadpage(urls[0], headers=headers).data
urls.pop(0)
# Estrae i contenuti
patron = r'<h3 class="entry-title title-font"><a href="([^"]+)" rel="bookmark">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches, data = support.match(item, patron, headers=headers, url=urls[0])
urls.pop(0)
# logger.debug(matches)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = cleantitle(scrapedtitle)
episode = scrapertools.find_multiple_matches(scrapedtitle,r'((\d*)x(\d*))')[0]
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
season = episode[1].lstrip('0')
# if season in seasons and '/page/' not in item.url: break
# logger.debug(scrapedtitle)
# logger.debug(episode)
# return False
season = episode[1].lstrip('0').zfill(2)
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode[2]
title = infoLabels['season']+'x'+infoLabels['episode']+" Sub-ITA"
title = infoLabels['season'] + 'x' + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod')
if "http:" not in scrapedurl:
scrapedurl = "http:" + scrapedurl
@@ -407,14 +322,11 @@ def episodios(item,itemlist = []):
infoLabels=infoLabels,
folder=True))
next_page = scrapertools.find_single_match(data,r'<a class="next page-numbers" href="(.*?)">Successivi')
next_page = scrapertools.find_single_match(data, r'<a class="next page-numbers" href="(.*?)">Successivi')
if next_page != "":
urls.insert(0,next_page)
urls.insert(0, next_page)
# logger.debug(urls)
if(len(urls) > 0):
if len(urls) > 0:
item.url = '{|}'.join(urls)
itemlist = episodios(item, itemlist)
else:
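Worth noting: this episodios is recursive by design. item.url packs several page URLs joined by '{|}'; each call downloads the head of that queue, pushes any "Successivi" next-page URL back onto the front, and re-enters itself with the accumulated itemlist, so only the last call falls through to the else branch to finish the list.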

View File

@@ -1,17 +1,16 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per guardaserie.click
# Thanks to Icarus crew & Alfa addon
# Canale per Guardaserie.click
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ------------------------------------------------------------
import re
import channelselector
from core import httptools, scrapertools, servertools, support
from core import httptools, scrapertools, support
from core import tmdb
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay
__channel__ = 'guardaserieclick'
host = config.get_setting("channel_host", __channel__)
@@ -19,7 +18,7 @@ headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['speedvideo','openload']
list_servers = ['speedvideo', 'openload']
list_quality = ['default']
headers = [['Referer', host]]
@@ -27,30 +26,20 @@ headers = [['Referer', host]]
# ----------------------------------------------------------------------------------------------------------------
def mainlist(item):
support.log(item.channel+" mainlist")
log()
itemlist = []
# support.menu(itemlist, 'Serie TV bold')
support.menu(itemlist, 'Novità bold', 'serietvaggiornate', "%s/lista-serie-tv" % host,'tvshow')
support.menu(itemlist, 'Nuove serie', 'nuoveserie', "%s/lista-serie-tv" % host,'tvshow')
support.menu(itemlist, 'Serie inedite Sub-ITA', 'nuoveserie', "%s/lista-serie-tv" % host,'tvshow',args=['inedite'])
support.menu(itemlist, 'Da non perdere bold', 'nuoveserie', "%s/lista-serie-tv" % host,'tvshow',args=['tv','da non perdere'])
support.menu(itemlist, 'Classiche bold', 'nuoveserie', "%s/lista-serie-tv" % host,'tvshow',args=['tv','classiche'])
support.menu(itemlist, 'Anime', 'lista_serie', "%s/category/animazione/" % host,'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host,'tvshow',args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host,'tvshow',args=['serie'])
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
support.menu(itemlist, 'Novità bold', 'serietvaggiornate', "%s/lista-serie-tv" % host, 'tvshow')
support.menu(itemlist, 'Nuove serie', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow')
support.menu(itemlist, 'Serie inedite Sub-ITA', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['inedite'])
support.menu(itemlist, 'Da non perdere bold', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'da non perdere'])
support.menu(itemlist, 'Classiche bold', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'classiche'])
support.menu(itemlist, 'Anime', 'lista_serie', "%s/category/animazione/" % host, 'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow', args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow', args=['serie'])
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
@@ -59,7 +48,7 @@ def mainlist(item):
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
support.log(__channel__+" newest" + categoria)
log()
itemlist = []
item = Item()
try:
@@ -85,7 +74,7 @@ def newest(categoria):
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
support.log(item.channel+" search")
log(texto)
item.url = host + "/?s=" + texto
try:
return lista_serie(item)
@@ -100,209 +89,102 @@ def search(item, texto):
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()).replace('"',"'")
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()).replace('"', "'")
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def nuoveserie(item):
support.log(item.channel+" nuoveserie")
log()
itemlist = []
patron_block = ''
if 'inedite' in item.args:
patron_block = r'<div\s*class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
elif 'da non perder' in item.args:
patron_block = r'<div\s*class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
patron_block = r'<div class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
elif 'da non perdere' in item.args:
patron_block = r'<div class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
elif 'classiche' in item.args:
patron_block = r'<div\s*class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
patron_block = r'<div class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
else:
patron_block = r'<div\s*class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'
patron_block = r'<div class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'
patron = r'<a\s*href="([^"]+)".*?>\s*<img\s*.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p>'
matches = support.match(item, patron, patron_block, headers)[0]
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="episode",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
show=scrapedtitle,
thumbnail=scrapedthumbnail,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
patron = r'<a href="([^"]+)".*?>\s<img\s.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p>'
return support.scrape(item, patron, ['url', 'thumb', 'title'], patron_block=patron_block, action='episodios')
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def serietvaggiornate(item):
support.log(item.channel+" serietvaggiornate")
log()
itemlist = []
patron_block = r'<div\s*class="container container-title-serie-lastep container-scheda" meta-slug="lastep">(.*?)</div></div><div'
patron = r'<a\s*rel="nofollow" href="([^"]+)"[^>]+> <img\s*.*?src="([^"]+)"[^>]+>[^>]+>'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
patron_block = r'<div class="container\s*container-title-serie-lastep\s*container-scheda" meta-slug="lastep">(.*?)<\/div><\/div><div'
patron = r'<a rel="nofollow" href="([^"]+)"[^>]+> <img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^(?:<|\()]+)(?:\(([^\)]+)\))?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
matches = support.match(item,patron, patron_block, headers)[0]
for scrapedurl, scrapedthumbnail, scrapedep, scrapedtitle in matches:
episode = re.compile(r'^(\d+)x(\d+)', re.DOTALL).findall(scrapedep) # Prendo stagione ed episodioso
scrapedtitle = cleantitle(scrapedtitle)
contentlanguage = ""
if 'sub-ita' in scrapedep.strip().lower():
contentlanguage = 'Sub-ITA'
extra = r'<span\s*.*?meta-stag="%s" meta-ep="%s" meta-embed="([^"]+)"\s*.*?embed2="([^"]+)?"\s*.*?embed3="([^"]+)?"[^>]*>' % (
episode[0][0], episode[0][1].lstrip("0"))
infoLabels = {}
infoLabels['episode'] = episode[0][1].lstrip("0")
infoLabels['season'] = episode[0][0]
title = str("%s - %sx%s %s" % (scrapedtitle,infoLabels['season'],infoLabels['episode'],contentlanguage)).strip()
itemlist.append(
Item(channel=item.channel,
action="findepvideos",
contentType="episode",
title=title,
show=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
extra=extra,
thumbnail=scrapedthumbnail,
contentLanguage=contentlanguage,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
return support.scrape(item, patron, ['url', 'thumb', 'episode', 'lang', 'title'], patron_block=patron_block, action='findvideos')
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
support.log(item.channel+" categorie")
itemlist = []
matches = support.match(item, r'<li>\s*<a\s*href="([^"]+)"[^>]+>([^<]+)</a></li>', r'<ul\s*class="dropdown-menu category">(.*?)</ul>', headers)[0]
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
title=scrapedtitle,
contentType="tvshow",
url="".join([host, scrapedurl]),
thumbnail=item.thumbnail,
extra="tv",
folder=True))
return itemlist
log()
return support.scrape(item, r'<li>\s<a\shref="([^"]+)"[^>]+>([^<]+)</a></li>', ['url', 'title'], patron_block=r'<ul\sclass="dropdown-menu category">(.*?)</ul>', headers=headers, action="lista_serie")
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
support.log(item.channel+" lista_serie")
log()
itemlist = []
# data = httptools.downloadpage(item.url, headers=headers).data
#
# patron = r'<a\s*href="([^"]+)".*?>\s*<img\s*.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
# blocco = scrapertools.find_single_match(data,
# r'<div\s*class="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\s*class="container-fluid whitebg" style="">')
# matches = re.compile(patron, re.DOTALL).findall(blocco)
patron_block = r'<div\sclass="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\sclass="container-fluid whitebg" style="">'
patron = r'<a\shref="([^"]+)".*?>\s<img\s.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
patron_block = r'<div\s*class="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\s*class="container-fluid whitebg" style="">'
patron = r'<a\s*href="([^"]+)".*?>\s*<img\s*.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
matches, data = support.match(item, patron, patron_block, headers)
for scrapedurl, scrapedimg, scrapedtitle in matches:
scrapedtitle = cleantitle(scrapedtitle)
if scrapedtitle not in ['DMCA','Contatti','Lista di tutte le serie tv']:
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="episode",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedimg,
extra=item.extra,
show=scrapedtitle,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
support.nextPage(itemlist,item,data,r"<link\s.*?rel='next'\shref='([^']*)'")
return itemlist
return support.scrape(item, patron, ['url', 'thumb', 'title'], patron_block=patron_block, patronNext=r"<link\s.*?rel='next'\shref='([^']*)'", action='episodios')
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
support.log(item.channel+" episodios")
log()
itemlist = []
# data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div\s*class="[^"]+">\s*([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><p[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
patron += r'[^<]+[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s*'
patron += r'.*?embed="([^"]+)"\s*.*?embed2="([^"]+)?"\s*.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*'
patron += r'(?:<img\s*class="[^"]+" meta-src="([^"]+)"[^>]+>|<img\s*class="[^"]+" src="" data-original="([^"]+)"[^>]+>)?'
# matches = re.compile(patron, re.DOTALL).findall(data)
# logger.debug(matches)
patron = r'<div\sclass="[^"]+">\s([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><p[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
patron += r'[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s'
patron += r'.*?embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s?'
patron += r'(?:<img\sclass="[^"]+" meta-src="([^"]+)"[^>]+>|<img\sclass="[^"]+" src="" data-original="([^"]+)"[^>]+>)?'
matches = support.match(item, patron, headers=headers)[0]
for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2,scrapedurl3,scrapedthumbnail,scrapedthumbnail2 in matches:
for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2, scrapedurl3, scrapedthumbnail, scrapedthumbnail2 in matches:
scrapedtitle = cleantitle(scrapedtitle)
scrapedepisode = scrapedepisode.zfill(2)
scrapedepisodetitle = cleantitle(scrapedepisodetitle)
title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip()
if 'SUB-ITA' in scrapedtitle:
title +=" Sub-ITA"
title += " "+support.typo("Sub-ITA", '_ [] color kod')
infoLabels = {}
infoLabels['season'] = scrapedseason
infoLabels['episode'] = scrapedepisode
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=title,
fulltitle=scrapedtitle,
url=scrapedurl+"\r\n"+scrapedurl2+"\r\n"+scrapedurl3,
contentType="episode",
plot=scrapedplot,
contentSerieName=scrapedserie,
contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
infoLabels=infoLabels,
thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
folder=True))
Item(channel=item.channel,
action="findvideos",
title=support.typo(title, 'bold'),
fulltitle=scrapedtitle,
url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3,
contentType="episode",
plot=scrapedplot,
contentSerieName=scrapedserie,
contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
infoLabels=infoLabels,
thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -315,22 +197,24 @@ def episodios(item):
# ----------------------------------------------------------------------------------------------------------------
def findepvideos(item):
support.log(item.channel+" findepvideos")
data = httptools.downloadpage(item.url, headers=headers).data
log()
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
matches = scrapertools.find_multiple_matches(data, item.extra)
data = "\r\n".join(matches[0])
item.contentType = 'movie'
itemlist = support.server(item, data=data)
return itemlist
return support.server(item, data=data)
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel+" findvideos")
logger.debug(item.url)
itemlist = support.server(item, data=item.url)
return itemlist
log()
if item.contentType == 'tvshow':
data = httptools.downloadpage(item.url, headers=headers).data
matches = scrapertools.find_multiple_matches(data, item.extra)
data = "\r\n".join(matches[0])
else:
log(item.url)
data = item.url
return support.server(item, data)

View File

@@ -1,16 +1,16 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Serie Tv Sub ITA
# Thanks to Icarus crew & Alfa addon
# Canale per Serietvsubita
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ----------------------------------------------------------
import re
import time
import channelselector
from core import httptools, tmdb, scrapertools, support
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay
__channel__ = "serietvsubita"
host = config.get_setting("channel_host", __channel__)
@@ -18,33 +18,19 @@ headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['gounlimited','verystream','streamango','openload']
list_servers = ['gounlimited', 'verystream', 'streamango', 'openload']
list_quality = ['default']
# checklinks = config.get_setting('checklinks', __channel__)
# checklinks_number = config.get_setting('checklinks_number', __channel__)
def mainlist(item):
support.log(item.channel + 'mainlist')
log()
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
support.menu(itemlist, 'Novità bold', 'peliculas_tv', host, 'tvshow')
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host, 'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host, 'tvshow', args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
@@ -52,20 +38,57 @@ def mainlist(item):
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones ','').replace('In The Dark 2019','In The Dark (2019)').strip()
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones ','')\
.replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip()
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
log()
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub(r'\n|\t|\s+', ' ', data)
# recupero il blocco contenente i link
blocco = scrapertools.find_single_match(data, r'<div class="entry">([\s\S.]*?)<div class="post').replace('..:: Episodio ', 'Episodio ').strip()
matches = scrapertools.find_multiple_matches(blocco, '(S(\d*)E(\d*))\s')
if len(matches) > 0:
for fullseasonepisode, season, episode in matches:
blocco = blocco.replace(fullseasonepisode + ' ', 'Episodio ' + episode + ' ')
blocco = blocco.replace('Episodio ', '..:: Episodio ')
episodio = item.infoLabels['episode']
patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio
log(patron)
log(blocco)
matches = scrapertools.find_multiple_matches(blocco, patron)
if len(matches):
data = matches[0][0]
patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
for keeplinks, id in matches:
headers2 = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
['Referer', keeplinks]]
html = httptools.downloadpage(keeplinks, headers=headers2).data
data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
return support.server(item, data=data)
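The Keeplinks block above is the one non-obvious step here. For each protected keeplinks.co/eu URL, the scraper re-requests the page with a flag[<id>]=1 cookie and a nopopatall cookie set to the current timestamp — a combination that evidently gets the protector to serve its outbound anchors without the interstitial. Those anchors are scraped from the </lable><a href=... markup and appended to data, so support.server resolves them like any other link.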
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
support.log(item.channel + " lista_serie")
log()
itemlist = []
PERPAGE = 15
@@ -81,12 +104,9 @@ def lista_serie(item):
for i, serie in enumerate(series):
matches.append(serie.split('||'))
else:
# Descarga la pagina
data = httptools.downloadpage(item.url).data
# Extrae las entradas
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
scrapedplot = ""
@@ -112,90 +132,58 @@ def lista_serie(item):
# Paginazione
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
action='lista_serie',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=scrapedurl,
args=item.args,
thumbnail=support.thumb()))
support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item, itemlist=[]):
support.log(item.channel + " episodios")
# itemlist = []
log()
patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
patron += r'<p><a href="([^"]+)">'
data = httptools.downloadpage(item.url).data
matches, data = support.match(item, patron, headers=headers)
patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
patron += '<p><a href="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
logger.debug(itemlist)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
if "(Completa)" in scrapedtitle:
data = httptools.downloadpage(scrapedurl).data
scrapedtitle = scrapedtitle.replace(" Miniserie"," Stagione 1")
data = httptools.downloadpage(scrapedurl, headers=headers).data
scrapedtitle = scrapedtitle.replace(" Miniserie", " Stagione 1")
title = scrapedtitle.split(" Stagione")[0].strip()
# recupero la stagione
season = scrapertools.find_single_match(scrapedtitle,'Stagione ([0-9]*)')
season = scrapertools.find_single_match(scrapedtitle, 'Stagione ([0-9]*)')
blocco = scrapertools.find_single_match(data, '<div class="entry">[\s\S.]*?<div class="post')
# blocco = scrapertools.decodeHtmlentities(blocco)
blocco = blocco.replace('<strong>Episodio ','<strong>Episodio ').replace(' </strong>',' </strong>')
blocco = blocco.replace('<strong>Episodio ','<strong>S'+season.zfill(2)+'E')
# logger.debug(blocco)
# controllo se gli episodi son nel formato S0XE0X
matches = scrapertools.find_multiple_matches(blocco,r'(S(\d*)E(\d*))\s')
blocco = blocco.replace('<strong>Episodio ', '<strong>Episodio ').replace(' </strong>', ' </strong>')
blocco = blocco.replace('<strong>Episodio ', '<strong>S' + season.zfill(2) + 'E')
matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
episodes = []
if len(matches) > 0:
for fullepisode_s, season, episode in matches:
season = season.lstrip("0")
# episode = episode.lstrip("0")
episodes.append([
"".join([season, "x", episode]),
season,
episode
])
# else:
# # blocco = blocco.replace('>Episodio 0','>Episodio-0')
# matches = scrapertools.find_multiple_matches(blocco, r'Episodio[^\d](\d*)')
# logger.debug(blocco)
# logger.debug(matches)
# episodes = []
# if len(matches) > 0:
# for string, episode in matches:
# episodes.append([
# "".join([season, "x", episode]),
# season,
# episode
# ])
else:
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
episodes = scrapertools.find_multiple_matches(scrapedtitle,r'((\d*)x(\d*))')
# logger.debug(scrapedtitle)
# logger.debug(episodes)
episodes = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
for fullepisode, season, episode in episodes:
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode
fullepisode+=' Sub-ITA'
fullepisode += ' ' + support.typo("Sub-ITA", '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
@@ -213,83 +201,33 @@ def episodios(item, itemlist=[]):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginazionazione
patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
patron = r'<strong class="on">\d+</strong>\s*<a href="([^<]+)">\d+</a>'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
item.url = next_page
itemlist = episodios(item,itemlist)
itemlist = episodios(item, itemlist)
else:
item.url = item.originalUrl
support.videolibrary(itemlist,item,'bold color kod')
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
data = httptools.downloadpage(item.url).data
# recupero il blocco contenente i link
blocco = scrapertools.find_single_match(data,'<div class="entry">[\s\S.]*?<div class="post')
blocco = blocco.replace('..:: Episodio ','Episodio ')
matches = scrapertools.find_multiple_matches(blocco, '(S(\d*)E(\d*))\s')
if len(matches) > 0:
for fullseasonepisode, season, episode in matches:
blocco = blocco.replace(fullseasonepisode+' ','Episodio '+episode+' ')
blocco = blocco.replace('Episodio ', '..:: Episodio ')
logger.debug(blocco)
episodio = item.title.replace(str(item.contentSeason)+"x",'')
patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio
matches = re.compile(patron, re.DOTALL).findall(blocco)
if len(matches):
data = matches[0][0]
patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
for keeplinks, id in matches:
headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
['Referer', keeplinks]]
html = httptools.downloadpage(keeplinks, headers=headers).data
data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
itemlist = support.server(item, data=data)
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Controlla se i link sono validi
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def peliculas_tv(item):
logger.info(item.channel+" peliculas_tv")
log()
itemlist = []
data = httptools.downloadpage(item.url).data
# logger.debug(data)
patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches, data = support.match(item, patron, headers=headers)
for scrapedurl, scrapedtitle in matches:
if "FACEBOOK" in scrapedtitle or "RAPIDGATOR" in scrapedtitle:
continue
if scrapedtitle == "WELCOME!":
if scrapedtitle in ["FACEBOOK", "RAPIDGATOR", "WELCOME!"]:
continue
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
@@ -300,14 +238,14 @@ def peliculas_tv(item):
title = title.split(" S2")[0].strip()
infoLabels['season'] = episode[1]
infoLabels['episode'] = episode[2]
infoLabels['episode'] = episode[2].zfill(2)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=title+" - "+episode[0]+" Sub-ITA",
title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
@@ -318,24 +256,9 @@ def peliculas_tv(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
support.nextPage(itemlist,item,data,patron)
# next_page = scrapertools.find_single_match(data, patron)
# if next_page != "":
# if item.extra == "search_tv":
# next_page = next_page.replace('&#038;', '&')
# itemlist.append(
# Item(channel=item.channel,
# action='peliculas_tv',
# contentType=item.contentType,
# title=support.typo(config.get_localized_string(30992), 'color kod bold'),
# url=next_page,
# args=item.args,
# extra=item.extra,
# thumbnail=support.thumb()))
patron = r'<strong class="on">\d+</strong>\s<a href="([^<]+)">\d+</a>'
support.nextPage(itemlist, item, data, patron)
return itemlist
@@ -343,10 +266,9 @@ def peliculas_tv(item):
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info(__channel__ + " newest" + categoria)
log(categoria)
itemlist = []
item = Item()
item.url = host
@@ -355,7 +277,6 @@ def newest(categoria):
if categoria == "series":
itemlist = peliculas_tv(item)
# Continue the search in case of error
except:
import sys
for line in sys.exc_info():
@@ -369,15 +290,11 @@ def newest(categoria):
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info(item.channel + " search")
log(texto)
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Articles
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if texto.upper() in scrapedtitle.upper():
@@ -401,19 +318,6 @@ def search(item, texto):
return itemlist
# item.extra = "search_tv"
#
# item.url = host + "/?s=" + texto + "&op.x=0&op.y=0"
#
# try:
# return peliculas_tv(item)
#
# except:
# import sys
# for line in sys.exc_info():
# logger.error("%s" % line)
# return []
# ================================================================================================================
@@ -421,23 +325,18 @@ def search(item, texto):
def list_az(item):
support.log(item.channel+" list_az")
log()
itemlist = []
alphabet = dict()
# Download the page
data = httptools.downloadpage(item.url).data
# Articles
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(scrapedurl+'||'+scrapedtitle)
alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
for letter in sorted(alphabet):
itemlist.append(

View File

@@ -1,15 +1,14 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per SerieTVU
# Thanks to Icarus crew & Alfa addon
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ----------------------------------------------------------
import re
import channelselector
from core import httptools, tmdb, scrapertools, support
from core import tmdb, scrapertools, support
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay
__channel__ = 'serietvu'
host = config.get_setting("channel_host", __channel__)
@@ -20,44 +19,28 @@ list_language = IDIOMAS.values()
list_servers = ['speedvideo']
list_quality = ['default']
# checklinks = config.get_setting('checklinks', __channel__)
# checklinks_number = config.get_setting('checklinks_number', __channel__)
def mainlist(item):
support.log(item.channel + 'mainlist')
log()
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'latestep', "%s/ultimi-episodi" % host,'tvshow')
# support.menu(itemlist, 'Nuove serie color azure', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host,'tvshow')
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
support.menu(itemlist, 'Novità bold', 'latestep', "%s/ultimi-episodi" % host, 'tvshow')
support.menu(itemlist, 'Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host, 'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow')
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace(' Il Trono di Spade','').replace('Flash 2014','Flash')
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace(' Il Trono di Spade', '').replace(
'Flash 2014', 'Flash').replace('"', "'")
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
@@ -65,14 +48,12 @@ def cleantitle(scrapedtitle):
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
support.log(item.channel + " lista_serie")
log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches, data = support.match(item, patron, headers=headers)
for scrapedurl, scrapedimg, scrapedtitle in matches:
infoLabels = {}
@@ -96,34 +77,43 @@ def lista_serie(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pages
support.nextPage(itemlist,item,data,'<li><a href="([^"]+)">Pagina successiva')
support.nextPage(itemlist, item, data, '<li><a href="([^"]+)">Pagina successiva')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
support.log(item.channel + " episodios")
log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<option value="(\d+)"[\sselected]*>.*?</option>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches, data = support.match(item, patron, headers=headers)
for value in matches:
patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % value
blocco = scrapertools.find_single_match(data, patron)
log(blocco)
patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)"(?:\sdata-original="([^"]+)")?\sclass="[^"]+">)[^>]+>[^>]+>([^<]+)<'
matches = scrapertools.find_multiple_matches(blocco, patron)
patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">)[^>]+>[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches:
number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip()
contentlanguage = ''
if 'sub-ita' in scrapedtitle.lower():
contentlanguage = 'Sub-ITA'
scrapedtitle = scrapedtitle.replace(contentlanguage, '')
number = cleantitle(scrapedtitle.replace("Episodio", "")).strip()
title = value + "x" + number.zfill(2)
title += " "+support.typo(contentlanguage, '_ [] color kod') if contentlanguage else ''
infoLabels = {}
infoLabels['episode'] = number.zfill(2)
infoLabels['season'] = value
itemlist.append(
Item(channel=item.channel,
@@ -134,60 +124,40 @@ def episodios(item):
url=scrapedurl,
thumbnail=scrapedimg,
extra=scrapedextra,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
support.videolibrary(itemlist,item,'bold color kod')
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
log()
return support.server(item, data=item.url)
itemlist = support.server(item, data=item.url)
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Check that the links are valid
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findepisodevideo(item):
support.log(item.channel + " findepisodevideo")
log()
# Download the page
data = httptools.downloadpage(item.url, headers=headers).data
# Grab the block for the requested season
patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
blocco = scrapertools.find_single_match(data, patron)
# Extract the episode
patron = r'<a data-id="%s[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">' % item.extra[0][1].lstrip("0")
matches = re.compile(patron, re.DOTALL).findall(blocco)
itemlist = support.server(item, data=matches[0][0])
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Check that the links are valid
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# autoplay.start(itemlist, item)
return itemlist
patron_block = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
patron = r'<a data-id="%s[^"]*" data-href="([^"]+)"(?:\sdata-original="[^"]+")?\sclass="[^"]+">' % item.extra[0][1].lstrip("0")
matches = support.match(item, patron, patron_block, headers)[0]
data = ''
if len(matches) > 0:
data = matches[0]
item.contentType = 'movie'
return support.server(item, data=data)
# ================================================================================================================
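# The rewrites above lean on support.match(), which downloads item.url (or a
# given url), optionally narrows the page to patron_block, and returns the
# matches together with the cleaned page. A typical call shape, assuming an
# Item pointing at a show page (patterns borrowed from episodios above):
from core import support
from core.item import Item

item = Item(url=host)   # host as set at the top of the module
patron_block = r'<div class="list [active]*" data-id="1">(.*?)</div>\s*</div>'
patron = r'data-href="([^"]+)"'
matches, data = support.match(item, patron, patron_block, headers)
# matches -> the data-href urls inside season 1's block; data -> the cleaned page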
@@ -195,16 +165,13 @@ def findepisodevideo(item):
# ----------------------------------------------------------------------------------------------------------------
def latestep(item):
support.log(item.channel + " latestep")
log()
itemlist = []
titles = []
# retrieve the episodes shown on the home page in the "Ultimi episodi aggiunti" section
data = httptools.downloadpage(host, headers=headers).data
block = scrapertools.find_single_match(data,r"Ultimi episodi aggiunti.*?<h2>")
regex = r'<a href="([^"]*)"\sdata-src="([^"]*)"\sclass="owl-lazy.*?".*?class="title">(.*?)<small>\((\d*?)x(\d*?)\s(Sub-Ita|Ita)'
matches = re.compile(regex, re.DOTALL).findall(block)
patron_block = r"Ultimi episodi aggiunti.*?<h2>"
patron = r'<a href="([^"]*)"\sdata-src="([^"]*)"\sclass="owl-lazy.*?".*?class="title">(.*?)<small>\((\d*?)x(\d*?)\s(Sub-Ita|Ita)'
matches = support.match(item, patron, patron_block, headers, host)[0]
for scrapedurl, scrapedimg, scrapedtitle, scrapedseason, scrapedepisode, scrapedlanguage in matches:
infoLabels = {}
@@ -213,13 +180,13 @@ def latestep(item):
infoLabels['year'] = year
infoLabels['episode'] = scrapedepisode
infoLabels['season'] = scrapedseason
episode = scrapedseason+"x"+scrapedepisode
episode = scrapedseason + "x" + scrapedepisode
scrapedtitle = cleantitle(scrapedtitle)
title = scrapedtitle+" - "+episode
title = scrapedtitle + " - " + episode
contentlanguage = ""
if scrapedlanguage.strip().lower() != 'ita':
title +=" Sub-ITA"
title += " "+support.typo("Sub-ITA", '_ [] color kod')
contentlanguage = 'Sub-ITA'
titles.append(title)
@@ -229,7 +196,7 @@ def latestep(item):
title=title,
fulltitle=title,
url=scrapedurl,
extra=[[scrapedseason,scrapedepisode]],
extra=[[scrapedseason, scrapedepisode]],
thumbnail=scrapedimg,
contentSerieName=scrapedtitle,
contentLanguage=contentlanguage,
@@ -237,11 +204,9 @@ def latestep(item):
infoLabels=infoLabels,
folder=True))
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<small>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
for scrapedurl, scrapedimg, scrapedtitle, scrapedinfo in matches:
infoLabels = {}
@@ -261,7 +226,7 @@ def latestep(item):
title = title.strip()
contentlanguage = ""
if 'sub-ita' in scrapedinfo.lower():
title+=" Sub-ITA"
title += " "+support.typo("Sub-ITA", '_ [] color kod')
contentlanguage = 'Sub-ITA'
if title in titles: continue
@@ -279,12 +244,8 @@ def latestep(item):
contentType='episode',
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# logger.debug("".join(map(str,itemlist)))
return itemlist
@@ -292,7 +253,7 @@ def latestep(item):
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info(__channel__ + " newest" + categoria)
log(categoria)
itemlist = []
item = Item()
try:
@@ -318,7 +279,7 @@ def newest(categoria):
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info(item.channel + " search")
log(texto)
item.url = host + "/?s=" + texto
try:
return lista_serie(item)
@@ -334,26 +295,9 @@ def search(item, texto):
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
logger.info(item.channel +" categorie")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>')
log()
patron_block= r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>'
patron = r'<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
if scrapedtitle == 'Home Page' or scrapedtitle == 'Calendario Aggiornamenti':
continue
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
title=scrapedtitle,
contentType="tvshow",
url="%s%s" % (host, scrapedurl),
thumbnail=item.thumbnail,
folder=True))
return itemlist
return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='lista_serie', blacklist=["Home Page", "Calendario Aggiornamenti"])
# ================================================================================================================

View File

@@ -130,7 +130,7 @@ def peliculas(item):
action = 'findvideos' if item.extra == 'movie' else 'episodios'
if item.args == 'movie':
patron= r'<div class="mediaWrap mediaWrapAlt">[^<]+<a href="([^"]+)" title="Permalink to\s([^"]+) \(([^<]+)\).*?"[^>]+>[^<]+<img[^s]+src="([^"]+)"[^>]+>[^<]+<\/a>.*?<p>\s*([a-zA-Z-0-9]+)\s*<\/p>'
itemlist = support.scrape(item, patron, ['url', 'title', 'year', 'thumb', 'quality'], headers, action=action, patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
itemlist = support.scrape(item, patron, ['url', 'title', 'year', 'thumb', 'quality'], headers, action=action, patron_block='<div id="main_col">(.*?)main_col', patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
else:
patron = r'<div class="media3">[^>]+><a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+><\/a><[^>]+><a[^<]+><p>([^<]+) \(([^\)]+)[^<]+<\/p>.*?<p>\s*([a-zA-Z-0-9]+)\s*<\/p>'
itemlist = support.scrape(item, patron, ['url', 'thumb', 'title', 'year', 'quality'], headers, action=action, patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
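# support.scrape() pairs each regex capture group with the name at the same
# index in listGroups, builds the Items, runs the TMDB lookup, and adds the
# next-page Item from patronNext. A minimal call shape with a toy patron
# (group order must line up with listGroups):
patron = r'<a href="([^"]+)" title="([^"]+)"'
itemlist = support.scrape(item, patron, ['url', 'title'], headers,
                          action='findvideos',
                          patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')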
@@ -210,7 +210,7 @@ def anime(item):
def findvideos(item):
log()
itemlist = []
# itemlist = []
if item.args == 'anime':
data = item.url
@@ -233,6 +233,5 @@ def findvideos(item):
page = httptools.downloadpage(url, headers=headers).data
data += '\t' + scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">')
itemlist= support.server(item, data, headers, True, True)
support.videolibrary(itemlist, item, 'color kod bold')
return itemlist
return support.server(item, data, headers=headers)
# return itemlist

View File

@@ -113,4 +113,4 @@ def play(item):
data = support.swzz_get_url(item)
return support.server(item, data, headers)
return support.server(item, data, headers=headers)

View File

@@ -8,7 +8,7 @@ import urlparse
import xbmcaddon
from channelselector import thumb
from core import httptools, scrapertoolsV2, servertools, tmdb
from core import httptools, scrapertoolsV2, servertools, tmdb, channeltools
from core.item import Item
from lib import unshortenit
from platformcode import logger, config
@@ -167,7 +167,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
scraped['episode'] = re.sub(r'\s-\s|-|x|&#8211', 'x' , scraped['episode'])
longtitle = typo(scraped['episode'] + ' - ', 'bold') + longtitle
if scraped['title2']:
title2 = scrapertoolsV2.decodeHtmlentities(scraped["title2"]).strip()
title2 = scrapertoolsV2.decodeHtmlentities(scraped["title2"]).replace('"', "'").strip()
longtitle = longtitle + typo(title2, 'bold _ -- _')
if scraped["lang"]:
if 'sub' in scraped["lang"].lower():
@@ -225,7 +225,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
it.__setattr__(lg, match[listGroups.index(lg)])
itemlist.append(it)
checkHost(item, itemlist)
if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \
or (item.contentType == "movie" and action != "play"):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -243,6 +243,21 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
return itemlist
def checkHost(item, itemlist):
# if there are no results, the user may have manually changed the host, so bring it
# back to the default value (this also fixes host changes made on our side)
if len(itemlist) == 0:
# find the default value
defHost = None
for s in channeltools.get_channel_json(item.channel)['settings']:
if s['id'] == 'channel_host':
defHost = s['default']
break
# compare it with the current one
if config.get_setting('channel_host', item.channel) != defHost:
config.set_setting('channel_host', defHost, item.channel)
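# checkHost() runs inside scrape() once the itemlist is built: an empty list
# is taken as a hint that channel_host was edited away from a working value,
# so the setting is rewritten from the channel json. The fragment it reads
# looks like this (the url is a placeholder, not a real default):
#   "settings": [{"id": "channel_host", "type": "text", "default": "https://example.tv"}]
# It can also be called directly after a manual parse:
#   support.checkHost(item, itemlist)   # itemlist == [] -> host reset to default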
def dooplay_get_links(item, host):
# get links from websites using dooplay theme and dooplay_player
# return a list of dict containing these values: url, title and server
@@ -452,7 +467,8 @@ def match(item, patron='', patron_block='', headers='', url=''):
matches = []
url = url if url else item.url
data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data.replace("'", '"')
data = re.sub(r'\n|\t|\s\s', '', data)
data = re.sub(r'\n|\t', ' ', data)
data = re.sub(r'>\s\s*<', '><', data)
log('DATA= ', data)
if patron_block:
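# The gentler cleanup matters for patrons that expect a single \s between
# tokens: newlines and tabs now become spaces instead of vanishing, and only
# whitespace between tags is collapsed. A quick check of the two substitutions:
import re

data = '<ul>\n\t<li>A</li>\n  <li>B</li>\n</ul>'
data = re.sub(r'\n|\t', ' ', data)
data = re.sub(r'>\s\s*<', '><', data)
assert data == '<ul><li>A</li><li>B</li></ul>'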
@@ -500,11 +516,11 @@ def videolibrary(itemlist, item, typography='', function_level=1):
return itemlist
def nextPage(itemlist, item, data, patron, function_level=1):
def nextPage(itemlist, item, data='', patron='', function_level=1, next_page=''):
# Function_level is useful if the function is called by another function.
# If the call is direct, leave it blank
next_page = scrapertoolsV2.find_single_match(data, patron)
if next_page == '':
next_page = scrapertoolsV2.find_single_match(data, patron)
if next_page != "":
if 'http' not in next_page:
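# Both call shapes are now valid; next_url below stands for a link a channel
# has already extracted on its own:
#   support.nextPage(itemlist, item, data, patron)        # scrape the link from data
#   support.nextPage(itemlist, item, next_page=next_url)  # link already known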

View File

@@ -393,6 +393,10 @@ msgctxt "#50004"
msgid "Path: "
msgstr ""
msgctxt "#50005"
msgid "Delete This Channel?"
msgstr ""
msgctxt "#59970"
msgid "Synchronization with Trakt started"
msgstr ""

View File

@@ -393,6 +393,10 @@ msgctxt "#50004"
msgid "Path: "
msgstr "Percorso: "
msgctxt "#50005"
msgid "Delete This Channel?"
msgstr "Eliminare Questo Canale?"
msgctxt "#59970"
msgid "Synchronization with Trakt started"
msgstr "Sincronizzazione con Trakt iniziata"

View File

@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<settings>
<category label="70168">
<setting id="player_mode" type="enum" values="Direct|SetResolvedUrl|Built-In|Download and Play" label="30044" default="0"/>
<setting id="player_mode" type="enum" values="Direct|SetResolvedUrl|Built-In|Download and Play" label="30044" default="1"/>
<setting id="default_action" type="enum" lvalues="30006|30007|30008" label="30005" default="0"/>
<setting id="autoplay" type="bool" label="70562" default="false" visible="true"/>
<setting id="checklinks" type="bool" label="30020" default="false"/>

servers/animeworld.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://animeworld.biz/v/[a-zA-Z0-9/-]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "animeworld",
"name": "animeworld",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}
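# The find_videos block is declarative: servertools runs every pattern over
# the page data and rebuilds the link from the "url" template (here just
# capture group 1). The same matching reduced to its core, with a made-up
# page snippet:
import re

pattern = r'(https://animeworld\.biz/v/[a-zA-Z0-9/-]+)'
page = '<iframe src="https://animeworld.biz/v/abc123/ep-1"></iframe>'
for found in re.findall(pattern, page):
    print(found)   # -> https://animeworld.biz/v/abc123/ep-1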

servers/animeworld.py Normal file
View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
import urllib
from core import httptools, jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
post = urllib.urlencode({'r': '', 'd': 'animeworld.biz'})
data_json = httptools.downloadpage(page_url.replace('/v/', '/api/source/'), headers=[['x-requested-with', 'XMLHttpRequest']], post=post).data
json = jsontools.load(data_json)
if not json['data']:
return False, "Video not found"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
post = urllib.urlencode({'r': '', 'd': 'animeworld.biz'})
data_json = httptools.downloadpage(page_url.replace('/v/', '/api/source/'), headers=[['x-requested-with', 'XMLHttpRequest']], post=post).data
json = jsontools.load(data_json)
if json['data']:
for file in json['data']:
media_url = file['file']
label = file['label']
extension = file['type']
video_urls.append([label + " " + extension + ' [animeworld]', media_url])
return video_urls
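# A shape check of the parsing above against a canned /api/source/ response,
# so the [label + type, url] pairing is visible without hitting the endpoint
# (values are illustrative):
from core import jsontools

canned = '{"data": [{"file": "https://cdn.example/ep1-720.mp4", "label": "720p", "type": "mp4"}]}'
json = jsontools.load(canned)
video_urls = [[f['label'] + ' ' + f['type'] + ' [animeworld]', f['file']] for f in json['data']]
# -> [['720p mp4 [animeworld]', 'https://cdn.example/ep1-720.mp4']]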

View File

@@ -28,17 +28,19 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
vid = scrapertools.find_multiple_matches(data, 'download_video.*?>.*?<.*?<td>([^\,,\s]+)')
headers.append(['Referer', page_url])
post_data = scrapertools.find_single_match(data,
"</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
post_data = scrapertools.find_single_match(data,"</div>\s*<script type='text/javascript'>(eval.function.p,a,c,k,e,.*?)\s*</script>")
if post_data != "":
from lib import jsunpack
data = jsunpack.unpack(post_data)
media_url = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
block = scrapertools.find_single_match(data, 'sources:\s*\[[^\]]+\]')
if block: data = block
media_urls = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
_headers = urllib.urlencode(dict(headers))
i = 0
for media_url in media_url:
for media_url in media_urls:
video_urls.append([vid[i] + " mp4 [wstream] ", media_url + '|' + _headers])
i = i + 1
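# The fix narrows data to the sources: [...] block before collecting mp4 urls,
# so decoy mp4 strings elsewhere in the unpacked js are skipped, and the loop
# now iterates media_urls instead of clobbering its own variable. The
# narrowing step in isolation (sample string is made up):
from core import scrapertools

data = 'var decoy="http://a/ad.mp4"; sources: [{file:"http://a/ep1.mp4"}]'
block = scrapertools.find_single_match(data, 'sources:\s*\[[^\]]+\]')
if block: data = block
print(scrapertools.find_multiple_matches(data, '(http.*?\.mp4)'))   # -> ['http://a/ep1.mp4']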

View File

@@ -37,7 +37,7 @@ def show_channels(item):
logger.info()
itemlist = []
context = [{"title": "Eliminar este canal",
context = [{"title": config.get_localized_string(50005),
"action": "remove_channel",
"channel": "community"}]

View File

@@ -155,7 +155,7 @@ def check_for_update(overwrite=True):
if not serie.active:
# if the series is not active, discard it
if overwrite_forced == False:
if not overwrite:
# Synchronize watched episodes from the Kodi video library with Alfa's, even if the series is disabled
try:
if config.is_xbmc():  # if it's Kodi, we do it