channels/animesaturn.json (new file, 70 lines)
@@ -0,0 +1,70 @@
{
    "id": "animesaturn",
    "name": "AnimeSaturn",
    "active": false,
    "adult": false,
    "language": ["ita"],
    "thumbnail": "animesaturn.png",
    "banner": "animesaturn.png",
    "categories": ["anime"],
    "settings": [
        {
            "id": "channel_host",
            "type": "text",
            "label": "Host del canale",
            "default": "https://www.animesaturn.com",
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Includi nella ricerca globale",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_anime",
            "type": "bool",
            "label": "Includi in Novità - Anime",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_italiano",
            "type": "bool",
            "label": "Includi in Novità - Italiano",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces",
            "type": "bool",
            "label": "Verifica se i link esistono",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "comprueba_enlaces_num",
            "type": "list",
            "label": "Numero di link da verificare",
            "default": 1,
            "enabled": true,
            "visible": "eq(-1,true)",
            "lvalues": ["1", "3", "5", "10"]
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostra link in lingua...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": ["Non filtrare", "IT"]
        }
    ]
}
channels/animesaturn.py (new file, 432 lines)
@@ -0,0 +1,432 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# AnimeSaturn channel
# Thanks to me
# ----------------------------------------------------------
import inspect
import re
import time
import urlparse

import channelselector
from channels import autoplay, support, filtertools
from core import httptools, tmdb, scrapertools, servertools
from core.item import Item
from platformcode import logger, config

__channel__ = "animesaturn"
host = config.get_setting("channel_host", __channel__)
headers = [['Referer', host]]

IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'fembed']
list_quality = ['default']

# __comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
# __comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)

def mainlist(item):
    support.log(item.channel + ' mainlist')
    itemlist = []
    support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host, 'anime')
    # support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host, 'anime')
    itemlist.append(
        Item(channel=item.channel,
             action="ultimiep",
             url="%s/fetch_pages.php?request=episodes" % host,
             title=support.typo("Novità submenu"),
             extra="",
             contentType='anime',
             folder=True,
             thumbnail=support.thumb())
    )
    # itemlist.append(
    #     Item(channel=item.channel,
    #          action="lista_anime",
    #          url="%s/animeincorso" % host,
    #          title=support.typo("In corso submenu"),
    #          extra="anime",
    #          contentType='anime',
    #          folder=True,
    #          thumbnail=channelselector.get_thumb('on_the_air.png'))
    # )
    itemlist.append(
        Item(channel=item.channel,
             action="list_az",
             url="%s/animelist?load_all=1" % host,
             title=support.typo("Archivio A-Z submenu"),
             extra="anime",
             contentType='anime',
             folder=True,
             thumbnail=channelselector.get_thumb('channels_tvshow_az.png'))
    )
    support.menu(itemlist, 'Cerca', 'search', host, 'anime')


    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)

    itemlist.append(
        Item(channel='setting',
             action="channel_config",
             title=support.typo("Configurazione Canale color lime"),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png'))
    )

    return itemlist

# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
    scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x')
    year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
    if year:
        scrapedtitle = scrapedtitle.replace('(' + year + ')', '')

    return scrapedtitle.strip()


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def lista_anime(item):
    support.log(item.channel + " lista_anime")
    itemlist = []

    PERPAGE = 15

    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)

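    # list_az() packs its per-letter results into item.url as "url||title"
    # entries separated by blank lines ('\n\n'); a '||' in the URL therefore
    # means "unpack the list" instead of downloading a page.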
    if '||' in item.url:
        series = item.url.split('\n\n')
        matches = []
        for i, serie in enumerate(series):
            matches.append(serie.split('||'))
    else:
        # Load the page
        data = httptools.downloadpage(item.url).data

        # Extract the entries
        patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
        matches = re.compile(patron, re.DOTALL).findall(data)

scrapedplot = ""
|
||||
scrapedthumbnail = ""
|
||||
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
|
||||
if (p - 1) * PERPAGE > i: continue
|
||||
if i >= p * PERPAGE: break
|
||||
title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
|
||||
showtitle = title
|
||||
if '(ITA)' in title:
|
||||
title = title.replace('(ITA)','').strip()
|
||||
showtitle = title
|
||||
title += ' '+support.typo(' [ITA] color kod')
|
||||
|
||||
infoLabels = {}
|
||||
itemlist.append(
|
||||
Item(channel=item.channel,
|
||||
extra=item.extra,
|
||||
action="episodios",
|
||||
title=title,
|
||||
url=scrapedurl,
|
||||
thumbnail=scrapedthumbnail,
|
||||
fulltitle=showtitle,
|
||||
show=showtitle,
|
||||
plot=scrapedplot,
|
||||
contentType='episode',
|
||||
originalUrl=scrapedurl,
|
||||
infoLabels=infoLabels,
|
||||
folder=True))
|
||||
|
||||
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
|
||||
|
||||
    # Pagination: the next page is encoded as '{}<n>' appended to the URL,
    # which the next lista_anime() call splits off again at the top
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 action='lista_anime',
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=scrapedurl,
                 args=item.args,
                 thumbnail=support.thumb()))

    return itemlist

# ================================================================================================================


# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
    support.log(item.channel + " episodios")
    itemlist = []

    data = httptools.downloadpage(item.url).data

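    # The episode table is fetched via AJAX: scrape the anime_id from the page,
    # then call /loading_anime with an XMLHttpRequest header, presumably
    # mirroring the site's own JS. The patron below matches the site's
    # (apparently malformed) markup verbatim, e.g. the stray '<strong"' quote.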
    anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')

    data = httptools.downloadpage(
        host + "/loading_anime?anime_id=" + anime_id,
        headers={
            'X-Requested-With': 'XMLHttpRequest'
        }).data

    patron = r'<td style="[^"]+"><b><strong" style="[^"]+">(.+?)</b></strong></td>\s*'
    patron += r'<td style="[^"]+"><a href="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl in matches:
        scrapedtitle = cleantitle(scrapedtitle)
        scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
        scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
        itemlist.append(
            Item(
                channel=item.channel,
                action="findvideos",
                contentType="episode",
                title=scrapedtitle,
                url=urlparse.urljoin(host, scrapedurl),
                fulltitle=scrapedtitle,
                show=scrapedtitle,
                plot=item.plot,
                fanart=item.thumbnail,
                thumbnail=item.thumbnail))

    # tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # support.videolibrary(itemlist, item, 'bold color kod')

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
    support.log(item.channel + " findvideos")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    patron = r'<a href="([^"]+)"><div class="downloadestreaming">'
    url = scrapertools.find_single_match(data, patron)

    data = httptools.downloadpage(url).data
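    # The watch page embeds direct mp4 <source> tags; collect them as playable
    # items before resolving any hosted links via support.server().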
    patron = r"""<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
    matches = re.compile(patron, re.DOTALL).findall(data)
    for video in matches:
        itemlist.append(
            Item(
                channel=item.channel,
                action="play",
                fulltitle=item.fulltitle,
                # label direct streams by their container type (assumed label)
                title="".join([item.title, ' ', support.typo('mp4', 'color kod []')]),
                url=video,
                contentType=item.contentType,
                folder=False))

    # append, so the direct mp4 items above are kept alongside server links
    itemlist += support.server(item, data=data)
    # itemlist = filtertools.get_links(itemlist, item, list_language)

    # Check whether the links are valid
    # if __comprueba_enlaces__:
    #     itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    #
    # autoplay.start(itemlist, item)

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------

def ultimiep(item):
    logger.info(item.channel + " ultimiep")
    itemlist = []
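    # The "latest episodes" feed is paged via POST: item.extra carries the page
    # number requested by the "next page" item appended below.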
    post = "page=%s" % item.extra if item.extra else None
    logger.debug(post)
    logger.debug(item.url)
    data = httptools.downloadpage(
        item.url, post=post, headers={
            'X-Requested-With': 'XMLHttpRequest'
        }).data

    logger.debug(data)

    patron = r"""<a href='[^']+'><div class="locandina"><img alt="[^"]+" src="([^"]+)" title="[^"]+" class="grandezza"></div></a>\s*"""
    patron += r"""<a href='([^']+)'><div class="testo">(.+?)</div></a>\s*"""
    patron += r"""<a href='[^']+'><div class="testo2">(.+?)</div></a>"""
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedthumbnail, scrapedurl, scrapedtitle1, scrapedtitle2 in matches:
        scrapedtitle1 = cleantitle(scrapedtitle1)
        scrapedtitle2 = cleantitle(scrapedtitle2)
        scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2
        itemlist.append(
            Item(channel=item.channel,
                 contentType="tvshow",
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 fulltitle=scrapedtitle1,
                 show=scrapedtitle1,
                 thumbnail=scrapedthumbnail))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pages
    patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva'
    next_page = scrapertools.find_single_match(data, patronvideos)

    if next_page:
        itemlist.append(
            Item(
                channel=item.channel,
                action="ultimiep",
                title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                url=host + "/fetch_pages?request=episodes",
                thumbnail=support.thumb(),
                extra=next_page,
                folder=True))

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
    logger.info(__channel__ + " newest " + categoria)
    itemlist = []
    item = Item()
    item.url = host
    item.extra = ''
    try:
        if categoria == "anime":
            item.url = "%s/fetch_pages?request=episodes" % host
            item.action = "ultimiep"
            itemlist = ultimiep(item)
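            # Drop a trailing "next page" item, which would be out of place in
            # the global Novità listing.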
            if itemlist and itemlist[-1].action == "ultimiep":
                itemlist.pop()

    # Let the search continue on error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def search_anime(item):
    logger.info(item.channel + " search_anime")
    itemlist = []

    data = httptools.downloadpage(host + "/animelist?load_all=1").data
    data = scrapertools.decodeHtmlentities(data)

    texto = item.url.lower().split('+')
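    # The site search is bypassed: download the full A-Z list once and keep
    # only the titles that contain every search token.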
    patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        if not all(t in scrapedtitle.lower() for t in texto):
            continue

        title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)')
        showtitle = title
        if '(ITA)' in title:
            title = title.replace('(ITA)', '').strip()
            showtitle = title
            title += ' ' + support.typo(' [ITA] color kod')

        itemlist.append(
            Item(
                channel=item.channel,
                contentType="episode",
                action="episodios",
                title=title,
                url=scrapedurl,
                fulltitle=title,
                show=showtitle,
                thumbnail=""))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
    logger.info(item.channel + " search")
    itemlist = []
    item.url = texto

    try:
        return search_anime(item)

    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


# ================================================================================================================

# ----------------------------------------------------------------------------------------------------------------

def list_az(item):
    support.log(item.channel + " list_az")
    itemlist = []

    alphabet = dict()

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Entries
    patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        letter = scrapedtitle[0].upper()
        if letter not in alphabet:
            alphabet[letter] = []
        alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
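    # Each letter becomes one menu entry whose URL packs all of its shows as
    # "url||title" lines; lista_anime() detects the '||' and unpacks them.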
    for letter in sorted(alphabet):
        itemlist.append(
            Item(channel=item.channel,
                 action="lista_anime",
                 url='\n\n'.join(alphabet[letter]),
                 title=letter,
                 fulltitle=letter))

    return itemlist


# ================================================================================================================
channels/serietvsubita.json
@@ -1,7 +1,7 @@
 {
     "id": "serietvsubita",
     "name": "Serie TV Sub ITA",
-    "active": false,
+    "active": true,
     "adult": false,
     "language": ["ita"],
     "thumbnail": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
@@ -12,7 +12,7 @@
         "id": "channel_host",
         "type": "text",
         "label": "Host del canale",
-        "default": "http://serietvsubita.xyz/",
+        "default": "http://serietvsubita.xyz",
         "enabled": true,
         "visible": true
     },
@@ -39,6 +39,32 @@
         "default": true,
         "enabled": true,
         "visible": true
     },
+    {
+        "id": "comprueba_enlaces",
+        "type": "bool",
+        "label": "Verifica se i link esistono",
+        "default": false,
+        "enabled": true,
+        "visible": true
+    },
+    {
+        "id": "comprueba_enlaces_num",
+        "type": "list",
+        "label": "Numero di link da verificare",
+        "default": 1,
+        "enabled": true,
+        "visible": "eq(-1,true)",
+        "lvalues": ["1", "3", "5", "10"]
+    },
+    {
+        "id": "filter_languages",
+        "type": "list",
+        "label": "Mostra link in lingua...",
+        "default": 0,
+        "enabled": true,
+        "visible": true,
+        "lvalues": ["Non filtrare", "IT"]
+    }
     ]
 }

channels/serietvsubita.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # ------------------------------------------------------------
 # Channel for Serie Tv Sub ITA
-# Thanks to the Icarus crew
+# Thanks to Icarus crew & Alfa addon
 # ----------------------------------------------------------
 import inspect
 import re
@@ -9,11 +9,11 @@ import time

 import channelselector
 from channels import autoplay, support, filtertools
-from core import httptools, tmdb, scrapertools
+from core import httptools, tmdb, scrapertools, servertools
 from core.item import Item
 from platformcode import logger, config

-host = config.get_setting("channel_host", 'serietvsubita')
+__channel__ = "serietvsubita"
+host = config.get_setting("channel_host", __channel__)
 headers = [['Referer', host]]

 IDIOMAS = {'Italiano': 'IT'}
@@ -21,6 +21,8 @@ list_language = IDIOMAS.values()
 list_servers = ['gounlimited','verystream','streamango','openload']
 list_quality = ['default']

+# __comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
+# __comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)


 def mainlist(item):
@@ -50,7 +52,7 @@ def mainlist(item):
 # ----------------------------------------------------------------------------------------------------------------
 def cleantitle(scrapedtitle):
     scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
-    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones –','')
+    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones –','')
     year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
     if year:
         scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
@@ -73,12 +75,18 @@ def lista_serie(item):
         item.url, p = item.url.split('{}')
         p = int(p)

-    # Download the page
-    data = httptools.downloadpage(item.url).data
+    if '||' in item.url:
+        series = item.url.split('\n\n')
+        matches = []
+        for i, serie in enumerate(series):
+            matches.append(serie.split('||'))
+    else:
+        # Download the page
+        data = httptools.downloadpage(item.url).data

-    # Extract the entries
-    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+        # Extract the entries
+        patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
+        matches = re.compile(patron, re.DOTALL).findall(data)

     for i, (scrapedurl, scrapedtitle) in enumerate(matches):
         scrapedplot = ""
@@ -89,13 +97,15 @@ def lista_serie(item):
         itemlist.append(
             Item(channel=item.channel,
                  extra=item.extra,
-                 action="episodes",
+                 action="episodios",
                  title=title,
                  url=scrapedurl,
                  thumbnail=scrapedthumbnail,
                  fulltitle=title,
                  show=title,
                  plot=scrapedplot,
+                 contentType='episode',
+                 originalUrl=scrapedurl,
                  folder=True))

     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -118,35 +128,87 @@ def lista_serie(item):

 # ----------------------------------------------------------------------------------------------------------------
-def episodes(item):
-    support.log(item.channel + " episodes")
-    itemlist = []
+def episodios(item, itemlist=[]):
+    support.log(item.channel + " episodios")
+    # itemlist = []

     data = httptools.downloadpage(item.url).data

     patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
     patron += '<p><a href="([^"]+)">'
     matches = re.compile(patron, re.DOTALL).findall(data)
+    logger.debug(itemlist)
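+    # Caveat: a mutable default argument ([]) is shared across calls in Python;
+    # this works here only because the recursive pagination call below always
+    # passes itemlist explicitly, but a None default would be safer.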
     for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
         scrapedplot = ""
         scrapedtitle = cleantitle(scrapedtitle)
-        title = scrapedtitle.split(" S0")[0].strip()
-        title = title.split(" S1")[0].strip()
-        title = title.split(" S2")[0].strip()
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="findvideos",
-                 fulltitle=scrapedtitle,
-                 show=scrapedtitle,
-                 title=scrapedtitle,
-                 url=scrapedurl,
-                 thumbnail=scrapedthumbnail,
-                 plot=scrapedplot,
-                 contentSerieName=title,
-                 folder=True))
+        if "(Completa)" in scrapedtitle:
+            data = httptools.downloadpage(scrapedurl).data
+            scrapedtitle = scrapedtitle.replace(" – Miniserie", " – Stagione 1")
+            title = scrapedtitle.split(" – Stagione")[0].strip()
+            # get the season number
+            season = scrapertools.find_single_match(scrapedtitle, 'Stagione ([0-9]*)')
+            blocco = scrapertools.find_single_match(data, '<div class="entry">[\s\S.]*?<div class="post')
+            # blocco = scrapertools.decodeHtmlentities(blocco)
+            blocco = blocco.replace('<strong>Episodio ', '<strong>Episodio ').replace(' </strong>', ' </strong>')
+            blocco = blocco.replace('<strong>Episodio ', '<strong>S' + season.zfill(2) + 'E')
+            # logger.debug(blocco)
+            # check whether the episodes are in S0xE0x format
+            matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
+            episodes = []
+            if len(matches) > 0:
+                for fullepisode_s, season, episode in matches:
+                    season = season.lstrip("0")
+                    # episode = episode.lstrip("0")
+                    episodes.append([
+                        "".join([season, "x", episode]),
+                        season,
+                        episode
+                    ])
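+            # At this point `episodes` holds [display "SxE", season, episode]
+            # triples parsed from the "S##E##" markers normalized above.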
+            # else:
+            #     # blocco = blocco.replace('>Episodio 0','>Episodio-0')
+            #     matches = scrapertools.find_multiple_matches(blocco, r'Episodio[^\d](\d*)')
+            #     logger.debug(blocco)
+            #     logger.debug(matches)
+            #     episodes = []
+            #     if len(matches) > 0:
+            #         for string, episode in matches:
+            #             episodes.append([
+            #                 "".join([season, "x", episode]),
+            #                 season,
+            #                 episode
+            #             ])
+        else:
+            title = scrapedtitle.split(" S0")[0].strip()
+            title = title.split(" S1")[0].strip()
+            title = title.split(" S2")[0].strip()
+            episodes = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
+            # logger.debug(scrapedtitle)
+            # logger.debug(episodes)
+
+        for fullepisode, season, episode in episodes:
+            infoLabels = {}
+            infoLabels['season'] = season
+            infoLabels['episode'] = episode
+
+            itemlist.append(
+                Item(channel=item.channel,
+                     extra=item.extra,
+                     action="findvideos",
+                     fulltitle=scrapedtitle,
+                     show=scrapedtitle,
+                     title=fullepisode,
+                     url=scrapedurl,
+                     thumbnail=scrapedthumbnail,
+                     plot=scrapedplot,
+                     contentSerieName=title,
+                     infoLabels=infoLabels,
+                     folder=True))

     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

@@ -154,16 +216,11 @@ def episodios(item, itemlist=[]):
     patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
     next_page = scrapertools.find_single_match(data, patron)
     if next_page != "":
-        itemlist.append(
-            Item(channel=item.channel,
-                 action='episodes',
-                 contentType=item.contentType,
-                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
-                 url=next_page,
-                 args=item.args,
-                 thumbnail=support.thumb()))
-
-        # support.videolibrary(itemlist,item,'bold color kod')
+        item.url = next_page
+        itemlist = episodios(item, itemlist)
+    else:
+        item.url = item.originalUrl
+        support.videolibrary(itemlist, item, 'bold color kod')

     return itemlist

@@ -175,6 +232,24 @@ def findvideos(item):

     data = httptools.downloadpage(item.url).data

+    # grab the block that contains the links
+    blocco = scrapertools.find_single_match(data, '<div class="entry">[\s\S.]*?<div class="post')
+    blocco = blocco.replace('..:: Episodio ', 'Episodio ')
+
+    matches = scrapertools.find_multiple_matches(blocco, '(S(\d*)E(\d*))\s')
+    if len(matches) > 0:
+        for fullseasonepisode, season, episode in matches:
+            blocco = blocco.replace(fullseasonepisode + ' ', 'Episodio ' + episode + ' ')
+
+    blocco = blocco.replace('Episodio ', '..:: Episodio ')
+    logger.debug(blocco)
+
+    episodio = item.title.replace(str(item.contentSeason) + "x", '')
+    patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio
+    matches = re.compile(patron, re.DOTALL).findall(blocco)
+    if len(matches):
+        data = matches[0][0]
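+    # The replaces above rewrite every episode heading to the same
+    # "..:: Episodio N" form, so the slice between this episode's marker and
+    # the next one (or the next post) contains only this episode's links.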
+
     patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
     matches = re.compile(patron, re.DOTALL).findall(data)
     for keeplinks, id in matches:
@@ -187,7 +262,12 @@ def findvideos(item):
     itemlist = support.server(item, data=data)
     # itemlist = filtertools.get_links(itemlist, item, list_language)

-    autoplay.start(itemlist, item)
+    # Check whether the links are valid
+    # if __comprueba_enlaces__:
+    #     itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
+    #
+    # autoplay.start(itemlist, item)

     return itemlist

@@ -196,11 +276,11 @@ def findvideos(item):

 # ----------------------------------------------------------------------------------------------------------------
 def peliculas_tv(item):
-    logger.info("icarus serietvsubita peliculas_tv")
+    logger.info(item.channel + " peliculas_tv")
     itemlist = []

     data = httptools.downloadpage(item.url).data
-    logger.debug(data)
+    # logger.debug(data)
     patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'

     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -233,19 +313,20 @@ def peliculas_tv(item):

     # Pagination
     patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
-    next_page = scrapertools.find_single_match(data, patron)
-    if next_page != "":
-        if item.extra == "search_tv":
-            next_page = next_page.replace('&#038;', '&')
-        itemlist.append(
-            Item(channel=item.channel,
-                 action='peliculas_tv',
-                 contentType=item.contentType,
-                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
-                 url=next_page,
-                 args=item.args,
-                 extra=item.extra,
-                 thumbnail=support.thumb()))
+    support.nextPage(itemlist, item, data, patron)
+    # next_page = scrapertools.find_single_match(data, patron)
+    # if next_page != "":
+    #     if item.extra == "search_tv":
+    #         next_page = next_page.replace('&#038;', '&')
+    #     itemlist.append(
+    #         Item(channel=item.channel,
+    #              action='peliculas_tv',
+    #              contentType=item.contentType,
+    #              title=support.typo(config.get_localized_string(30992), 'color kod bold'),
+    #              url=next_page,
+    #              args=item.args,
+    #              extra=item.extra,
+    #              thumbnail=support.thumb()))


     return itemlist
@@ -257,11 +338,11 @@ def peliculas_tv(item):

 # ----------------------------------------------------------------------------------------------------------------
 def newest(categoria):
-    logger.info('serietvsubita' + " newest" + categoria)
+    logger.info(__channel__ + " newest " + categoria)
     itemlist = []
     item = Item()
-    item.url = host;
-    item.extra = 'serie';
+    item.url = host
+    item.extra = 'serie'
     try:
         if categoria == "series":
             itemlist = peliculas_tv(item)
@@ -282,32 +363,6 @@ def newest(categoria):

 # ----------------------------------------------------------------------------------------------------------------
 def search(item, texto):
     logger.info(item.channel + " search")
     itemlist = []
-    item.extra = "search_tv"
-
-    item.url = host + "/?s=" + texto + "&op.x=0&op.y=0"
-
-    try:
-        return peliculas_tv(item)
-
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("%s" % line)
-        return []
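+    # The site's search endpoint is no longer queried: the body below filters
+    # the full category list by the search text instead (the old implementation
+    # is kept, commented out, at the end of the function).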

 # ================================================================================================================

 # ----------------------------------------------------------------------------------------------------------------
-def list_az(item):
-    support.log(item.channel+" list_az")
-    itemlist = []
-    PERPAGE = 50
-
-    p = 1
-    if '{}' in item.url:
-        item.url, p = item.url.split('{}')
-        p = int(p)
-
     # Download the page
     data = httptools.downloadpage(item.url).data
@@ -317,35 +372,72 @@ def list_az(item):
     patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
     matches = re.compile(patron, re.DOTALL).findall(data)

     for i, (scrapedurl, scrapedtitle) in enumerate(matches):
-        scrapedplot = ""
-        scrapedthumbnail = ""
-        if (p - 1) * PERPAGE > i: continue
-        if i >= p * PERPAGE: break
-        title = cleantitle(scrapedtitle)
-        itemlist.append(
-            Item(channel=item.channel,
-                 extra=item.extra,
-                 action="episodes",
-                 title=title,
-                 url=scrapedurl,
-                 fulltitle=title,
-                 show=title,
-                 plot=scrapedplot,
-                 folder=True))
+        if texto.upper() in scrapedtitle.upper():
+            scrapedthumbnail = ""
+            scrapedplot = ""
+            title = cleantitle(scrapedtitle)
+            itemlist.append(
+                Item(channel=item.channel,
+                     extra=item.extra,
+                     action="episodios",
+                     title=title,
+                     url=scrapedurl,
+                     thumbnail=scrapedthumbnail,
+                     fulltitle=title,
+                     show=title,
+                     plot=scrapedplot,
+                     contentType='episode',
+                     originalUrl=scrapedurl,
+                     folder=True))
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

-    # Pagination
-    if len(matches) >= p * PERPAGE:
-        scrapedurl = item.url + '{}' + str(p + 1)
     return itemlist

+    # item.extra = "search_tv"
+    #
+    # item.url = host + "/?s=" + texto + "&op.x=0&op.y=0"
+    #
+    # try:
+    #     return peliculas_tv(item)
+    #
+    # except:
+    #     import sys
+    #     for line in sys.exc_info():
+    #         logger.error("%s" % line)
+    #     return []


 # ================================================================================================================

 # ----------------------------------------------------------------------------------------------------------------


+def list_az(item):
+    support.log(item.channel + " list_az")
+    itemlist = []
+
+    alphabet = dict()
+
+    # Download the page
+    data = httptools.downloadpage(item.url).data
+
+    # Entries
+    patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
+        letter = scrapedtitle[0].upper()
+        if letter not in alphabet:
+            alphabet[letter] = []
+        alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
+
+    for letter in sorted(alphabet):
+        itemlist.append(
+            Item(channel=item.channel,
-                 action='list_az',
-                 contentType=item.contentType,
-                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
-                 url=scrapedurl,
-                 args=item.args,
-                 extra=item.extra,
-                 thumbnail=support.thumb()))
+                 action="lista_serie",
+                 url='\n\n'.join(alphabet[letter]),
+                 title=letter,
+                 fulltitle=letter))

     return itemlist

channels/serietvu.json
@@ -39,6 +39,32 @@
         "default": true,
         "enabled": true,
         "visible": true
     },
+    {
+        "id": "comprueba_enlaces",
+        "type": "bool",
+        "label": "Verifica se i link esistono",
+        "default": false,
+        "enabled": true,
+        "visible": true
+    },
+    {
+        "id": "comprueba_enlaces_num",
+        "type": "list",
+        "label": "Numero di link da verificare",
+        "default": 1,
+        "enabled": true,
+        "visible": "eq(-1,true)",
+        "lvalues": ["1", "3", "5", "10"]
+    },
+    {
+        "id": "filter_languages",
+        "type": "list",
+        "label": "Mostra link in lingua...",
+        "default": 0,
+        "enabled": true,
+        "visible": true,
+        "lvalues": ["Non filtrare", "IT"]
+    }
     ]
 }

channels/serietvu.py
@@ -1,17 +1,17 @@
 # -*- coding: utf-8 -*-
 # ------------------------------------------------------------
 # Channel for SerieTVU
-# Thanks to the Icarus crew
+# Thanks to Icarus crew & Alfa addon
 # ----------------------------------------------------------
 import re

 import channelselector
 from channels import autoplay, support, filtertools
-from core import httptools, tmdb, scrapertools
+from core import httptools, tmdb, scrapertools, servertools
 from core.item import Item
 from platformcode import logger, config

-host = config.get_setting("channel_host", 'serietvu')
+__channel__ = 'serietvu'
+host = config.get_setting("channel_host", __channel__)
 headers = [['Referer', host]]

 IDIOMAS = {'Italiano': 'IT'}
@@ -19,6 +19,9 @@ list_language = IDIOMAS.values()
 list_servers = ['speedvideo']
 list_quality = ['default']

+# __comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
+# __comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
+


 def mainlist(item):
@@ -30,9 +33,8 @@ def mainlist(item):
     support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow')
     support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')

-
-    # autoplay.init(item.channel, list_servers, list_quality)
-    # autoplay.show_option(item.channel, itemlist)
+    autoplay.init(item.channel, list_servers, list_quality)
+    autoplay.show_option(item.channel, itemlist)

     itemlist.append(
         Item(channel='setting',
@@ -87,7 +89,7 @@ def lista_serie(item):
                  thumbnail=scrapedimg,
                  show=scrapedtitle,
                  infoLabels=infoLabels,
-                 contentType='tvshow',
+                 contentType='episode',
                  folder=True))

     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -129,16 +131,9 @@ def episodios(item):
                  extra=scrapedextra,
                  folder=True))

-    if config.get_videolibrary_support() and len(itemlist) != 0:
-        itemlist.append(
-            Item(channel=item.channel,
-                 title=support.typo(config.get_localized_string(30161) + ' bold color kod'),
-                 thumbnail=support.thumb(),
-                 url=item.url,
-                 action="add_serie_to_library",
-                 extra="episodios",
-                 contentSerieName=item.fulltitle,
-                 show=item.show))
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

+    support.videolibrary(itemlist, item, 'bold color kod')

     return itemlist

@@ -151,7 +146,11 @@ def findvideos(item):
     itemlist = support.server(item, data=item.url)
     # itemlist = filtertools.get_links(itemlist, item, list_language)

-    autoplay.start(itemlist, item)
+    # Check whether the links are valid
+    # if __comprueba_enlaces__:
+    #     itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
+    #
+    # autoplay.start(itemlist, item)

     return itemlist

@@ -176,7 +175,11 @@ def findepisodevideo(item):
     itemlist = support.server(item, data=matches[0][0])
     # itemlist = filtertools.get_links(itemlist, item, list_language)

-    autoplay.start(itemlist, item)
+    # Check whether the links are valid
+    # if __comprueba_enlaces__:
+    #     itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
+    #
+    # autoplay.start(itemlist, item)

     return itemlist

@@ -229,7 +232,7 @@ def latestep(item):

 # ----------------------------------------------------------------------------------------------------------------
 def newest(categoria):
-    logger.info('serietvu' + " newest" + categoria)
+    logger.info(__channel__ + " newest " + categoria)
     itemlist = []
     item = Item()
     try:

channels/support.py
@@ -465,8 +465,10 @@ def nextPage(itemlist, item, data, patron, function_level=1):
     # If the call is direct, leave it blank

     next_page = scrapertoolsV2.find_single_match(data, patron)
-    if 'http' not in next_page:
-        next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page

     if next_page != "":
+        if 'http' not in next_page:
+            next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
         log('NEXT= ', next_page)
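+        # Doing the relative-URL fix inside the guard avoids prepending the
+        # host to an empty match when there is no next page.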
resources/media/channels/banner/animesaturn.png (new binary file, 59 KiB, not shown)
resources/media/channels/thumb/animesaturn.png (new binary file, 58 KiB, not shown)