Various fixes (#42)

* Improvements to the Guardaserie.click and Fastsubita channels and to support

* Refactor series channels to use support, with fixes

* Refactor series/anime channels to use support, with fixes
New server animeworld.biz

* Fix videolibrary update
4l3x87
2019-06-01 10:00:22 +02:00
committed by mac12m99
parent 352312c7c5
commit e85ef540ff
10 changed files with 308 additions and 484 deletions

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for AnimeSaturn
# Thanks to me
# Thanks to 4l3x87
# ----------------------------------------------------------
import re
@@ -10,6 +10,7 @@ import urlparse
import channelselector
from core import httptools, tmdb, support, scrapertools, jsontools
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay, autorenumber
@@ -19,30 +20,19 @@ headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload','fembed']
list_quality = ['default']
list_servers = ['openload', 'fembed', 'animeworld']
list_quality = ['default', '480p', '720p', '1080p']
def mainlist(item):
support.log(item.channel + 'mainlist')
log()
itemlist = []
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host,args=['tvshow','alfabetico'])
support.menu(itemlist, 'Novità bold', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host, 'tvshow')
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host, args=['tvshow', 'alfabetico'])
support.menu(itemlist, 'Cerca', 'search', host)
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
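support.aplay and support.channel_config are new helpers that absorb the autoplay and settings-entry boilerplate deleted above. A minimal sketch of what they presumably wrap, reconstructed from the removed lines (not the actual support implementation; typo and thumb are support's own formatting helpers):
from core.item import Item
from specials import autoplay
import channelselector
def aplay(item, itemlist, list_servers, list_quality):
    # assumed: the same two calls the channels used to make inline
    autoplay.init(item.channel, list_servers, list_quality)
    autoplay.show_option(item.channel, itemlist)
def channel_config(item, itemlist):
    # assumed: the same settings entry the channels used to append inline
    itemlist.append(
        Item(channel='setting',
             action='channel_config',
             title=typo('Configurazione Canale color lime'),
             config=item.channel,
             folder=False,
             thumbnail=channelselector.get_thumb('setting_0.png')))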
@@ -50,7 +40,7 @@ def mainlist(item):
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('', '\'').replace('×','x')
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('', '\'').replace('×', 'x').replace('"', "'")
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
@@ -62,7 +52,7 @@ def cleantitle(scrapedtitle):
# ----------------------------------------------------------------------------------------------------------------
def lista_anime(item):
support.log(item.channel + " lista_anime")
log()
itemlist = []
PERPAGE = 15
@@ -78,37 +68,33 @@ def lista_anime(item):
for i, serie in enumerate(series):
matches.append(serie.split('||'))
else:
# Load the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
scrapedplot = ""
scrapedthumbnail = ""
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)')
movie = False
showtitle = title
if '(ITA)' in title:
title = title.replace('(ITA)','').strip()
title = title.replace('(ITA)', '').strip()
showtitle = title
title += ' '+support.typo(' (ITA)')
else:
title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
infoLabels = {}
if 'Akira' in title:
movie = True
infoLabels['year']= 1988
infoLabels['year'] = 1988
if 'Dragon Ball Super Movie' in title:
movie = True
infoLabels['year'] = 2019
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
@@ -130,15 +116,7 @@ def lista_anime(item):
# Pagination
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
action='lista_anime',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=scrapedurl,
args=item.args,
thumbnail=support.thumb()))
support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))
return itemlist
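support.nextPage replaces the hand-rolled pagination Item. The '{}' token smuggles the next page number inside the url, and the receiving action splits it back out (the item.url.split('{}') / p = int(p) logic at the top of lista_anime). A sketch of what nextPage presumably builds, mirroring the Item it replaces here:
def nextPage(itemlist, item, data='', patron='', next_page=''):
    # assumed behaviour: find the next-page url if not given, then append
    # the same localized 'next page' Item the channels used to build inline
    if not next_page and patron:
        next_page = scrapertools.find_single_match(data, patron)
    if next_page:
        itemlist.append(
            Item(channel=item.channel,
                 action=item.action,
                 contentType=item.contentType,
                 title=support.typo(config.get_localized_string(30992), 'color kod bold'),
                 url=next_page,
                 args=item.args,
                 thumbnail=support.thumb()))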
@@ -148,17 +126,14 @@ def lista_anime(item):
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
support.log(item.channel + " episodios")
log()
itemlist = []
data = httptools.downloadpage(item.url).data
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')
#movie or series
# movie or series
movie = scrapertools.find_single_match(data, r'\Episodi:</b>\s(\d*)\sMovie')
data = httptools.downloadpage(
host + "/loading_anime?anime_id=" + anime_id,
headers={
@@ -167,7 +142,7 @@ def episodios(item):
patron = r'<td style="[^"]+"><b><strong" style="[^"]+">(.+?)</b></strong></td>\s*'
patron += r'<td style="[^"]+"><a href="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
scrapedtitle = cleantitle(scrapedtitle)
@@ -187,62 +162,57 @@ def episodios(item):
fanart=item.thumbnail,
thumbnail=item.thumbnail))
if(((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType!='movie'):
if ((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie) and item.contentType != 'movie':
item.url = itemlist[0].url
item.contentType = 'movie'
return findvideos(item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist, item)
support.videolibrary(itemlist,item,'bold color kod')
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
log()
originalItem = item
if(item.contentType == 'movie'):
if item.contentType == 'movie':
episodes = episodios(item)
if(len(episodes)>0):
if len(episodes) > 0:
item.url = episodes[0].url
itemlist = []
data = httptools.downloadpage(item.url).data
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub(r'\n|\t|\s+', ' ', data)
patron = r'<a href="([^"]+)"><div class="downloadestreaming">'
url = scrapertools.find_single_match(data, patron)
data = httptools.downloadpage(url).data
data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data
data = re.sub(r'\n|\t|\s+', ' ', data)
itemlist = support.server(item, data=data)
if item.contentType == 'movie':
support.videolibrary(itemlist, item, 'color kod')
# Check whether the links are valid
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def ultimiep(item):
logger.info(item.channel + "ultimiep")
log()
itemlist = []
post = "page=%s" % item.args['page'] if item.args and item.args['page'] else None
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
post = "page=%s" % p if p > 1 else None
data = httptools.downloadpage(
item.url, post=post, headers={
@@ -259,14 +229,23 @@ def ultimiep(item):
scrapedtitle2 = cleantitle(scrapedtitle2)
scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + ''
title = scrapedtitle
showtitle = scrapedtitle
if '(ITA)' in title:
title = title.replace('(ITA)', '').strip()
showtitle = title
else:
title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
contentType="tvshow",
contentType="episode",
action="findvideos",
title=scrapedtitle,
title=title,
url=scrapedurl,
fulltitle=scrapedtitle1,
show=scrapedtitle1,
show=showtitle,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -275,27 +254,17 @@ def ultimiep(item):
patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva'
next_page = scrapertools.find_single_match(data, patronvideos)
if next_page:
itemlist.append(
Item(
channel=item.channel,
action="ultimiep",
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=item.url,
thumbnail= support.thumb(),
args={'page':next_page},
folder=True))
support.nextPage(itemlist, item, next_page=(item.url + '{}' + next_page))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info(__channel__ + " newest" + categoria)
log(categoria)
itemlist = []
item = Item()
item.url = host
@@ -323,42 +292,9 @@ def newest(categoria):
# ----------------------------------------------------------------------------------------------------------------
def search_anime(item, texto):
logger.info(item.channel + " search_anime: "+texto)
log(texto)
itemlist = []
# data = httptools.downloadpage(host + "/animelist?load_all=1").data
# data = scrapertools.decodeHtmlentities(data)
#
# texto = texto.lower().split('+')
#
# patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
# matches = re.compile(patron, re.DOTALL).findall(data)
#
# for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle)
# for scrapedurl, scrapedtitle in matches
# if all(t in scrapedtitle.lower()
# for t in texto)]:
#
# title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
# showtitle = title
# if '(ITA)' in title:
# title = title.replace('(ITA)','').strip()
# showtitle = title
# title += ' '+support.typo(' [ITA] color kod')
#
# itemlist.append(
# Item(
# channel=item.channel,
# contentType="episode",
# action="episodios",
# title=title,
# url=scrapedurl,
# fulltitle=title,
# show=showtitle,
# thumbnail=""))
#
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data
jsondata = jsontools.load(data)
@@ -368,16 +304,15 @@ def search_anime(item, texto):
if 'Anime non esistente' in data:
continue
else:
title = title.replace('(ita)','(ITA)')
title = title.replace('(ita)', '(ITA)')
showtitle = title
if '(ITA)' in title:
title = title.replace('(ITA)', '').strip()
showtitle = title
title += ' ' + support.typo(' (ITA)')
else:
title += ' ' + support.typo('Sub-ITA', '_ [] color kod')
url = "%s/anime/%s" % (host, data)
logger.debug(title)
logger.debug(url)
itemlist.append(
Item(
@@ -397,7 +332,7 @@ def search_anime(item, texto):
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info(item.channel + " search")
log(texto)
itemlist = []
try:
@@ -416,23 +351,20 @@ def search(item, texto):
def list_az(item):
support.log(item.channel+" list_az")
log()
itemlist = []
alphabet = dict()
# Download the page
data = httptools.downloadpage(item.url).data
# Entries
patron = r'<a href="([^"]+)"[^>]*?>[^>]*?>(.+?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(scrapedurl+'||'+scrapedtitle)
alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
for letter in sorted(alphabet):
itemlist.append(
@@ -444,4 +376,4 @@ def list_az(item):
return itemlist
# ================================================================================================================
# ================================================================================================================

View File

@@ -18,11 +18,9 @@ headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'Italiano'}
list_language = IDIOMAS.values()
list_servers = ['diretto']
list_quality = []
list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
list_quality = ['default', '480p', '720p', '1080p']
checklinks = config.get_setting('checklinks', 'animeworld')
checklinks_number = config.get_setting('checklinks_number', 'animeworld')
def mainlist(item):
@@ -47,19 +45,10 @@ def mainlist(item):
def generi(item):
log()
itemlist = []
patron_block = r'</i>\sGeneri</a>\s*<ul class="sub">(.*?)</ul>'
patron = r'<a href="([^"]+)"\stitle="([^"]+)">'
matches = support.match(item,patron, patron_block, headers)[0]
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(
channel=item.channel,
action="video",
title=scrapedtitle,
url="%s%s" % (host,scrapedurl)))
return itemlist
return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='video')
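support.scrape goes one step further than support.match: a single declarative call takes the regex, the ordered names of its capture groups, an optional enclosing block, and the action for the resulting Items. A rough sketch of the idea under those assumptions (the real helper handles more fields, e.g. the blacklist used by serietvu further down):
def scrape(item, patron, listGroups, patron_block='', action='', blacklist=()):
    matches = support.match(item, patron, patron_block, headers)[0]
    itemlist = []
    for m in matches:
        groups = dict(zip(listGroups, m if isinstance(m, tuple) else (m,)))
        if groups.get('title') in blacklist:
            continue
        itemlist.append(Item(channel=item.channel,
                             action=action,
                             title=groups.get('title', ''),
                             # relative urls get the host prefix, as in the removed generi loop
                             url=host + groups.get('url', '')))
    return itemlist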
# Create Filter Menu ======================================================
@@ -183,7 +172,7 @@ def video(item):
log()
itemlist = []
matches, data = support.match(item, r'<a href="([^"]+)" class="poster.*?> <img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>')
matches, data = support.match(item, r'<a href="([^"]+)" class="poster.*?>\s<img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>', headers=headers)
for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches:
# Look for info such as year or language in the title
@@ -231,6 +220,9 @@ def video(item):
# Concatenate the information
lang = support.typo('Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else ''
info = ep + lang + year + ova + ona + movie + special
# Build the title to display
@@ -329,25 +321,6 @@ def findvideos(item):
videoData +='\n'+json['grabber']
if serverid == '33':
post = urllib.urlencode({'r': '', 'd': 'www.animeworld.biz'})
dataJson = httptools.downloadpage(json['grabber'].replace('/v/','/api/source/'),headers=[['x-requested-with', 'XMLHttpRequest']],post=post).data
json = jsontools.load(dataJson)
log(json['data'])
if json['data']:
for file in json['data']:
itemlist.append(
Item(
channel=item.channel,
action="play",
title='diretto',
url=file['file'],
quality=file['label'],
server='directo',
show=item.show,
contentType=item.contentType,
folder=False))
if serverid == '28':
itemlist.append(
Item(

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks Icarus crew & Alfa addon
# Channel for fastsubita
# Thanks Icarus crew & Alfa addon & 4l3x87
# ------------------------------------------------------------
from core import scrapertools, httptools, tmdb, support
@@ -17,7 +17,7 @@ list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vi
list_quality = ['default']
headers = [
['Host', 'fastsubita.com'],
['Host', host.split("//")[-1].split("/")[0]],
['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'],
['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
['Accept-Language', 'en-US,en;q=0.5'],
@@ -104,7 +104,9 @@ def pelicuals_tv(item):
else:
scrapedurl = "http:" + scrapedurl
title = scraped_1 + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " Sub-ITA"
serie = cleantitle(scraped_1)
title = serie + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
@@ -115,9 +117,9 @@ def pelicuals_tv(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scraped_1,
show=serie,
extra=item.extra,
contentSerieName=scraped_1,
contentSerieName=serie,
contentLanguage='Sub-ITA',
infoLabels=infoLabels,
folder=True))
@@ -301,7 +303,7 @@ def episodios(item, itemlist=[]):
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode[2]
title = infoLabels['season'] + 'x' + infoLabels['episode'] + " Sub-ITA"
title = infoLabels['season'] + 'x' + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod')
if "http:" not in scrapedurl:
scrapedurl = "http:" + scrapedurl

View File

@@ -1,18 +1,16 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for guardaserie.click
# Thanks to Icarus crew & Alfa addon
# Channel for Guardaserie.click
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ------------------------------------------------------------
import re
import channelselector
from core import httptools, scrapertools, servertools, support
from core import httptools, scrapertools, support
from core import tmdb
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay
__channel__ = 'guardaserieclick'
host = config.get_setting("channel_host", __channel__)
@@ -163,7 +161,7 @@ def serietvaggiornate(item):
infoLabels['season'] = episode[0][0]
title = str(
"%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], contentlanguage)).strip()
"%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], support.typo(contentlanguage, '_ [] color kod') if contentlanguage else '')).strip()
itemlist.append(
Item(channel=item.channel,
@@ -247,7 +245,7 @@ def episodios(item):
scrapedepisodetitle = cleantitle(scrapedepisodetitle)
title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip()
if 'SUB-ITA' in scrapedtitle:
title += " Sub-ITA"
title += " "+support.typo("Sub-ITA", '_ [] color kod')
infoLabels = {}
infoLabels['season'] = scrapedseason
@@ -278,7 +276,7 @@ def episodios(item):
# ----------------------------------------------------------------------------------------------------------------
def findepvideos(item):
log()
data = httptools.downloadpage(item.url, headers=headers).data
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
matches = scrapertools.find_multiple_matches(data, item.extra)
data = "\r\n".join(matches[0])
item.contentType = 'movie'

View File

@@ -1,16 +1,16 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Serie Tv Sub ITA
# Thanks to Icarus crew & Alfa addon
# Channel for Serietvsubita
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ----------------------------------------------------------
import re
import time
import channelselector
from core import httptools, tmdb, scrapertools, support
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay
__channel__ = "serietvsubita"
host = config.get_setting("channel_host", __channel__)
@@ -18,33 +18,19 @@ headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['gounlimited','verystream','streamango','openload']
list_servers = ['gounlimited', 'verystream', 'streamango', 'openload']
list_quality = ['default']
# checklinks = config.get_setting('checklinks', __channel__)
# checklinks_number = config.get_setting('checklinks_number', __channel__)
def mainlist(item):
support.log(item.channel + 'mainlist')
log()
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
support.menu(itemlist, 'Novità bold', 'peliculas_tv', host, 'tvshow')
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host, 'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host, 'tvshow', args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
@@ -52,20 +38,57 @@ def mainlist(item):
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones ','').replace('In The Dark 2019','In The Dark (2019)').strip()
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones ','')\
.replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip()
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
log()
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub(r'\n|\t|\s+', ' ', data)
# grab the block containing the links
blocco = scrapertools.find_single_match(data, r'<div class="entry">([\s\S.]*?)<div class="post').replace('..:: Episodio ', 'Episodio ').strip()
matches = scrapertools.find_multiple_matches(blocco, '(S(\d*)E(\d*))\s')
if len(matches) > 0:
for fullseasonepisode, season, episode in matches:
blocco = blocco.replace(fullseasonepisode + ' ', 'Episodio ' + episode + ' ')
blocco = blocco.replace('Episodio ', '..:: Episodio ')
episodio = item.infoLabels['episode']
patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio
log(patron)
log(blocco)
matches = scrapertools.find_multiple_matches(blocco, patron)
if len(matches):
data = matches[0][0]
patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
for keeplinks, id in matches:
headers2 = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
['Referer', keeplinks]]
html = httptools.downloadpage(keeplinks, headers=headers2).data
data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
return support.server(item, data=data)
# ================================================================================================================
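The keeplinks.co/.eu links matched above are click-through protectors; findvideos skips the interstitial by pre-setting the flag[<id>] and nopopatall cookies before requesting the page. The same trick isolated as a standalone helper (resolve_keeplinks is a made-up name; the cookie recipe and the final pattern come straight from the code above):
import time
from core import httptools, scrapertools
def resolve_keeplinks(keeplinks_url, link_id):
    # pre-set the cookies the interstitial would normally set on click-through
    headers = [['Cookie', 'flag[' + link_id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
               ['Referer', keeplinks_url]]
    html = httptools.downloadpage(keeplinks_url, headers=headers).data
    # the unlocked links sit behind the (misspelled) </lable> tag
    return scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"')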
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
support.log(item.channel + " lista_serie")
log()
itemlist = []
PERPAGE = 15
@@ -81,12 +104,9 @@ def lista_serie(item):
for i, serie in enumerate(series):
matches.append(serie.split('||'))
else:
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
scrapedplot = ""
@@ -112,90 +132,58 @@ def lista_serie(item):
# Pagination
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
action='lista_serie',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=scrapedurl,
args=item.args,
thumbnail=support.thumb()))
support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1)))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item, itemlist=[]):
support.log(item.channel + " episodios")
# itemlist = []
log()
patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
patron += r'<p><a href="([^"]+)">'
data = httptools.downloadpage(item.url).data
matches, data = support.match(item, patron, headers=headers)
patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
patron += '<p><a href="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
logger.debug(itemlist)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
if "(Completa)" in scrapedtitle:
data = httptools.downloadpage(scrapedurl).data
scrapedtitle = scrapedtitle.replace(" Miniserie"," Stagione 1")
data = httptools.downloadpage(scrapedurl, headers=headers).data
scrapedtitle = scrapedtitle.replace(" Miniserie", " Stagione 1")
title = scrapedtitle.split(" Stagione")[0].strip()
# grab the season
season = scrapertools.find_single_match(scrapedtitle,'Stagione ([0-9]*)')
season = scrapertools.find_single_match(scrapedtitle, 'Stagione ([0-9]*)')
blocco = scrapertools.find_single_match(data, '<div class="entry">[\s\S.]*?<div class="post')
# blocco = scrapertools.decodeHtmlentities(blocco)
blocco = blocco.replace('<strong>Episodio ','<strong>Episodio ').replace(' </strong>',' </strong>')
blocco = blocco.replace('<strong>Episodio ','<strong>S'+season.zfill(2)+'E')
# logger.debug(blocco)
# check whether the episodes are in the S0XE0X format
matches = scrapertools.find_multiple_matches(blocco,r'(S(\d*)E(\d*))\s')
blocco = blocco.replace('<strong>Episodio ', '<strong>Episodio ').replace(' </strong>', ' </strong>')
blocco = blocco.replace('<strong>Episodio ', '<strong>S' + season.zfill(2) + 'E')
matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
episodes = []
if len(matches) > 0:
for fullepisode_s, season, episode in matches:
season = season.lstrip("0")
# episode = episode.lstrip("0")
episodes.append([
"".join([season, "x", episode]),
season,
episode
])
# else:
# # blocco = blocco.replace('>Episodio 0','>Episodio-0')
# matches = scrapertools.find_multiple_matches(blocco, r'Episodio[^\d](\d*)')
# logger.debug(blocco)
# logger.debug(matches)
# episodes = []
# if len(matches) > 0:
# for string, episode in matches:
# episodes.append([
# "".join([season, "x", episode]),
# season,
# episode
# ])
else:
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
episodes = scrapertools.find_multiple_matches(scrapedtitle,r'((\d*)x(\d*))')
# logger.debug(scrapedtitle)
# logger.debug(episodes)
episodes = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
for fullepisode, season, episode in episodes:
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode
fullepisode+=' Sub-ITA'
fullepisode += ' ' + support.typo("Sub-ITA", '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
@@ -213,83 +201,33 @@ def episodios(item, itemlist=[]):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
patron = r'<strong class="on">\d+</strong>\s*<a href="([^<]+)">\d+</a>'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
item.url = next_page
itemlist = episodios(item,itemlist)
itemlist = episodios(item, itemlist)
else:
item.url = item.originalUrl
support.videolibrary(itemlist,item,'bold color kod')
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
data = httptools.downloadpage(item.url).data
# grab the block containing the links
blocco = scrapertools.find_single_match(data,'<div class="entry">[\s\S.]*?<div class="post')
blocco = blocco.replace('..:: Episodio ','Episodio ')
matches = scrapertools.find_multiple_matches(blocco, '(S(\d*)E(\d*))\s')
if len(matches) > 0:
for fullseasonepisode, season, episode in matches:
blocco = blocco.replace(fullseasonepisode+' ','Episodio '+episode+' ')
blocco = blocco.replace('Episodio ', '..:: Episodio ')
logger.debug(blocco)
episodio = item.title.replace(str(item.contentSeason)+"x",'')
patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio
matches = re.compile(patron, re.DOTALL).findall(blocco)
if len(matches):
data = matches[0][0]
patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
for keeplinks, id in matches:
headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
['Referer', keeplinks]]
html = httptools.downloadpage(keeplinks, headers=headers).data
data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
itemlist = support.server(item, data=data)
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Check whether the links are valid
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def peliculas_tv(item):
logger.info(item.channel+" peliculas_tv")
log()
itemlist = []
data = httptools.downloadpage(item.url).data
# logger.debug(data)
patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches, data = support.match(item, patron, headers=headers)
for scrapedurl, scrapedtitle in matches:
if "FACEBOOK" in scrapedtitle or "RAPIDGATOR" in scrapedtitle:
continue
if scrapedtitle == "WELCOME!":
if scrapedtitle in ["FACEBOOK", "RAPIDGATOR", "WELCOME!"]:
continue
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
@@ -300,14 +238,14 @@ def peliculas_tv(item):
title = title.split(" S2")[0].strip()
infoLabels['season'] = episode[1]
infoLabels['episode'] = episode[2]
infoLabels['episode'] = episode[2].zfill(2)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=title+" - "+episode[0]+" Sub-ITA",
title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
@@ -318,24 +256,9 @@ def peliculas_tv(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
support.nextPage(itemlist,item,data,patron)
# next_page = scrapertools.find_single_match(data, patron)
# if next_page != "":
# if item.extra == "search_tv":
# next_page = next_page.replace('&#038;', '&')
# itemlist.append(
# Item(channel=item.channel,
# action='peliculas_tv',
# contentType=item.contentType,
# title=support.typo(config.get_localized_string(30992), 'color kod bold'),
# url=next_page,
# args=item.args,
# extra=item.extra,
# thumbnail=support.thumb()))
patron = r'<strong class="on">\d+</strong>\s<a href="([^<]+)">\d+</a>'
support.nextPage(itemlist, item, data, patron)
return itemlist
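Note the patron switch from class=\'on\' to class="on": support.match (see the support.py hunk further down) normalizes the downloaded page before matching, so patterns are now written against the cleaned text:
# inside support.match, quotes are unified before the regex runs:
# data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data.replace("'", '"')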
@@ -343,10 +266,9 @@ def peliculas_tv(item):
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info(__channel__ + " newest" + categoria)
log(categoria)
itemlist = []
item = Item()
item.url = host
@@ -355,7 +277,6 @@ def newest(categoria):
if categoria == "series":
itemlist = peliculas_tv(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
@@ -369,15 +290,11 @@ def newest(categoria):
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info(item.channel + " search")
log(texto)
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
# Entries
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if texto.upper() in scrapedtitle.upper():
@@ -401,19 +318,6 @@ def search(item, texto):
return itemlist
# item.extra = "search_tv"
#
# item.url = host + "/?s=" + texto + "&op.x=0&op.y=0"
#
# try:
# return peliculas_tv(item)
#
# except:
# import sys
# for line in sys.exc_info():
# logger.error("%s" % line)
# return []
# ================================================================================================================
@@ -421,23 +325,18 @@ def search(item, texto):
def list_az(item):
support.log(item.channel+" list_az")
log()
itemlist = []
alphabet = dict()
# Download the page
data = httptools.downloadpage(item.url).data
# Entries
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(scrapedurl+'||'+scrapedtitle)
alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
for letter in sorted(alphabet):
itemlist.append(

View File

@@ -1,15 +1,14 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for SerieTVU
# Thanks to Icarus crew & Alfa addon
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ----------------------------------------------------------
import re
import channelselector
from core import httptools, tmdb, scrapertools, support
from core import tmdb, scrapertools, support
from core.item import Item
from core.support import log
from platformcode import logger, config
from specials import autoplay
__channel__ = 'serietvu'
host = config.get_setting("channel_host", __channel__)
@@ -20,44 +19,28 @@ list_language = IDIOMAS.values()
list_servers = ['speedvideo']
list_quality = ['default']
# checklinks = config.get_setting('checklinks', __channel__)
# checklinks_number = config.get_setting('checklinks_number', __channel__)
def mainlist(item):
support.log(item.channel + 'mainlist')
log()
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'latestep', "%s/ultimi-episodi" % host,'tvshow')
# support.menu(itemlist, 'Nuove serie color azure', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host,'tvshow')
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
support.menu(itemlist, 'Novità bold', 'latestep', "%s/ultimi-episodi" % host, 'tvshow')
support.menu(itemlist, 'Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host, 'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow')
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace(' Il Trono di Spade','').replace('Flash 2014','Flash')
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace(' Il Trono di Spade', '').replace(
'Flash 2014', 'Flash').replace('"', "'")
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
@@ -65,14 +48,12 @@ def cleantitle(scrapedtitle):
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
support.log(item.channel + " lista_serie")
log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches, data = support.match(item, patron, headers=headers)
for scrapedurl, scrapedimg, scrapedtitle in matches:
infoLabels = {}
@@ -96,34 +77,43 @@ def lista_serie(item):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pages
support.nextPage(itemlist,item,data,'<li><a href="([^"]+)">Pagina successiva')
support.nextPage(itemlist, item, data, '<li><a href="([^"]+)">Pagina successiva')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
support.log(item.channel + " episodios")
log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<option value="(\d+)"[\sselected]*>.*?</option>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches, data = support.match(item, patron, headers=headers)
for value in matches:
patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % value
blocco = scrapertools.find_single_match(data, patron)
log(blocco)
patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)"(?:\sdata-original="([^"]+)")?\sclass="[^"]+">)[^>]+>[^>]+>([^<]+)<'
matches = scrapertools.find_multiple_matches(blocco, patron)
patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">)[^>]+>[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches:
number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip()
contentlanguage = ''
if 'sub-ita' in scrapedtitle.lower():
contentlanguage = 'Sub-ITA'
scrapedtitle = scrapedtitle.replace(contentlanguage, '')
number = cleantitle(scrapedtitle.replace("Episodio", "")).strip()
title = value + "x" + number.zfill(2)
title += " "+support.typo(contentlanguage, '_ [] color kod') if contentlanguage else ''
infoLabels = {}
infoLabels['episode'] = number.zfill(2)
infoLabels['season'] = value
itemlist.append(
Item(channel=item.channel,
@@ -134,60 +124,40 @@ def episodios(item):
url=scrapedurl,
thumbnail=scrapedimg,
extra=scrapedextra,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
support.videolibrary(itemlist,item,'bold color kod')
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
log()
return support.server(item, data=item.url)
itemlist = support.server(item, data=item.url)
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Check whether the links are valid
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findepisodevideo(item):
support.log(item.channel + " findepisodevideo")
log()
# Download the page
data = httptools.downloadpage(item.url, headers=headers).data
# Take the block specific to the requested season
patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
blocco = scrapertools.find_single_match(data, patron)
# Extract the episode
patron = r'<a data-id="%s[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">' % item.extra[0][1].lstrip("0")
matches = re.compile(patron, re.DOTALL).findall(blocco)
itemlist = support.server(item, data=matches[0][0])
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Check whether the links are valid
# if checklinks:
# itemlist = servertools.check_list_links(itemlist, checklinks_number)
#
# autoplay.start(itemlist, item)
return itemlist
patron_block = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
patron = r'<a data-id="%s[^"]*" data-href="([^"]+)"(?:\sdata-original="[^"]+")?\sclass="[^"]+">' % item.extra[0][1].lstrip("0")
matches = support.match(item, patron, patron_block, headers)[0]
data = ''
if len(matches) > 0:
data = matches[0]
item.contentType = 'movie'
return support.server(item, data=data)
# ================================================================================================================
@@ -195,16 +165,13 @@ def findepisodevideo(item):
# ----------------------------------------------------------------------------------------------------------------
def latestep(item):
support.log(item.channel + " latestep")
log()
itemlist = []
titles = []
# grab the episodes from the home page's "Ultimi episodi aggiunti" section
data = httptools.downloadpage(host, headers=headers).data
block = scrapertools.find_single_match(data,r"Ultimi episodi aggiunti.*?<h2>")
regex = r'<a href="([^"]*)"\sdata-src="([^"]*)"\sclass="owl-lazy.*?".*?class="title">(.*?)<small>\((\d*?)x(\d*?)\s(Sub-Ita|Ita)'
matches = re.compile(regex, re.DOTALL).findall(block)
patron_block = r"Ultimi episodi aggiunti.*?<h2>"
patron = r'<a href="([^"]*)"\sdata-src="([^"]*)"\sclass="owl-lazy.*?".*?class="title">(.*?)<small>\((\d*?)x(\d*?)\s(Sub-Ita|Ita)'
matches = support.match(item, patron, patron_block, headers, host)[0]
for scrapedurl, scrapedimg, scrapedtitle, scrapedseason, scrapedepisode, scrapedlanguage in matches:
infoLabels = {}
@@ -213,13 +180,13 @@ def latestep(item):
infoLabels['year'] = year
infoLabels['episode'] = scrapedepisode
infoLabels['season'] = scrapedseason
episode = scrapedseason+"x"+scrapedepisode
episode = scrapedseason + "x" + scrapedepisode
scrapedtitle = cleantitle(scrapedtitle)
title = scrapedtitle+" - "+episode
title = scrapedtitle + " - " + episode
contentlanguage = ""
if scrapedlanguage.strip().lower() != 'ita':
title +=" Sub-ITA"
title += " "+support.typo("Sub-ITA", '_ [] color kod')
contentlanguage = 'Sub-ITA'
titles.append(title)
@@ -229,7 +196,7 @@ def latestep(item):
title=title,
fulltitle=title,
url=scrapedurl,
extra=[[scrapedseason,scrapedepisode]],
extra=[[scrapedseason, scrapedepisode]],
thumbnail=scrapedimg,
contentSerieName=scrapedtitle,
contentLanguage=contentlanguage,
@@ -237,11 +204,9 @@ def latestep(item):
infoLabels=infoLabels,
folder=True))
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<small>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = support.match(item, patron, headers=headers)[0]
for scrapedurl, scrapedimg, scrapedtitle, scrapedinfo in matches:
infoLabels = {}
@@ -261,7 +226,7 @@ def latestep(item):
title = title.strip()
contentlanguage = ""
if 'sub-ita' in scrapedinfo.lower():
title+=" Sub-ITA"
title += " "+support.typo("Sub-ITA", '_ [] color kod')
contentlanguage = 'Sub-ITA'
if title in titles: continue
@@ -279,12 +244,8 @@ def latestep(item):
contentType='episode',
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# logger.debug("".join(map(str,itemlist)))
return itemlist
@@ -292,7 +253,7 @@ def latestep(item):
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info(__channel__ + " newest" + categoria)
log(categoria)
itemlist = []
item = Item()
try:
@@ -318,7 +279,7 @@ def newest(categoria):
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info(item.channel + " search")
log(texto)
item.url = host + "/?s=" + texto
try:
return lista_serie(item)
@@ -334,26 +295,9 @@ def search(item, texto):
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
logger.info(item.channel +" categorie")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>')
log()
patron_block= r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>'
patron = r'<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
if scrapedtitle == 'Home Page' or scrapedtitle == 'Calendario Aggiornamenti':
continue
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
title=scrapedtitle,
contentType="tvshow",
url="%s%s" % (host, scrapedurl),
thumbnail=item.thumbnail,
folder=True))
return itemlist
return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='lista_serie', blacklist=["Home Page", "Calendario Aggiornamenti"])
# ================================================================================================================

View File

@@ -467,7 +467,7 @@ def match(item, patron='', patron_block='', headers='', url=''):
matches = []
url = url if url else item.url
data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data.replace("'", '"')
data = re.sub(r'\n|\t|\s\s', ' ', data)
data = re.sub(r'\n|\t|\s+', ' ', data)
log('DATA= ', data)
if patron_block:
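match() downloads the page, unifies quotes, collapses whitespace (now \s+ rather than only double spaces, so patterns no longer trip over newlines), optionally narrows to patron_block, and returns the matches together with the cleaned page. The two calling idioms used throughout this commit:
# when only the matches matter:
matches = support.match(item, patron, headers=headers)[0]
# when pagination still needs the cleaned page afterwards:
matches, data = support.match(item, patron, headers=headers)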

servers/animeworld.json Normal file
View File

@@ -0,0 +1,42 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://animeworld.biz/v/[a-zA-Z0-9/-]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "animeworld",
"name": "animeworld",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@60654",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "@60655",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": ""
}
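The find_videos pattern is what lets servertools route embedded animeworld.biz links to this server module. A quick sanity check of the regex (the sample text is made up):
import re
pattern = r'(https://animeworld.biz/v/[a-zA-Z0-9/-]+)'
sample = 'iframe src=https://animeworld.biz/v/AbC12-3 ...'  # hypothetical page snippet
found = re.search(pattern, sample)
if found:
    print(found.group(1))  # https://animeworld.biz/v/AbC12-3, handed to servers/animeworld.py as page_url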

servers/animeworld.py Normal file
View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
import urllib
from core import httptools, jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
post = urllib.urlencode({'r': '', 'd': 'animeworld.biz'})
data_json = httptools.downloadpage(page_url.replace('/v/', '/api/source/'), headers=[['x-requested-with', 'XMLHttpRequest']], post=post).data
json = jsontools.load(data_json)
if not json['data']:
return False, "Video not found"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
post = urllib.urlencode({'r': '', 'd': 'animeworld.biz'})
data_json = httptools.downloadpage(page_url.replace('/v/', '/api/source/'), headers=[['x-requested-with', 'XMLHttpRequest']], post=post).data
json = jsontools.load(data_json)
if json['data']:
for file in json['data']:
media_url = file['file']
label = file['label']
extension = file['type']
video_urls.append([label + " " + extension + ' [animeworld]', media_url])
return video_urls
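A usage sketch for the new resolver; the page URL is hypothetical, and each entry of video_urls is a ['label type [animeworld]', direct_url] pair as built above:
page_url = 'https://animeworld.biz/v/AbC12-3'  # hypothetical embed
exists, message = test_video_exists(page_url)
if exists:
    for title, media_url in get_video_url(page_url):
        print(title, media_url)  # e.g. '1080p mp4 [animeworld]' plus the direct stream url
else:
    print(message)  # 'Video not found'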

View File

@@ -155,7 +155,7 @@ def check_for_update(overwrite=True):
if not serie.active:
# if the series is not active, discard it
if overwrite_forced == False:
if not overwrite:
# Sync episodes watched in the Kodi video library with Alfa's, even if the series is disabled
try:
if config.is_xbmc(): # If it's Kodi, do it