Code Cleanup
@@ -5,18 +5,15 @@
 import re
-import time
 import urllib
-import urlparse
-import channelselector
 from channelselector import thumb
 from core import httptools, scrapertoolsV2, servertools, tmdb, support, jsontools
 from core.support import log
 from core.item import Item
 from platformcode import logger, config
 from specials import autoplay, autorenumber

 __channel__ = 'animeworld'
-host = config.get_setting("channel_host", __channel__)
+host = config.get_setting("channel_host", 'animeworld')
 headers = [['Referer', host]]

 IDIOMAS = {'Italiano': 'Italiano'}
@@ -29,7 +26,7 @@ checklinks_number = config.get_setting('checklinks_number', 'animeworld')


 def mainlist(item):
-    logger.info(__channel__+" mainlist")
+    log()

     itemlist =[]
@@ -42,26 +39,14 @@ def mainlist(item):
     support.menu(itemlist, 'Ultimi Aggiunti bold', 'video', host+'/newest', args=["anime"])
     support.menu(itemlist, 'Ultimi Episodi bold', 'video', host+'/updated', args=["novita'"])
     support.menu(itemlist, 'Cerca...', 'search')

-    autoplay.init(item.channel, list_servers, list_quality)
-    autoplay.show_option(item.channel, itemlist)
-
-    itemlist.append(
-        Item(channel='setting',
-             action="channel_config",
-             title=support.typo("Configurazione Canale color lime"),
-             config=item.channel,
-             folder=False,
-             thumbnail=channelselector.get_thumb('setting_0.png'))
-    )
-
+    support.aplay(item, itemlist, list_servers, list_quality)
+    support.channel_config(item, itemlist)
     return itemlist


 # Crea menu dei generi =================================================

 def generi(item):
-    support.log(item.channel+" generi")
+    log()
     itemlist = []
     patron_block = r'</i>\sGeneri</a>\s*<ul class="sub">(.*?)</ul>'
     patron = r'<a href="([^"]+)"\stitle="([^"]+)">'
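In the new mainlist, the autoplay wiring and the hand-built settings entry give way to two support helpers. Their bodies are not part of this diff, so the following is only a rough sketch inferred from the call sites, reusing the removed code verbatim (names aplay_sketch/channel_config_sketch are hypothetical):

    def aplay_sketch(item, itemlist, list_servers, list_quality):
        # what the removed mainlist lines did inline
        autoplay.init(item.channel, list_servers, list_quality)
        autoplay.show_option(item.channel, itemlist)

    def channel_config_sketch(item, itemlist):
        # one standardized 'Configurazione Canale' entry per channel,
        # mirroring the removed itemlist.append(...) block
        itemlist.append(Item(channel='setting', action='channel_config',
                             title=support.typo('Configurazione Canale color lime'),
                             config=item.channel, folder=False,
                             thumbnail=channelselector.get_thumb('setting_0.png')))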
@@ -80,64 +65,29 @@ def generi(item):
 # Crea Menu Filtro ======================================================

 def build_menu(item):
     log()
     itemlist = []
-    itemlist.append(Item(
-        channel=item.channel,
-        action="video",
-        title="[B]Tutti[/B]",
-        url=item.url))
-
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r'\n|\t','',data)
-    data = re.sub(r'>\s*<','><',data)
-
-    block = scrapertoolsV2.find_single_match(data, r'<form class="filters.*?>(.*?)<\/form>')
-
-    matches = re.compile(r'<button class="btn btn-sm btn-default dropdown-toggle" data-toggle="dropdown"> (.*?) <span.*?>(.*?)<\/ul>', re.DOTALL).findall(block)
-
-    for title, html in matches:
-        itemlist.append(
-            Item(channel=item.channel,
-                 action='build_sub_menu',
-                 contentType="tvshow",
-                 title='[B] > ' + title + '[/B]',
-                 fulltitle=title,
-                 show=title,
-                 url=item.url,
-                 html=html,
-                 thumbnail=item.thumbnail,
-                 fanart=item.fanart))
-
-    # Elimina FLingua dal Menu
-    itemlist.pop(6)
-    itemlist.pop(6)
-
-    itemlist = thumb(itemlist)
-
+    support.menu(itemlist, 'Tutti bold submenu', 'video', item.url)
+    matches = support.match(item, r'<button class="btn btn-sm btn-default dropdown-toggle" data-toggle="dropdown"> (.*?) <span.*?>(.*?)<\/ul>', r'<form class="filters.*?>(.*?)<\/form>')
+    for title, html in matches[0]:
+        if title not in 'Lingua Ordine':
+            support.menu(itemlist, title + ' submenu bold', 'build_sub_menu', item.url, args=html + ' anime')
     return itemlist
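build_menu now funnels download, whitespace cleanup, block extraction and findall through support.match, which returns the matches together with the flattened page. A minimal self-contained sketch of that consolidation (match_sketch is a hypothetical stand-in; the real helper's changed line appears in the last hunk of this commit):

    import re

    def match_sketch(html, patron, patron_block=None):
        # flatten the page the way the removed inline code did
        data = re.sub(r'\n|\t', '', html)
        data = re.sub(r'>\s*<', '><', data)
        # optionally narrow the search to one block of the page
        block = data
        if patron_block:
            m = re.search(patron_block, data, re.DOTALL)
            block = m.group(1) if m else ''
        # callers either index [0] for the matches or unpack (matches, data)
        return re.findall(patron, block, re.DOTALL), data

With that shape, matches[0] here is the findall list, while lista_anime and video below unpack matches, data so the flattened page stays available for pagination.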
 # Crea SottoMenu Filtro ======================================================

 def build_sub_menu(item):
     log()
     itemlist = []
-    matches = re.compile(r'<input.*?name="(.*?)" value="(.*?)".*?><label.*?>(.*?)<\/label>', re.DOTALL).findall(item.html)
-    for name, value, title in matches:
-        itemlist.append(
-            Item(channel=item.channel,
-                 action='video',
-                 contentType="tvshow",
-                 title='[B]' + title + ' >[/B]',
-                 fulltitle=title,
-                 show=title,
-                 url=item.url + '&' + name + '=' + value,
-                 plot=""))
-    itemlist = thumb(itemlist)
+    matches = re.compile(r'<input.*?value="([^"]+)"> <label for="([^"]+)">([^<]+)<\/label>', re.DOTALL).findall(item.args)
+    for value, name, title in matches:
+        support.menu(itemlist, support.typo(title, 'bold'), 'video', item.url + '&' + name + '=' + value)
     return itemlist
 # Novità ======================================================

 def newest(categoria):
-    logger.info(__channel__+" newest")
+    log()
     itemlist = []
     item = Item()
     try:
@@ -161,7 +111,7 @@ def newest(categoria):
 # Cerca ===========================================================

 def search(item, texto):
-    logger.info(texto)
+    log(texto)
     item.url = host + '/search?keyword=' + texto
     try:
         return video(item)
@@ -173,77 +123,47 @@ def search(item, texto):
         return []

 # Lista A-Z ====================================================

 def alfabetico(item):
-    logger.info(__channel__+" alfabetico")
-    itemlist = []
-
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r'\n|\t','',data)
-    data = re.sub(r'>\s*<','><',data)
-
-    block = scrapertoolsV2.find_single_match(data, r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>')
-
-    matches = re.compile('<a href="([^"]+)" title="([^"]+)">', re.DOTALL).findall(block)
-    scrapertoolsV2.printMatches(matches)
-
-    for url, title in matches:
-        itemlist.append(
-            Item(channel=item.channel,
-                 action='lista_anime',
-                 contentType="tvshow",
-                 title=title,
-                 fulltitle=title,
-                 show=title,
-                 url=url,
-                 plot=""))
-
-    return itemlist
+    return support.scrape(item, '<a href="([^"]+)" title="([^"]+)">', ['url', 'title'], patron_block=r'<span>.*?A alla Z.<\/span>.*?<ul>(.*?)<\/ul>', action='lista_anime')

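support.scrape goes one step further than support.match: it builds the itemlist too. A sketch of the contract implied by this one call (the fields list presumably maps regex capture groups to Item attributes; the real helper likely also handles thumbnails and TMDb lookups, omitted here):

    def scrape_sketch(item, html, patron, fields, patron_block=None, action='findvideos'):
        # reuses the hypothetical match_sketch() from the build_menu note
        matches, data = match_sketch(html, patron, patron_block)
        itemlist = []
        for groups in matches:
            kwargs = dict(zip(fields, groups))   # e.g. {'url': ..., 'title': ...}
            itemlist.append(Item(channel=item.channel, action=action,
                                 contentType='tvshow',
                                 fulltitle=kwargs.get('title', ''),
                                 show=kwargs.get('title', ''), **kwargs))
        return itemlist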
 def lista_anime(item):
-    logger.info(__channel__+" lista_anime")
+    log()
     itemlist = []

-    # Carica la pagina
-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r'\n|\t','',data)
-    data = re.sub(r'>\s*<','><',data)
-
-    # Estrae i contenuti
-    patron = r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
+    matches, data = support.match(item, r'<div class="item"><a href="([^"]+)".*?src="([^"]+)".*?data-jtitle="([^"]+)".*?>([^<]+)<\/a><p>(.*?)<\/p>')
     for scrapedurl, scrapedthumb, scrapedoriginal, scrapedtitle, scrapedplot in matches:

         if scrapedoriginal == scrapedtitle:
             scrapedoriginal = ''
         else:
-            scrapedoriginal = ' - [ ' + scrapedoriginal + ' ]'
+            scrapedoriginal = support.typo(scrapedoriginal, ' -- []')

         year = ''
         lang = ''
         infoLabels = {}
         if '(' in scrapedtitle:
             year = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([0-9]+\))')
             lang = scrapertoolsV2.find_single_match(scrapedtitle, r'(\([a-zA-Z]+\))')

-        title = scrapedtitle.replace(year,'').replace(lang,'')
-        original = scrapedoriginal.replace(year,'').replace(lang,'')
-        title = '[B]' + title + '[/B]' + year + lang + original
-
+        infoLabels['year'] = year
+        title = scrapedtitle.replace(year,'').replace(lang,'').strip()
+        original = scrapedoriginal.replace(year,'').replace(lang,'').strip()
+        if lang: lang = support.typo(lang,'_ color kod')
+        title = '[B]' + title + '[/B]' + lang + original

         itemlist.append(
             Item(channel=item.channel,
                  extra=item.extra,
                  contentType="episode",
                  action="episodios",
                  text_color="azure",
                  title=title,
                  url=scrapedurl,
                  thumbnail=scrapedthumb,
                  fulltitle=title,
                  show=title,
                  infoLabels=infoLabels,
                  plot=scrapedplot,
                  folder=True))
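The retitling changed in two ways: whitespace left over after stripping the '(year)' and '(lang)' tags is now trimmed, and the year moves out of the display label into infoLabels['year'], presumably so the TMDb lookup can use it. A worked example on a hypothetical title:

    import re

    scrapedtitle = 'One Piece (ITA) (1999)'                           # hypothetical sample
    year = re.search(r'(\([0-9]+\))', scrapedtitle).group(1)          # '(1999)'
    lang = re.search(r'(\([a-zA-Z]+\))', scrapedtitle).group(1)       # '(ITA)'
    title = scrapedtitle.replace(year, '').replace(lang, '').strip()  # 'One Piece'
    # the old label embedded year and lang as plain text; the new one styles
    # lang via support.typo and carries the year in infoLabels instead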
@@ -252,31 +172,15 @@ def lista_anime(item):

     # Next page
+    support.nextPage(itemlist, item, data, r'<a class="page-link" href="([^"]+)" rel="next"')
-    # next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href="([^"]+)" rel="next"')
-    #
-    # if next_page != '':
-    #     itemlist.append(
-    #         Item(channel=item.channel,
-    #              action='lista_anime',
-    #              title='[B]' + config.get_localized_string(30992) + ' >[/B]',
-    #              url=next_page,
-    #              contentType=item.contentType,
-    #              thumbnail=thumb()))

     return itemlist

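support.nextPage condenses the ten commented-out lines into one call. A sketch assuming the helper mirrors that removed code (30992 is the localized next-page label the removed block already used; next_page_sketch is a hypothetical name):

    def next_page_sketch(itemlist, item, data, patron_next):
        next_page = re.search(patron_next, data)
        if next_page:
            itemlist.append(Item(channel=item.channel, action=item.action,
                                 title=config.get_localized_string(30992),
                                 url=next_page.group(1),
                                 contentType=item.contentType))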
 def video(item):
-    logger.info(__channel__+" video")
+    log()
     itemlist = []

-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r'\n|\t','',data)
-    data = re.sub(r'>\s*<','><',data)
-
-    patron = r'<a href="([^"]+)" class="poster.*?><img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>'
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches, data = support.match(item, r'<a href="([^"]+)" class="poster.*?> <img src="([^"]+)"(.*?)data-jtitle="([^"]+)" .*?>(.*?)<\/a>')

     for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches:
         # Cerca Info come anno o lingua nel Titolo
@@ -287,14 +191,14 @@ def video(item):
         lang = scrapertoolsV2.find_single_match(scrapedtitle, r'( \([a-zA-Z]+\))')

         # Rimuove Anno e Lingua nel Titolo
-        title = scrapedtitle.replace(year,'').replace(lang,'')
-        original = scrapedoriginal.replace(year,'').replace(lang,'')
+        title = scrapedtitle.replace(year,'').replace(lang,'').strip()
+        original = scrapedoriginal.replace(year,'').replace(lang,'').strip()

         # Compara Il Titolo con quello originale
         if original == title:
             original=''
         else:
-            original = ' - [ ' + scrapedoriginal + ' ]'
+            original = support.typo(scrapedoriginal,'-- []')

         # cerca info supplementari
         ep = ''
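support.typo shows up throughout this commit with shorthand markers: '-- []' here, ' -- []' and '_ color kod' in lista_anime, 'bold' in the menu calls. The mapping below is a guess assembled only from these call sites and the literal strings they replace, not the real core.support implementation:

    def typo_sketch(string, markers=''):
        # guessed marker semantics
        if '[]' in markers: string = '[ ' + string + ' ]'
        if '--' in markers: string = ' - ' + string
        if '_' in markers: string = ' ' + string
        if 'bold' in markers: string = '[B]' + string + '[/B]'
        color = re.search(r'color (\w+)', markers)
        if color: string = '[COLOR %s]%s[/COLOR]' % (color.group(1), string)
        return string

Under those rules, typo(original, '-- []') reproduces exactly the removed literal ' - [ ' + original + ' ]'.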
@@ -353,22 +257,11 @@ def video(item):

     # Next page
+    support.nextPage(itemlist, item, data, r'<a\sclass="page-link"\shref="([^"]+)"\srel="next"\saria-label="Successiva')
-    # next_page = scrapertoolsV2.find_single_match(data, '<a class="page-link" href=".*?page=([^"]+)" rel="next"')
-    #
-    # if next_page != '':
-    #     itemlist.append(
-    #         Item(channel=item.channel,
-    #              action='video',
-    #              title='[B]' + config.get_localized_string(30992) + ' >[/B]',
-    #              url=re.sub('&page=([^"]+)', '', item.url) + '&page=' + next_page,
-    #              contentType=item.contentType,
-    #              thumbnail=thumb()))

     return itemlist

 def episodios(item):
-    logger.info(__channel__+" episodios")
+    log()
     itemlist = []

     data = httptools.downloadpage(item.url).data.replace('\n', '')
@@ -400,39 +293,29 @@ def episodios(item):
                  thumbnail=item.thumbnail))

     autorenumber.renumber(itemlist, item,'bold')

-    # Aggiungi a Libreria
-    # if config.get_videolibrary_support() and len(itemlist) != 0:
-    #     itemlist.append(
-    #         Item(
-    #             channel=item.channel,
-    #             title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
-    #             url=item.url,
-    #             action="add_serie_to_library",
-    #             extra="episodios",
-    #             show=item.show))
+    support.videolibrary(itemlist, item)

     return itemlist

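support.videolibrary standardizes the add-to-videolibrary entry that every channel used to build by hand; the commented-out block above is essentially its body. A sketch, assuming the helper mirrors the removed code:

    def videolibrary_sketch(itemlist, item):
        if config.get_videolibrary_support() and len(itemlist) != 0:
            itemlist.append(Item(channel=item.channel,
                                 title=config.get_localized_string(30161),
                                 url=item.url, action='add_serie_to_library',
                                 extra='episodios', show=item.show))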
 def findvideos(item):
-    logger.info(__channel__+" findvideos")
+    log()

     itemlist = []
     # logger.debug(item.extra)
     episode = '1'
     # recupero i server disponibili
     if item.extra and item.extra['episode']:
         data = item.extra['data']
         episode = item.extra['episode']
     else:
         data = httptools.downloadpage(item.url,headers=headers).data

     block = scrapertoolsV2.find_single_match(data,r'data-target="\.widget\.servers.*?>(.*?)</div>')
     servers = scrapertoolsV2.find_multiple_matches(block,r'class="tab.*?data-name="([0-9]+)">([^<]+)</span')

     videolist = []
     videoData = ''

     for serverid, servername in servers:
         # recupero l'id del video per questo server
         block = scrapertoolsV2.find_single_match(data,r'<div class="server.*?data-id="'+serverid+'">(.*?)</ul>')
@@ -447,7 +330,7 @@ def findvideos(item):
             post = urllib.urlencode({'r': '', 'd': 'www.animeworld.biz'})
             dataJson = httptools.downloadpage(json['grabber'].replace('/v/','/api/source/'),headers=[['x-requested-with', 'XMLHttpRequest']],post=post).data
             json = jsontools.load(dataJson)
-            logger.debug(json['data'])
+            log(json['data'])
             if json['data']:
                 for file in json['data']:
                     itemlist.append(
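For context, the exchange this hunk touches: the embed URL under '/v/' is rewritten to the '/api/source/' JSON endpoint and POSTed with an XMLHttpRequest header; the 'data' field of the response lists the playable sources. Schematically, using the same httptools API as above (grabber_url stands for the 'grabber' value from the server JSON; response field names beyond 'data' are not visible in this diff):

    post = urllib.urlencode({'r': '', 'd': 'www.animeworld.biz'})
    api_url = grabber_url.replace('/v/', '/api/source/')
    response = httptools.downloadpage(api_url,
                                      headers=[['x-requested-with', 'XMLHttpRequest']],
                                      post=post).data
    sources = jsontools.load(response)['data']   # one entry per playable file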
@@ -477,45 +360,5 @@ def findvideos(item):

     itemlist += servertools.find_video_items(item,videoData)

+    return support.server(item,itemlist=itemlist)
-
-    anime_id = scrapertoolsV2.find_single_match(item.url, r'.*\..*?\/(.*)')
-    data = httptools.downloadpage(host + "/ajax/episode/serverPlayer?id=" + anime_id).data
-    patron = '<source src="([^"]+)"'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-
-    for video in matches:
-        itemlist.append(
-            Item(
-                channel=item.channel,
-                action="play",
-                # title=item.title + " [[COLOR orange]Diretto[/COLOR]]",
-                title='diretto',
-                quality='',
-                url=video,
-                server='directo',
-                show=item.show,
-                contentType=item.contentType,
-                folder=False))
-
-    return support.server(item,data, itemlist)
-
-    # Requerido para Filtrar enlaces
-    if checklinks:
-        itemlist = servertools.check_list_links(itemlist, checklinks_number)
-
-    # Requerido para FilterTools
-    # itemlist = filtertools.get_links(itemlist, item, list_language)
-
-    # Requerido para AutoPlay
-    autoplay.start(itemlist, item)
-
-    return itemlist

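The whole tail of findvideos (the dead direct-source scraper, the checklinks pass, the filtertools stub, autoplay.start) collapses into support.server. From its two call shapes here, (item, itemlist=itemlist) and (item, data, itemlist), a sketch of the implied contract, built from the removed lines and therefore only an assumption about the real helper:

    def server_sketch(item, data='', itemlist=None):
        # assumed consolidation of the removed tail of findvideos
        itemlist = itemlist or []
        if data:
            itemlist += servertools.find_video_items(item, data)
        if checklinks:
            itemlist = servertools.check_list_links(itemlist, checklinks_number)
        autoplay.start(itemlist, item)
        return itemlist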
@@ -452,7 +452,7 @@ def match(item, patron='', patron_block='', headers='', url=''):
     matches = []
     url = url if url else item.url
     data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')
-    data = re.sub('\n|\t', '', data)
+    data = re.sub(r'\n|\t|\s\s', '', data)
     log('DATA= ', data)

     if patron_block:
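The one functional change in the shared helper (evidently the support.match the channel now calls, per the signature in the hunk header): the flattening regex also swallows doubled whitespace. Note that \s\s consumes whitespace two characters at a time, so an odd-length run keeps a single space:

    import re

    re.sub(r'\n|\t', '', 'a  b   c')        # old behaviour: 'a  b   c'
    re.sub(r'\n|\t|\s\s', '', 'a  b   c')   # new behaviour: 'ab c'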