newScrape

This commit is contained in:
mac12m99
2019-07-19 18:38:52 +02:00
parent fbe8c16f6c
commit 4107dbd5ae
11 changed files with 654 additions and 789 deletions

View File

@@ -3,59 +3,109 @@
# Canale per altadefinizione01
# ------------------------------------------------------------
from core import servertools, httptools, tmdb, scrapertoolsV2, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay
from core import servertools, support, jsontools
from core.item import Item
from platformcode import config, logger
#URL che reindirizza sempre al dominio corrente
#host = "https://altadefinizione01.to"
__channel__ = "altadefinizione01_club"
__channel__ = "altadefinizione01"
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'streamango', 'rapidvideo', 'streamcherry', 'megadrive']
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
list_servers = ['verystream','openload','rapidvideo','streamango']
list_quality = ['default']
checklinks = config.get_setting('checklinks', 'altadefinizione01')
checklinks_number = config.get_setting('checklinks_number', 'altadefinizione01')
headers = [['Referer', host]]
blacklist_categorie = ['Altadefinizione01', 'Altadefinizione.to']
@support.menu
def mainlist(item):
support.log()
itemlist =[]
film = ''
support.menu(itemlist, 'Al Cinema','peliculas',host+'/cinema/')
support.menu(itemlist, 'Ultimi Film Inseriti','peliculas',host)
support.menu(itemlist, 'Film Sub-ITA','peliculas',host+'/sub-ita/')
support.menu(itemlist, 'Film Ordine Alfabetico ','AZlist',host+'/catalog/')
support.menu(itemlist, 'Categorie Film','categories',host)
support.menu(itemlist, 'Cerca...','search')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
filmSub = [
('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']),
('Generi', ['', 'categorie', 'genres']),
('Lettera', ['/catalog/a/', 'categorie', 'orderalf']),
('Anni', ['', 'categorie', 'years']),
('Sub-ITA', ['/sub-ita/', 'peliculas', 'pellicola'])
]
return itemlist
return locals()
@support.scrape
def peliculas(item):
## import web_pdb; web_pdb.set_trace()
support.log('peliculas',item)
def categories(item):
support.log(item)
itemlist = support.scrape(item,'<li><a href="([^"]+)">(.*?)</a></li>',['url','title'],headers,'Altadefinizione01',patron_block='<ul class="kategori_list">(.*?)</ul>',action='peliculas')
return support.thumb(itemlist)
action="findvideos"
if item.args == "search":
patronBlock = r'</script> <div class="boxgrid caption">(.*?)<div id="right_bar">'
else:
patronBlock = r'<div class="cover_kapsul ml-mask">(.*?)<div class="page_nav">'
patron = r'<div class="cover boxcaption"> <h2>.<a href="(?P<url>[^"]+)">.*?<.*?src="(?P<thumb>[^"]+)"'\
'.+?[^>]+>[^>]+<div class="trdublaj"> (?P<quality>[A-Z]+)<[^>]+>(?:.[^>]+>(?P<lang>.*?)<[^>]+>).*?'\
'<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> '\
'[^>]+>[^>]+>(?P<year>\d{4})[^>]+>[^>]+> [^>]+>[^>]+>(?P<duration>\d+).+?>'
def AZlist(item):
support.log()
return support.scrape(item,r'<a title="([^"]+)" href="([^"]+)"',['title','url'],headers,patron_block=r'<div class="movies-letter">(.*?)<\/div>',action='peliculas_list')
patronNext = '<span>\d</span> <a href="([^"]+)">'
return locals()
@support.scrape
def categorie(item):
support.log('categorie',item)
## import web_pdb; web_pdb.set_trace()
if item.args != 'orderalf': action = "peliculas"
else: action = 'orderalf'
blacklist = 'Altadefinizione01'
if item.args == 'genres':
patronBlock = r'<ul class="kategori_list">(.*?)</ul>'
patron = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
elif item.args == 'years':
patronBlock = r'<ul class="anno_list">(.*?)</ul>'
patron = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
elif item.args == 'orderalf':
patronBlock = r'<div class="movies-letter">(.*)<div class="clearfix">'
patron = '<a title=.*?href="(?P<url>[^"]+)"><span>(?P<title>.*?)</span>'
return locals()
@support.scrape
def orderalf(item):
support.log('orderalf',item)
action= 'findvideos'
patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"'\
'.+?[^>]+>[^>]+ [^>]+[^>]+ [^>]+>(?P<title>[^<]+).*?[^>]+>(?P<year>\d{4})<'\
'[^>]+>[^>]+>(?P<quality>[A-Z]+)[^>]+> <td class="mlnh-5">(?P<lang>.*?)</td>'
patronNext = r'<span>[^<]+</span>[^<]+<a href="(.*?)">'
return locals()
def findvideos(item):
support.log('findvideos', item)
return support.server(item, headers=headers)
def search(item, text):
logger.info("%s mainlist search log: %s %s" % (__channel__, item, text))
itemlist = []
text = text.replace(" ", "+")
item.url = host + "/index.php?do=search&story=%s&subaction=search" % (text)
item.args = "search"
try:
return peliculas(item)
# Cattura la eccezione così non interrompe la ricerca globle se il canale si rompe!
except:
import sys
for line in sys.exc_info():
logger.error("%s Sono qua: %s" % (__channel__, line))
return []
def newest(categoria):
# import web_pdb; web_pdb.set_trace()
support.log(categoria)
itemlist = []
item = Item()
@@ -67,7 +117,7 @@ def newest(categoria):
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
@@ -75,76 +125,3 @@ def newest(categoria):
return []
return itemlist
def search(item, texto):
support.log(texto)
item.url = "%s/index.php?do=search&story=%s&subaction=search" % (
host, texto)
try:
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
support.log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="cover_kapsul ml-mask".*?<a href="(.*?)">(.*?)<\/a>.*?<img .*?src="(.*?)".*?<div class="trdublaj">(.*?)<\/div>.(<div class="sub_ita">(.*?)<\/div>|())'
matches = scrapertoolsV2.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedquality, subDiv, subText, empty in matches:
info = scrapertoolsV2.find_multiple_matches(data, r'<span class="ml-label">([0-9]+)+<\/span>.*?<span class="ml-label">(.*?)<\/span>.*?<p class="ml-cat".*?<p>(.*?)<\/p>.*?<a href="(.*?)" class="ml-watch">')
infoLabels = {}
for infoLabels['year'], duration, scrapedplot, checkUrl in info:
if checkUrl == scrapedurl:
break
infoLabels['duration'] = int(duration.replace(' min', '')) * 60 # calcolo la durata in secondi
scrapedthumbnail = host + scrapedthumbnail
scrapedtitle = scrapertoolsV2.decodeHtmlentities(scrapedtitle)
fulltitle = scrapedtitle
if subDiv:
fulltitle += support.typo(subText + ' _ () color limegreen')
fulltitle += support.typo(scrapedquality.strip()+ ' _ [] color kod')
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType=item.contenType,
contentTitle=scrapedtitle,
contentQuality=scrapedquality.strip(),
plot=scrapedplot,
title=fulltitle,
fulltitle=scrapedtitle,
show=scrapedtitle,
url=scrapedurl,
infoLabels=infoLabels,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
support.nextPage(itemlist,item,data,'<span>[^<]+</span>[^<]+<a href="(.*?)">')
return itemlist
def peliculas_list(item):
support.log()
item.fulltitle = ''
block = r'<tbody>(.*)<\/tbody>'
patron = r'<a href="([^"]+)" title="([^"]+)".*?> <img.*?src="([^"]+)".*?<td class="mlnh-3">([0-9]{4}).*?mlnh-4">([A-Z]+)'
return support.scrape(item,patron, ['url', 'title', 'thumb', 'year', 'quality'], patron_block=block)
def findvideos(item):
support.log()
itemlist = support.server(item, headers=headers)
return itemlist

View File

@@ -3,131 +3,96 @@
# -*- Riscritto per KOD -*-
# -*- By Greko -*-
# -*- last change: 04/05/2019
# -*- doppione di altadefinizione01
from core import channeltools, servertools, support
from specials import autoplay
from core import servertools, support
from core.item import Item
from platformcode import config, logger
from specials import autoplay
__channel__ = "altadefinizione01_club"
host = config.get_channel_url(__channel__)
# ======== Funzionalità =============================
checklinks = config.get_setting('checklinks', __channel__)
checklinks_number = config.get_setting('checklinks_number', __channel__)
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream','openload','supervideo','rapidvideo','streamango'] # per l'autoplay
list_servers = ['verystream','openload','rapidvideo','streamango']
list_quality = ['default']
# =========== home menu ===================
@support.menu
def mainlist(item):
"""
Creo il menu principale del canale
:param item:
:return: itemlist []
"""
logger.info("%s mainlist log: %s" % (__channel__, item))
itemlist = []
# Menu Principale
support.menu(itemlist, 'Film Ultimi Arrivi bold', 'peliculas', host, args='pellicola')
support.menu(itemlist, 'Genere', 'categorie', host, args='genres')
support.menu(itemlist, 'Per anno submenu', 'categorie', host, args=['Film per Anno','years'])
support.menu(itemlist, 'Per lettera', 'categorie', host + '/catalog/a/', args=['Film per Lettera','orderalf'])
support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + '/cinema/', args='pellicola')
support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + '/sub-ita/', args='pellicola')
support.menu(itemlist, 'Cerca film submenu', 'search', host, args = 'search')
film = ''
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
filmSub = [
('Al Cinema', ['/cinema/', 'peliculas', 'pellicola']),
('Generi', ['', 'categorie', 'genres']),
('Lettera', ['/catalog/a/', 'categorie', 'orderalf']),
('Anni', ['', 'categorie', 'years']),
('Sub-ITA', ['/sub-ita/', 'peliculas', 'pellicola'])
]
support.channel_config(item, itemlist)
return itemlist
return locals()
# ======== def in ordine di menu ===========================
# =========== def per vedere la lista dei film =============
@support.scrape
def peliculas(item):
logger.info("%s mainlist peliculas log: %s" % (__channel__, item))
itemlist = []
## import web_pdb; web_pdb.set_trace()
support.log('peliculas',item)
patron_block = r'<div id="dle-content">(.*?)<div class="page_nav">'
action="findvideos"
if item.args == "search":
patron_block = r'</table> </form>(.*?)<div class="search_bg">'
patron = r'<h2>.<a href="(.*?)".*?src="(.*?)".*?(?:|<div class="sub_ita">(.*?)</div>)[ ]</div>.*?<p class="h4">(.*?)</p>'
patronBlock = r'</script> <div class="boxgrid caption">(.*?)<div id="right_bar">'
else:
patronBlock = r'<div class="cover_kapsul ml-mask">(.*?)<div class="page_nav">'
patron = r'<div class="cover boxcaption"> <h2>.<a href="(?P<url>[^"]+)">.*?<.*?src="(?P<thumb>[^"]+)"'\
'.+?[^>]+>[^>]+<div class="trdublaj"> (?P<quality>[A-Z]+)<[^>]+>(?:.[^>]+>(?P<lang>.*?)<[^>]+>).*?'\
'<p class="h4">(?P<title>.*?)</p>[^>]+> [^>]+> [^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+> [^>]+> '\
'[^>]+>[^>]+>(?P<year>\d{4})[^>]+>[^>]+> [^>]+>[^>]+>(?P<duration>\d+).+?>'
listGroups = ['url', 'thumb', 'lang', 'title', 'year']
patronNext = '<span>\d</span> <a href="([^"]+)">'
patronNext = '<span>[^<]+</span>[^<]+<a href="(.*?)">'
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext,patron_block=patron_block,
action='findvideos')
return itemlist
return locals()
# =========== def pagina categorie ======================================
@support.scrape
def categorie(item):
logger.info("%s mainlist categorie log: %s" % (__channel__, item))
itemlist = []
support.log('categorie',item)
## import web_pdb; web_pdb.set_trace()
if item.args != 'orderalf': action = "peliculas"
else: action = 'orderalf'
blacklist = 'Altadefinizione01'
# da qui fare le opportuni modifiche
patron = r'<li><a href="(.*?)">(.*?)</a>'
action = 'peliculas'
if item.args == 'genres':
bloque = r'<ul class="kategori_list">(.*?)</ul>'
elif item.args[1] == 'years':
bloque = r'<ul class="anno_list">(.*?)</ul>'
elif item.args[1] == 'orderalf':
bloque = r'<div class="movies-letter">(.*)<div class="clearfix">'
patron = r'<a title=.*?href="(.*?)"><span>(.*?)</span>'
action = 'orderalf'
listGroups = ['url', 'title']
patronNext = ''
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext, patron_block = bloque,
action=action)
return itemlist
patronBlock = r'<ul class="kategori_list">(.*?)</ul>'
patron = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
elif item.args == 'years':
patronBlock = r'<ul class="anno_list">(.*?)</ul>'
patron = '<li><a href="(?P<url>[^"]+)">(?P<title>.*?)</a>'
elif item.args == 'orderalf':
patronBlock = r'<div class="movies-letter">(.*)<div class="clearfix">'
patron = '<a title=.*?href="(?P<url>[^"]+)"><span>(?P<title>.*?)</span>'
return locals()
# =========== def pagina lista alfabetica ===============================
@support.scrape
def orderalf(item):
logger.info("%s mainlist orderalf log: %s" % (__channel__, item))
itemlist = []
support.log('orderalf',item)
listGroups = ['url', 'title', 'thumb', 'year', 'lang']
patron = r'<td class="mlnh-thumb"><a href="(.*?)".title="(.*?)".*?src="(.*?)".*?mlnh-3">(.*?)<.*?"mlnh-5">.<(.*?)<td' #scrapertools.find_single_match(data, '<td class="mlnh-thumb"><a href="(.*?)".title="(.*?)".*?src="(.*?)".*?mlnh-3">(.*?)<.*?"mlnh-5">.<(.*?)<td')
patronNext = r'<span>[^<]+</span>[^<]+<a href="(.*?)">'
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext,
action='findvideos')
return itemlist
action= 'findvideos'
patron = r'<td class="mlnh-thumb"><a href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"'\
'.+?[^>]+>[^>]+ [^>]+[^>]+ [^>]+>(?P<title>[^<]+).*?[^>]+>(?P<year>\d{4})<'\
'[^>]+>[^>]+>(?P<quality>[A-Z]+)[^>]+> <td class="mlnh-5">(?P<lang>.*?)</td>'
patronNext = r'<span>[^<]+</span>[^<]+<a href="(.*?)">'
return locals()
# =========== def pagina del film con i server per verderlo =============
def findvideos(item):
logger.info("%s mainlist findvideos_film log: %s" % (__channel__, item))
itemlist = []
support.log('findvideos', item)
return support.server(item, headers=headers)
# =========== def per cercare film/serietv =============
@@ -137,7 +102,7 @@ def search(item, text):
itemlist = []
text = text.replace(" ", "+")
item.url = host + "/index.php?do=search&story=%s&subaction=search" % (text)
#item.extra = "search"
item.args = "search"
try:
return peliculas(item)
# Cattura la eccezione così non interrompe la ricerca globle se il canale si rompe!
@@ -150,16 +115,17 @@ def search(item, text):
# =========== def per le novità nel menu principale =============
def newest(categoria):
logger.info("%s mainlist newest log: %s" % (__channel__, categoria))
support.log(categoria)
itemlist = []
item = Item()
try:
item.url = host
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
if categoria == "peliculas":
item.url = host
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continua la ricerca in caso di errore
except:
import sys

View File

@@ -2,7 +2,7 @@
# -*- Channel Altadefinizione01L Film - Serie -*-
# -*- By Greko -*-
import channelselector
##import channelselector
from specials import autoplay
from core import servertools, support, jsontools
from core.item import Item
@@ -11,88 +11,66 @@ from platformcode import config, logger
__channel__ = "altadefinizione01_link"
# ======== def per utility INIZIO ============================
host = config.get_setting("channel_host", __channel__)
list_servers = ['supervideo', 'streamcherry','rapidvideo', 'streamango', 'openload']
list_quality = ['default']
host = config.get_setting("channel_host", __channel__)
headers = [['Referer', host]]
# =========== home menu ===================
@support.menu
def mainlist(item):
"""
Creo il menu principale del canale
:param item:
:return: itemlist []
"""
support.log()
itemlist = []
# Menu Principale
support.menu(itemlist, 'Novità bold', 'peliculas', host)
support.menu(itemlist, 'Film per Genere', 'genres', host, args='genres')
support.menu(itemlist, 'Film per Anno submenu', 'genres', host, args='years')
support.menu(itemlist, 'Film per Qualità submenu', 'genres', host, args='quality')
support.menu(itemlist, 'Al Cinema bold', 'peliculas', host + '/film-del-cinema')
support.menu(itemlist, 'Popolari bold', 'peliculas', host + '/piu-visti.html')
support.menu(itemlist, 'Mi sento fortunato bold', 'genres', host, args='lucky')
support.menu(itemlist, 'Sub-ITA bold', 'peliculas', host + '/film-sub-ita/')
support.menu(itemlist, 'Cerca film submenu', 'search', host)
film = ''
filmSub = [
('Al Cinema', ['/film-del-cinema', 'peliculas']),
('Generi', ['', 'genres', 'genres']),
('Anni', ['', 'genres', 'years']),
('Mi sento fortunato', ['/piu-visti.html', 'genres', 'lucky']),
('Popolari', ['/piu-visti.html', 'peliculas', '']),
('Qualità', ['/piu-visti.html', 'genres', 'quality']),
('Sub-ITA', ['/sub-ita/', 'peliculas'])
]
# per autoplay
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
support.channel_config(item, itemlist)
return itemlist
return locals()
# ======== def in ordine di action dal menu ===========================
@support.scrape
def peliculas(item):
support.log
#import web_pdb; web_pdb.set_trace()
support.log('peliculas',item)
itemlist = []
patron = r'class="innerImage">.*?href="([^"]+)".*?src="([^"]+)"'\
'.*?class="ml-item-title">([^<]+)</.*?class="ml-item-label"> (\d{4}) <'\
'.*?class="ml-item-label">.*?class="ml-item-label ml-item-label-.+?"> '\
'(.+?) </div>.*?class="ml-item-label"> (.+?) </'
listGroups = ['url', 'thumb', 'title', 'year', 'quality', 'lang']
patron = r'class="innerImage">.*?href="(?P<url>[^"]+)".*?src="(?P<thumb>[^"]+)"'\
'.*?class="ml-item-title">(?P<title>[^<]+)</.*?class="ml-item-label"> '\
'(?P<year>\d{4}) <.*?class="ml-item-label"> (?P<duration>\d+) .*?'\
'class="ml-item-label ml-item-label-.+?"> (?P<quality>.+?) <.*?'\
'class="ml-item-label"> (?P<lang>.+?) </'
patronNext = '<span>\d</span> <a href="([^"]+)">'
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patronNext=patronNext,
action='findvideos')
return itemlist
return locals()
# =========== def pagina categorie ======================================
@support.scrape
def genres(item):
support.log
itemlist = []
#data = httptools.downloadpage(item.url, headers=headers).data
action = 'peliculas'
if item.args == 'genres':
bloque = r'<ul class="listSubCat" id="Film">(.*?)</ul>'
patronBlock = r'<ul class="listSubCat" id="Film">(.*?)</ul>'
elif item.args == 'years':
bloque = r'<ul class="listSubCat" id="Anno">(.*?)</ul>'
patronBlock = r'<ul class="listSubCat" id="Anno">(.*?)</ul>'
elif item.args == 'quality':
bloque = r'<ul class="listSubCat" id="Qualita">(.*?)</ul>'
patronBlock = r'<ul class="listSubCat" id="Qualita">(.*?)</ul>'
elif item.args == 'lucky': # sono i titoli random nella pagina, cambiano 1 volta al dì
bloque = r'FILM RANDOM.*?class="listSubCat">(.*?)</ul>'
patronBlock = r'FILM RANDOM.*?class="listSubCat">(.*?)</ul>'
action = 'findvideos'
patron = r'<li><a href="([^"]+)">(.*?)<'
listGroups = ['url','title']
itemlist = support.scrape(item, patron=patron, listGroups=listGroups,
headers= headers, patron_block = bloque,
action=action)
patron = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
return itemlist
return locals()
# =========== def per cercare film/serietv =============
#host+/index.php?do=search&story=avatar&subaction=search
@@ -133,15 +111,6 @@ def newest(categoria):
return itemlist
def findvideos(item):
support.log()
itemlist = support.server(item, headers=headers)
# Requerido para FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
return support.server(item, headers=headers)

View File

@@ -3,41 +3,60 @@
# Canale per altadefinizioneclick
# ----------------------------------------------------------
import re
from specials import autoplay
from core import servertools, support
from core.item import Item
from platformcode import logger, config
from specials import autoplay
from platformcode import config, logger
#host = config.get_setting("channel_host", 'altadefinizioneclick')
__channel__ = 'altadefinizioneclick'
host = config.get_channel_url(__channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
list_servers = ['verystream', 'openload', 'streamango', "vidoza", "thevideo", "okru", 'youtube']
list_quality = ['1080p']
checklinks = config.get_setting('checklinks', 'altadefinizioneclick')
checklinks_number = config.get_setting('checklinks_number', 'altadefinizioneclick')
headers = [['Referer', host]]
@support.menu
def mainlist(item):
support.log()
itemlist = []
support.log()
support.menu(itemlist, 'Film', 'peliculas', host + "/nuove-uscite/")
support.menu(itemlist, 'Per Genere submenu', 'menu', host, args='Film')
support.menu(itemlist, 'Per Anno submenu', 'menu', host, args='Anno')
support.menu(itemlist, 'Sub-ITA', 'peliculas', host + "/sub-ita/")
support.menu(itemlist, 'Cerca...', 'search', host, 'movie')
support.aplay(item, itemlist,list_servers, list_quality)
support.channel_config(item, itemlist)
film = '' #'/nuove-uscite/'
filmSub = [
('Novità', ['/nuove-uscite/', 'peliculas']),
('Al Cinema', ['/film-del-cinema', 'peliculas']),
('Generi', ['', 'menu', 'Film']),
('Anni', ['', 'menu', 'Anno']),
('Qualità', ['', 'menu', 'Qualita']),
('Sub-ITA', ['/sub-ita/', 'peliculas'])
]
return itemlist
return locals()
@support.scrape
def menu(item):
support.log()
action='peliculas'
patron = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a></li>'
patronBlock= r'<ul class="listSubCat" id="'+ str(item.args) + '">(.*?)</ul>'
return locals()
@support.scrape
def peliculas(item):
support.log()
if item.extra == 'search':
patron = r'<a href="(?P<url>[^"]+)">\s*<div class="wrapperImage">(?:<span class="hd">(?P<quality>[^<]+)'\
'<\/span>)?<img[^s]+src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)<[^<]+>'\
'(?:.*?IMDB:\s(\2[^<]+)<\/div>)?'
else:
patron = r'<img width[^s]+src="(?P<thumb>[^"]+)[^>]+><\/a>.*?<a href="(?P<url>[^"]+)">(?P<title>[^(?:\]|<)]+)'\
'(?:\[(?P<lang>[^\]]+)\])?<\/a>[^>]+>[^>]+>[^>]+>(?:\sIMDB\:\s(?P<rating>[^<]+)<)?'\
'(?:.*?<span class="hd">(?P<quality>[^<]+)<\/span>)?\s*<a'
# in caso di CERCA si apre la maschera di inserimento dati
patronNext = r'<a class="next page-numbers" href="([^"]+)">'
return locals()
def search(item, texto):
support.log("search ", texto)
@@ -77,36 +96,6 @@ def newest(categoria):
return itemlist
def menu(item):
support.log()
itemlist = support.scrape(item, '<li><a href="([^"]+)">([^<]+)</a></li>', ['url', 'title'], headers, patron_block='<ul class="listSubCat" id="'+ str(item.args) + '">(.*?)</ul>', action='peliculas')
return support.thumb(itemlist)
def peliculas(item):
support.log()
if item.extra == 'search':
patron = r'<a href="([^"]+)">\s*<div class="wrapperImage">(?:<span class="hd">([^<]+)<\/span>)?<img[^s]+src="([^"]+)"[^>]+>[^>]+>[^>]+>([^<]+)<[^<]+>(?:.*?IMDB:\s([^<]+)<\/div>)?'
elements = ['url', 'quality', 'thumb', 'title', 'rating']
else:
patron = r'<img width[^s]+src="([^"]+)[^>]+><\/a>.*?<a href="([^"]+)">([^(?:\]|<)]+)(?:\[([^\]]+)\])?<\/a>[^>]+>[^>]+>[^>]+>(?:\sIMDB\:\s([^<]+)<)?(?:.*?<span class="hd">([^<]+)<\/span>)?\s*<a'
elements =['thumb', 'url', 'title','lang', 'rating', 'quality']
itemlist = support.scrape(item, patron, elements, headers, patronNext='<a class="next page-numbers" href="([^"]+)">')
return itemlist
def findvideos(item):
support.log()
itemlist = support.hdpass_get_servers(item)
if checklinks:
itemlist = servertools.check_list_links(itemlist, checklinks_number)
# itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item ,'color kod bold')
return itemlist
support.log('findvideos', item)
return support.hdpass_get_servers(item)

View File

@@ -36,31 +36,24 @@ blacklist = ['BENVENUTI', 'Richieste Serie TV', 'CB01.UNO &#x25b6; TROVA L&#8217
'Openload: la situazione. Benvenuto Verystream', 'Openload: lo volete ancora?']
@support.menu
def mainlist(item):
findhost()
film = ''
filmSub = [
('HD', ['', 'menu', 'Film HD Streaming']),
('Generi', ['', 'menu', 'Film per Genere']),
('Anni', ['', 'menu', 'Film per Anno'])
]
tvshow = '/serietv/'
tvshowSub = [
('Aggiornamenti serie tv', ['/serietv/aggiornamento-quotidiano-serie-tv/', 'last']),
('Per Lettera', ['/serietv/', 'menu', 'Serie-Tv per Lettera']),
('Per Genere', ['/serietv/aggiornamento-quotidiano-serie-tv/', 'menu', 'Serie-Tv per Genere']),
('Per anno', ['/serietv/aggiornamento-quotidiano-serie-tv/', 'menu', 'Serie-Tv per Anno'])
]
autoplay.init(item.channel, list_servers, list_quality)
# Main options
itemlist = []
support.menu(itemlist, 'Ultimi 100 Film Aggiornati bold', 'last', host + '/lista-film-ultimi-100-film-aggiornati/')
support.menu(itemlist, 'Film bold', 'peliculas', host)
support.menu(itemlist, 'HD submenu', 'menu', host, args="Film HD Streaming")
support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host, args="Film per Anno")
support.menu(itemlist, 'Cerca film... submenu', 'search', host, args='film')
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serietv/', contentType='tvshow')
support.menu(itemlist, 'Aggiornamenti serie tv', 'last', host + '/serietv/aggiornamento-quotidiano-serie-tv/', contentType='tvshow')
support.menu(itemlist, 'Per Lettera submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Lettera")
support.menu(itemlist, 'Per Genere submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host + '/serietv/', contentType='tvshow', args="Serie-Tv per Anno")
support.menu(itemlist, 'Cerca serie... submenu', 'search', host + '/serietv/', contentType='tvshow', args='serie')
autoplay.show_option(item.channel, itemlist)
return itemlist
return locals()
def menu(item):
@@ -88,10 +81,9 @@ def menu(item):
def search(item, text):
support.log(item.url, "search" ,text)
try:
item.url = item.url + "/?s=" + text.replace(' ','+')
item.url = item.url + "/?s=" + text
return peliculas(item)
# Continua la ricerca in caso di errore
@@ -110,7 +102,7 @@ def newest(categoria):
item.url = host + '/lista-film-ultimi-100-film-aggiunti/'
return support.scrape(item, r'<a href=([^>]+)>([^<([]+)(?:\[([A-Z]+)\])?\s\(([0-9]{4})\)<\/a>',
['url', 'title', 'quality', 'year'],
patron_block=r'Ultimi 100 film aggiunti:.*?<\/td>')
patronBlock=r'Ultimi 100 film aggiunti:.*?<\/td>')
def last(item):
@@ -119,10 +111,8 @@ def last(item):
itemlist = []
infoLabels = {}
quality = ''
PERPAGE = 30
PERPAGE = 20
page = 1
count = 0
if item.page:
page = item.page
@@ -132,14 +122,13 @@ def last(item):
matches = support.match(item, r'<a href=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>', r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>', headers)[0]
for i, (url, title, info) in enumerate(matches):
if (page - 1) * PERPAGE > i - count: continue
if i - count >= page * PERPAGE: break
if (page - 1) * PERPAGE > i: continue
if i >= page * PERPAGE: break
add = True
title = title.rstrip()
if item.contentType == 'tvshow':
for i in itemlist:
if i.url == url: # togliamo i doppi
count = count + 1
add = False
else:
infoLabels['year'] = scrapertoolsV2.find_single_match(info, r'\(([0-9]+)\)')
@@ -170,21 +159,22 @@ def last(item):
return itemlist
@support.scrape
def peliculas(item):
support.log()
if item.contentType == 'movie' or '/serietv/' not in item.url:
patron = r'<div class="?card-image"?>.*?<img src="?([^" ]+)"? alt.*?<a href="?([^" >]+)(?:\/|")>([^<[(]+)(?:\[([A-Za-z0-9/-]+)])? (?:\(([0-9]{4})\))?.*?<strong>([^<>&]+).*?DURATA ([0-9]+).*?<br(?: /)?>([^<>]+)'
listGroups = ['thumb', 'url', 'title', 'quality', 'year', 'genre', 'duration', 'plot']
patron = r'<div class="?card-image"?>.*?<img src="?(?P<thumb>[^" ]+)"? alt.*?<a href="?(?P<url>[^" >]+)(?:\/|")>(?P<title>[^<[(]+)(?:\[(?P<quality>[A-Za-z0-9/-]+)])? (?:\((?P<year>[0-9]{4})\))?.*?<strong>(?P<genre>[^<>&]+).*?DURATA (?P<duration>[0-9]+).*?<br(?: /)?>(?P<plot>[^<>]+)'
action = 'findvideos'
else:
patron = r'div class="card-image">.*?<img src="([^ ]+)" alt.*?<a href="([^ >]+)">([^<[(]+)<\/a>.*?<strong><span style="[^"]+">([^<>0-9(]+)\(([0-9]{4}).*?</(?:p|div)>(.*?)</div'
patron = r'div class="card-image">.*?<img src="(?P<thumb>[^ ]+)" alt.*?<a href="(?P<url>[^ >]+)">(?P<title>[^<[(]+)<\/a>.*?<strong><span style="[^"]+">(?P<genre>[^<>0-9(]+)\((?P<year>[0-9]{4}).*?</(?:p|div)>(?P<plot>.*?)</div'
listGroups = ['thumb', 'url', 'title', 'genre', 'year', 'plot']
action = 'episodios'
return support.scrape(item, patron_block=[r'<div class="?sequex-page-left"?>(.*?)<aside class="?sequex-page-right"?>',
'<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)'],
patron=patron, listGroups=listGroups,
patronNext='<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">', blacklist=blacklist, action=action)
patronBlock=[r'<div class="?sequex-page-left"?>(.*?)<aside class="?sequex-page-right"?>',
'<div class="?card-image"?>.*?(?=<div class="?card-image"?>|<div class="?rating"?>)']
patronNext='<a class="?page-link"? href="?([^>]+)"?><i class="fa fa-angle-right">'
return locals()
def episodios(item):

View File

@@ -64,7 +64,7 @@ def search(item, texto):
def genres(item):
return support.scrape(item, patron_block=r'<div id="bordobar" class="dropdown-menu(.*?)</li>', patron=r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"', listGroups=['url', 'title'], action='video')
return support.scrape(item, patronBlock=r'<div id="bordobar" class="dropdown-menu(.*?)</li>', patron=r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"', listGroups=['url', 'title'], action='video')
def video(item):

View File

@@ -108,11 +108,11 @@ def episodios(item):
def menu(item):
patron_block = r'<ul class="sub-menu">.*?</ul>'
patronBlock = r'<ul class="sub-menu">.*?</ul>'
patron = r'menu-category-list"><a href="([^"]+)">([^<]+)<'
list_groups = ["url", "title"]
return support.scrape(item, patron, list_groups, blacklist="Anime", action="peliculas_menu", patron_block=patron_block)
return support.scrape(item, patron, list_groups, blacklist="Anime", action="peliculas_menu", patronBlock=patronBlock)
def search(item, texto):

View File

@@ -4,22 +4,15 @@
# by Greko
# ------------------------------------------------------------
"""
Riscritto per poter usufruire del modulo support.
Riscritto per poter usufruire del decoratore support.scrape
Problemi noti:
Le regex non prendono tutto...
server versystream : 'http://vcrypt.net/very/' # VeryS non decodifica il link :http://vcrypt.net/fastshield/
alcuni server tra cui nowvideo.club non sono implementati nella cartella servers
Alcune sezioni di anime-cartoni non vanno, alcune hanno solo la lista degli episodi, ma non hanno link
altre cambiano la struttura
La sezione novità non fa apparire il titolo degli episodi
In episodios è stata aggiunta la possibilità di configurare la videoteca
"""
import channelselector
from specials import autoplay, filtertools
from core import scrapertoolsV2, httptools, servertools, tmdb, support
#import channelselector
#from specials import autoplay#, filtertools
from core import scrapertoolsV2, httptools, support#, servertools, tmdb
from core.item import Item
from platformcode import logger, config
@@ -30,8 +23,8 @@ headers = ['Referer', host]
list_servers = ['verystream', 'wstream', 'speedvideo', 'flashx', 'nowvideo', 'streamango', 'deltabit', 'openload']
list_quality = ['default']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'eurostreaming')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'eurostreaming')
checklinks = config.get_setting('checklinks', 'cineblog01')
checklinks_number = config.get_setting('checklinks_number', 'cineblog01')
IDIOMAS = {'Italiano': 'ITA', 'Sub-ITA':'vosi'}
list_language = IDIOMAS.values()
@@ -48,39 +41,46 @@ def mainlist(item):
support.menu(itemlist, 'Cerca...', 'search', host, contentType = 'tvshow')
## itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
# richiesto per autoplay
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
# autoplay
support.aplay(item, itemlist, list_servers, list_quality)
# configurazione canale
support.channel_config(item, itemlist)
return itemlist
@support.scrape
def serietv(item):
#import web_pdb; web_pdb.set_trace()
# lista serie tv
## import web_pdb; web_pdb.set_trace()
support.log()
itemlist = []
if item.args:
# il titolo degli episodi viene inglobato in episode ma non sono visibili in newest!!!
patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"\s+target="_blank">(.*?)<\/a>'
listGroups = ['title', 'url', 'title2']
#patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"\s+target="_blank">(.*?)<\/a>'
## # DA SISTEMARE - problema: mette tutti gli episodi in sub-ita
## patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"'\
## '\s+target="_blank">(\d+x\d+) (.*?)(?:|\((.+?)\))</a>'
patron = r'<span class="serieTitle" style="font-size:20px">(.*?).[^]<a href="([^"]+)"'\
'\s+target="_blank">(\d+x\d+) (.*?)</a>'
listGroups = ['title', 'url', 'episode', 'title2']
patronNext = ''
# permette di vedere episodio e titolo + titolo2 in novità
def itemHook(item):
item.show = item.episode + item.title
return item
else:
patron = r'<div class="post-thumb">.*?\s<img src="([^"]+)".*?><a href="([^"]+)".*?>(.*?(?:\((\d{4})\)|(\d{4}))?)<\/a><\/h2>'
listGroups = ['thumb', 'url', 'title', 'year', 'year']
patronNext='a class="next page-numbers" href="?([^>"]+)">Avanti &raquo;</a>'
action='episodios'
return locals()
itemlist = support.scrape(item, patron_block='', patron=patron, listGroups=listGroups,
patronNext=patronNext, action='episodios')
return itemlist
@support.scrape
def episodios(item):
## import web_pdb; web_pdb.set_trace()
support.log("episodios")
support.log("episodios: %s" % item)
itemlist = []
item.contentType = 'episode'
# Carica la pagina
data = httptools.downloadpage(item.url).data
#========
@@ -97,46 +97,17 @@ def episodios(item):
patron = r'(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?<\/div>'\
'<div class="su-spoiler-content su-clearfix" style="display:none">|'\
'(?:\s|\Wn)?(?:<strong>)?(\d+&#.*?)(?:|)?<a\s(.*?)<\/a><br\s\/>)'
## '(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?'\
## '<\/div><div class="su-spoiler-content su-clearfix" style="display:none">|'\
## '(?:\s|\Wn)?(?:<strong>)?(\d[&#].*?)(?:|\W)?<a\s(.*?)<\/a><br\s\/>)'
## '(?:<\/span>\w+ STAGIONE\s\d+ (?:\()?(ITA|SUB ITA)(?:\))?<\/div>'\
## '<div class="su-spoiler-content su-clearfix" style="display:none">|'\
## '\s(?:<strong>)?(\d[&#].*?)<a\s(.*?)<\/a><br\s\/>)'
listGroups = ['lang', 'title', 'url']
itemlist = support.scrape(item, data=data, patron=patron,
listGroups=listGroups, action='findvideos')
# Permette la configurazione della videoteca senza andare nel menu apposito
# così si possono Attivare/Disattivare le impostazioni direttamente dalla
# pagina delle puntate
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Videoteca color lime"),
plot = 'Filtra per lingua utilizzando la configurazione della videoteca.\
Escludi i video in sub attivando "Escludi streams... " e aggiungendo sub in Parole',
config='videolibrary', #item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png')
))
listGroups = ['lang', 'title', 'url']
action = 'findvideos'
itemlist = filtertools.get_links(itemlist, item, list_language)
return itemlist
return locals()
# =========== def findvideos =============
def findvideos(item):
support.log()
itemlist =[]
# Requerido para FilterTools
## itemlist = filtertools.get_links(itemlist, item, list_language)
itemlist = support.server(item, item.url)
## support.videolibrary(itemlist, item)
return itemlist
support.log('findvideos', item)
return support.server(item, item.url)
# =========== def ricerca =============
def search(item, texto):
@@ -174,6 +145,3 @@ def newest(categoria):
return []
return itemlist
def paginator(item):
pass

View File

@@ -208,13 +208,13 @@ def findvideos(item):
itemlist = []
# data = httptools.downloadpage(item.url, headers=headers).data
patron_block = '<div class="entry-content">(.*?)<footer class="entry-footer">'
# bloque = scrapertools.find_single_match(data, patron_block)
patronBlock = '<div class="entry-content">(.*?)<footer class="entry-footer">'
# bloque = scrapertools.find_single_match(data, patronBlock)
patron = r'<a href="([^"]+)">'
# matches = re.compile(patron, re.DOTALL).findall(bloque)
matches, data = support.match(item, patron, patron_block, headers)
matches, data = support.match(item, patron, patronBlock, headers)
for scrapedurl in matches:
if 'is.gd' in scrapedurl:

View File

@@ -4,15 +4,18 @@
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ------------------------------------------------------------
import re
"""
Problemi noti:
- nella pagina categorie appaiono i risultati di tmdb in alcune voci
"""
from core import httptools, scrapertools, support
from core import tmdb
from core import scrapertoolsV2, httptools, support
from core.item import Item
from core.support import log
from platformcode import logger, config
from core.support import log
__channel__ = 'guardaserieclick'
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
@@ -30,34 +33,154 @@ def mainlist(item):
itemlist = []
support.menu(itemlist, 'Novità bold', 'serietvaggiornate', "%s/lista-serie-tv" % host, 'tvshow')
support.menu(itemlist, 'Nuove serie', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow')
support.menu(itemlist, 'Serie inedite Sub-ITA', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['inedite'])
support.menu(itemlist, 'Da non perdere bold', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'da non perdere'])
support.menu(itemlist, 'Classiche bold', 'nuoveserie', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'classiche'])
support.menu(itemlist, 'Anime', 'lista_serie', "%s/category/animazione/" % host, 'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow', args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow', args=['serie'])
support.menu(itemlist, 'Serie', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args=['news'])
support.menu(itemlist, 'Ultimi Aggiornamenti submenu', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args= ['update'])
support.menu(itemlist, 'Categorie', 'categorie', host, 'tvshow', args=['cat'])
support.menu(itemlist, 'Serie inedite Sub-ITA submenu', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args=['inedite'])
support.menu(itemlist, 'Da non perdere bold submenu', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'da non perdere'])
support.menu(itemlist, 'Classiche bold submenu', 'serietv', "%s/lista-serie-tv" % host, 'tvshow', args=['tv', 'classiche'])
support.menu(itemlist, 'Disegni che si muovono sullo schermo per magia bold', 'tvserie', "%s/category/animazione/" % host, 'tvshow', args= ['anime'])
support.menu(itemlist, 'Cerca', 'search', host, 'tvshow', args=['cerca'])
# autoplay
support.aplay(item, itemlist, list_servers, list_quality)
# configurazione del canale
support.channel_config(item, itemlist)
return itemlist
@support.scrape
def serietv(item):
## import web_pdb; web_pdb.set_trace()
log('serietv ->\n')
##<<<<<<< HEAD
##
## action = 'episodios'
## listGroups = ['url', 'thumb', 'title']
## patron = r'<a href="([^"]+)".*?> <img\s.*?src="([^"]+)" \/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<\/p>'
## if 'news' in item.args:
## patronBlock = r'<div class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'
## elif 'inedite' in item.args:
## patronBlock = r'<div class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
## elif 'da non perdere' in item.args:
## patronBlock = r'<div class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
## elif 'classiche' in item.args:
## patronBlock = r'<div class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
## elif 'update' in item.args:
## listGroups = ['url', 'thumb', 'episode', 'lang', 'title']
## patron = r'rel="nofollow" href="([^"]+)"[^>]+> <img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(\d+.\d+) \((.+?)\).<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
## patronBlock = r'meta-slug="lastep">(.*?)</div></div><div'
## # permette di vedere episodio + titolo + titolo2 in novità
## def itemHook(item):
## item.show = item.episode + item.title
## return item
## return locals()
##
##@support.scrape
##def tvserie(item):
##
## action = 'episodios'
## listGroups = ['url', 'thumb', 'title']
## patron = r'<a\shref="([^"]+)".*?>\s<img\s.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
## patronBlock = r'<div\sclass="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\sclass="container-fluid whitebg" style="">'
## patronNext = r'<link\s.*?rel="next"\shref="([^"]+)"'
##
## return locals()
##
##@support.scrape
##def episodios(item):
## log('episodios ->\n')
## item.contentType = 'episode'
##
## action = 'findvideos'
## listGroups = ['episode', 'lang', 'title2', 'plot', 'title', 'url']
## patron = r'class="number-episodes-on-img"> (\d+.\d+)(?:|[ ]\((.*?)\))<[^>]+>'\
## '[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
## '(.*?)<[^>]+></div></div>.<span\s.+?meta-serie="(.*?)" meta-stag=(.*?)</span>'
##
## return locals()
##
##=======
action = 'episodios'
listGroups = ['url', 'thumb', 'title']
patron = r'<a href="([^"]+)".*?> <img\s.*?src="([^"]+)" \/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<\/p>'
if 'news' in item.args:
patron_block = r'<div class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'
elif 'inedite' in item.args:
patron_block = r'<div class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
elif 'da non perdere' in item.args:
patron_block = r'<div class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
elif 'classiche' in item.args:
patron_block = r'<div class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
elif 'update' in item.args:
listGroups = ['url', 'thumb', 'episode', 'lang', 'title']
patron = r'rel="nofollow" href="([^"]+)"[^>]+> <img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(\d+.\d+) \((.+?)\).<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
patron_block = r'meta-slug="lastep">(.*?)</div></div><div'
# permette di vedere episodio + titolo + titolo2 in novità
def itemHook(item):
item.show = item.episode + item.title
return item
return locals()
@support.scrape
def tvserie(item):
action = 'episodios'
listGroups = ['url', 'thumb', 'title']
patron = r'<a\shref="([^"]+)".*?>\s<img\s.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
patron_block = r'<div\sclass="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\sclass="container-fluid whitebg" style="">'
patronNext = r'<link\s.*?rel="next"\shref="([^"]+)"'
return locals()
@support.scrape
def episodios(item):
log('episodios ->\n')
item.contentType = 'episode'
action = 'findvideos'
listGroups = ['episode', 'lang', 'title2', 'plot', 'title', 'url']
patron = r'class="number-episodes-on-img"> (\d+.\d+)(?:|[ ]\((.*?)\))<[^>]+>'\
'[^>]+>[^>]+>[^>]+>[^>]+>(.*?)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
'(.*?)<[^>]+></div></div>.<span\s.+?meta-serie="(.*?)" meta-stag=(.*?)</span>'
return locals()
##>>>>>>> a72130e0324ae485ae5f39d3d8f1df46c365fa5b
def findvideos(item):
log()
return support.server(item, item.url)
@support.scrape
def categorie(item):
log
action = 'tvserie'
listGroups = ['url', 'title']
patron = r'<li>\s<a\shref="([^"]+)"[^>]+>([^<]+)</a></li>'
patron_block = r'<ul\sclass="dropdown-menu category">(.*?)</ul>'
return locals()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
##
### ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
log()
itemlist = []
item = Item()
item.contentType= 'episode'
item.args = 'update'
try:
if categoria == "series":
item.url = "%s/lista-serie-tv" % host
item.action = "serietvaggiornate"
itemlist = serietvaggiornate(item)
item.action = "serietv"
itemlist = serietv(item)
if itemlist[-1].action == "serietvaggiornate":
if itemlist[-1].action == "serietv":
itemlist.pop()
# Continua la ricerca in caso di errore
@@ -69,207 +192,18 @@ def newest(categoria):
return itemlist
### ================================================================================================================
### ----------------------------------------------------------------------------------------------------------------
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
log(texto)
item.url = host + "/?s=" + texto
item.args = 'cerca'
try:
return lista_serie(item)
return tvserie(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()).replace('"', "'")
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def nuoveserie(item):
log()
itemlist = []
patron_block = ''
if 'inedite' in item.args:
patron_block = r'<div class="container container-title-serie-ined container-scheda" meta-slug="ined">(.*?)</div></div><div'
elif 'da non perdere' in item.args:
patron_block = r'<div class="container container-title-serie-danonperd container-scheda" meta-slug="danonperd">(.*?)</div></div><div'
elif 'classiche' in item.args:
patron_block = r'<div class="container container-title-serie-classiche container-scheda" meta-slug="classiche">(.*?)</div></div><div'
else:
patron_block = r'<div class="container container-title-serie-new container-scheda" meta-slug="new">(.*?)</div></div><div'
patron = r'<a href="([^"]+)".*?><img\s.*?src="([^"]+)" \/>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<\/p>'
matches = support.match(item, patron, patron_block, headers)[0]
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="tvshow",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
show=scrapedtitle,
thumbnail=scrapedthumbnail,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def serietvaggiornate(item):
log()
itemlist = []
patron_block = r'<div class="container\s*container-title-serie-lastep\s*container-scheda" meta-slug="lastep">(.*?)<\/div><\/div><div'
patron = r'<a rel="nofollow"\s*href="([^"]+)"[^>]+><img.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>'
matches = support.match(item, patron, patron_block, headers)[0]
for scrapedurl, scrapedthumbnail, scrapedep, scrapedtitle in matches:
episode = re.compile(r'^(\d+)x(\d+)', re.DOTALL).findall(scrapedep) # Prendo stagione ed episodioso
scrapedtitle = cleantitle(scrapedtitle)
contentlanguage = ""
if 'sub-ita' in scrapedep.strip().lower():
contentlanguage = 'Sub-ITA'
extra = r'<span\s.*?meta-stag="%s" meta-ep="%s" meta-embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"[^>]*>' % (
episode[0][0], episode[0][1].lstrip("0"))
infoLabels = {}
infoLabels['episode'] = episode[0][1].zfill(2)
infoLabels['season'] = episode[0][0]
title = str(
"%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], contentlanguage)).strip()
itemlist.append(
Item(channel=item.channel,
action="findepvideos",
contentType="tvshow",
title=title,
show=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
extra=extra,
thumbnail=scrapedthumbnail,
contentLanguage=contentlanguage,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
log()
return support.scrape(item, r'<li>\s<a\shref="([^"]+)"[^>]+>([^<]+)</a></li>', ['url', 'title'], patron_block=r'<ul\sclass="dropdown-menu category">(.*?)</ul>', headers=headers, action="lista_serie")
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
log()
itemlist = []
patron_block = r'<div\sclass="col-xs-\d+ col-sm-\d+-\d+">(.*?)<div\sclass="container-fluid whitebg" style="">'
patron = r'<a\shref="([^"]+)".*?>\s<img\s.*?src="([^"]+)" />[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)</p></div>'
return support.scrape(item, patron, ['url', 'thumb', 'title'], patron_block=patron_block, patronNext=r"<link\s.*?rel='next'\shref='([^']*)'", action='episodios')
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
log()
itemlist = []
patron = r'<div\sclass="[^"]+">\s([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><p[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>'
patron += r'[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s'
patron += r'.*?embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s?'
patron += r'(?:<img\sclass="[^"]+" meta-src="([^"]+)"[^>]+>|<img\sclass="[^"]+" src="" data-original="([^"]+)"[^>]+>)?'
matches = support.match(item, patron, headers=headers)[0]
for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2, scrapedurl3, scrapedthumbnail, scrapedthumbnail2 in matches:
scrapedtitle = cleantitle(scrapedtitle)
scrapedepisode = scrapedepisode.zfill(2)
scrapedepisodetitle = cleantitle(scrapedepisodetitle)
title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip()
if 'SUB-ITA' in scrapedtitle:
title += " "+support.typo("Sub-ITA", '_ [] color kod')
infoLabels = {}
infoLabels['season'] = scrapedseason
infoLabels['episode'] = scrapedepisode
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=support.typo(title, 'bold'),
fulltitle=scrapedtitle,
url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3,
contentType="episode",
plot=scrapedplot,
contentSerieName=scrapedserie,
contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '',
infoLabels=infoLabels,
thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
support.videolibrary(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findepvideos(item):
log()
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
matches = scrapertools.find_multiple_matches(data, item.extra)
data = "\r\n".join(matches[0])
item.contentType = 'movie'
return support.server(item, data=data)
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
log()
if item.contentType == 'tvshow':
data = httptools.downloadpage(item.url, headers=headers).data
matches = scrapertools.find_multiple_matches(data, item.extra)
data = "\r\n".join(matches[0])
else:
log(item.url)
data = item.url
return support.server(item, data)

View File

@@ -92,172 +92,202 @@ def url_decode(url_enc):
def color(text, color):
return "[COLOR " + color + "]" + text + "[/COLOR]"
def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data="", patron_block="",
patronNext="", action="findvideos", addVideolibrary = True, type_content_dict={}, type_action_dict={}):
def scrape(func):
# args is a dict containing the foolowing keys:
# patron: the patron to use for scraping page, all capturing group must match with listGroups
# listGroups: a list containing the scraping info obtained by your patron, in order
# accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating, episode, lang
# header: values to pass to request header
# headers: values to pass to request header
# blacklist: titles that you want to exclude(service articles for example)
# data: if you want to pass data manually, maybe because you need some custom replacement
# patron_block: patron to get parts of the page (to scrape with patron attribute),
# patronBlock: patron to get parts of the page (to scrape with patron attribute),
# if you need a "block inside another block" you can create a list, please note that all matches
# will be packed as string
# patronNext: patron for scraping next page link
# action: if you want results perform an action different from "findvideos", useful when scraping film by genres
# url_host: string to prepend to scrapedurl, useful when url don't contain host
# addVideolibrary: if "add to videolibrary" should appear
# example usage:
# import support
# itemlist = []
# patron = 'blablabla'
# headers = [['Referer', host]]
# blacklist = 'Request a TV serie!'
# return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'title2', 'year', 'plot', 'episode', 'lang'],
# return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot', 'episode', 'lang'],
# headers=headers, blacklist=blacklist)
# listGroups
# thumb = immagine, quality = qualità, url = link singolo o gruppo, title = titolo film o serie, title2 = titolo aggiuntivo
# year = anno del film o della serie, plot = descrizione film o serie, episode = numero stagione - numero episodio in caso di serie,
# lang = lingua del video
# 'type' is a check for typologies of content e.g. Film or TV Series
# 'episode' is a key to grab episode numbers if it is separated from the title
# IMPORTANT 'type' is a special key, to work need type_content_dict={} and type_action_dict={}
# IMPORTANT 'type' is a special key, to work need typeContentDict={} and typeActionDict={}
itemlist = []
def wrapper(*args):
itemlist = []
if not data:
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"')
data = re.sub('\n|\t', ' ', data)
# replace all ' with " and eliminate newline, so we don't need to worry about
log('DATA =', data)
args = func(*args)
block = data
item = args['item']
if patron_block:
if type(patron_block) == str:
patron_block = [patron_block]
action = args['action'] if 'action' in args else 'findvideos'
anime = args['anime'] if 'anime' in args else ''
addVideolibrary = args['addVideolibrary'] if 'addVideolibrary' in args else True
blacklist = args['blacklist'] if 'blacklist' in args else ''
data = args['data'] if 'data' in args else ''
headers = args['headers'] if 'headers' in args else ''
patron = args['patron'] if 'patron' in args else ''
patronNext = args['patronNext'] if 'patronNext' in args else ''
patronBlock = args['patronBlock'] if 'patronBlock' in args else ''
typeActionDict = args['type_action_dict'] if 'type_action_dict' in args else {}
typeContentDict = args['type_content_dict'] if 'type_content_dict' in args else {}
for n, regex in enumerate(patron_block):
blocks = scrapertoolsV2.find_multiple_matches(block, regex)
block = ""
for b in blocks:
block += "\n" + str(b)
log('BLOCK ', n, '=', block)
else:
block = data
if patron and listGroups:
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES =', matches)
if not data:
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data.replace("'", '"')
data = re.sub('\n|\t', ' ', data)
# replace all ' with " and eliminate newline, so we don't need to worry about
log('DATA =', data)
known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating', 'type', 'lang'] #by greko aggiunto episode
lang = '' # aggiunto per gestire i siti con pagine di serietv dove si hanno i video in ita e in subita
for match in matches:
if len(listGroups) > len(match): # to fix a bug
match = list(match)
match.extend([''] * (len(listGroups) - len(match)))
block = data
scraped = {}
for kk in known_keys:
val = match[listGroups.index(kk)] if kk in listGroups else ''
if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
if patronBlock:
if type(patronBlock) == str:
patronBlock = [patronBlock]
title = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title"])).replace('', '\'').replace('"', "'").strip() # fix by greko da " a '
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
longtitle = typo(title, 'bold')
if scraped['quality']: longtitle = longtitle + typo(scraped['quality'], '_ [] color kod')
if scraped['episode']:
scraped['episode'] = re.sub(r'\s-\s|-|x|&#8211', 'x' , scraped['episode'])
longtitle = typo(scraped['episode'] + ' - ', 'bold') + longtitle
if scraped['title2']:
title2 = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title2"])).replace('"', "'").strip()
longtitle = longtitle + typo(title2, 'bold _ -- _')
## Aggiunto/modificato per gestire i siti che hanno i video
## in ita e subita delle serie tv nella stessa pagina
if scraped['lang'] == '': #altrimenti nei canali dei film mi aggiunge sub-ita a tutti i film successivi
lang = '' # o in alternativa lang = 'ITA'
if scraped['lang']:
if 'sub' in scraped['lang'].lower():
lang = 'Sub-ITA'
else:
lang = 'ITA'
if lang != '':
longtitle += typo(lang, '_ [] color kod')
if item.infoLabels["title"] or item.fulltitle: # if title is set, probably this is a list of episodes or video sources
infolabels = item.infoLabels
else:
infolabels = {}
if scraped["year"]:
infolabels['year'] = scraped["year"]
if scraped["plot"]:
infolabels['plot'] = plot
if scraped["duration"]:
matches = scrapertoolsV2.find_multiple_matches(scraped["duration"],r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
for h, m in matches:
scraped["duration"] = int(h) * 60 + int(m)
if not matches:
scraped["duration"] = scrapertoolsV2.find_single_match(scraped["duration"], r'(\d+)')
infolabels['duration'] = int(scraped["duration"]) * 60
if scraped["genere"]:
genres = scrapertoolsV2.find_multiple_matches(scraped["genere"], '[A-Za-z]+')
infolabels['genere'] = ", ".join(genres)
if scraped["rating"]:
infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"])
if type_content_dict:
for name, variants in type_content_dict.items():
if scraped['type'] in variants:
item.contentType = name
if type_action_dict:
for name, variants in type_action_dict.items():
if scraped['type'] in variants:
action = name
if inspect.stack()[1][3] == 'episodios': item.contentType = 'episode'
if scraped["title"] not in blacklist:
it = Item(
channel=item.channel,
action=action,
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
language = lang if lang != '' else '',
quality=scraped["quality"],
url=scraped["url"],
infoLabels=infolabels,
thumbnail=scraped["thumb"],
args=item.args
)
for lg in list(set(listGroups).difference(known_keys)):
it.__setattr__(lg, match[listGroups.index(lg)])
itemlist.append(it)
checkHost(item, itemlist)
if (item.contentType == "tvshow" and (action != "findvideos" and action != "play")) \
or (item.contentType == "episode" and action != "play") \
or (item.contentType == "movie" and action != "play"):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
for n, regex in enumerate(patronBlock):
blocks = scrapertoolsV2.find_multiple_matches(block, regex)
block = ""
for b in blocks:
block += "\n" + str(b)
log('BLOCK ', n, '=', block)
else:
for it in itemlist:
it.infoLabels = item.infoLabels
block = data
if patron:
matches = scrapertoolsV2.find_multiple_matches_groups(block, patron)
log('MATCHES =', matches)
if patronNext:
nextPage(itemlist, item, data, patronNext, 2)
known_keys = ['url', 'title', 'title2', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere',
'rating', 'type', 'lang'] # by greko aggiunto episode
lang = '' # aggiunto per gestire i siti con pagine di serietv dove si hanno i video in ita e in subita
for match in matches:
listGroups = match.keys()
match = match.values()
if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
item.fulltitle = item.infoLabels["title"]
videolibrary(itemlist, item)
if len(listGroups) > len(match): # to fix a bug
match = list(match)
match.extend([''] * (len(listGroups) - len(match)))
return itemlist
scraped = {}
for kk in known_keys:
val = match[listGroups.index(kk)] if kk in listGroups else ''
if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
title = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title"])
.replace('"',"'")).strip() # fix by greko da " a '
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
longtitle = typo(title, 'bold')
if scraped['quality']: longtitle = longtitle + typo(scraped['quality'], '_ [] color kod')
if scraped['episode']:
scraped['episode'] = re.sub(r'\s-\s|-|x|&#8211', 'x', scraped['episode'])
longtitle = typo(scraped['episode'] + ' - ', 'bold') + longtitle
if scraped['title2']:
title2 = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["title2"]).replace('"', "'")).strip()
longtitle = longtitle + typo(title2, 'bold _ -- _')
## Aggiunto/modificato per gestire i siti che hanno i video
## in ita e subita delle serie tv nella stessa pagina
if scraped['lang']:
if 'sub' in scraped['lang'].lower():
lang = 'Sub-ITA'
else:
lang = 'ITA'
if lang != '':
longtitle += typo(lang, '_ [] color kod')
# if title is set, probably this is a list of episodes or video sources
if item.infoLabels["title"] or item.fulltitle:
infolabels = item.infoLabels
else:
infolabels = {}
if scraped["year"]:
infolabels['year'] = scraped["year"]
if scraped["plot"]:
infolabels['plot'] = plot
if scraped["duration"]:
matches = scrapertoolsV2.find_multiple_matches(scraped["duration"],
r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
for h, m in matches:
scraped["duration"] = int(h) * 60 + int(m)
if not matches:
scraped["duration"] = scrapertoolsV2.find_single_match(scraped["duration"], r'(\d+)')
infolabels['duration'] = int(scraped["duration"]) * 60
if scraped["genere"]:
genres = scrapertoolsV2.find_multiple_matches(scraped["genere"], '[A-Za-z]+')
infolabels['genere'] = ", ".join(genres)
if scraped["rating"]:
infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"])
if typeContentDict:
for name, variants in typeContentDict.items():
if scraped['type'] in variants:
item.contentType = name
if typeActionDict:
for name, variants in typeActionDict.items():
if scraped['type'] in variants:
action = name
if scraped["title"] not in blacklist:
it = Item(
channel=item.channel,
action=action,
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
quality=scraped["quality"],
url=scraped["url"],
infoLabels=infolabels,
thumbnail=scraped["thumb"],
args=item.args
)
for lg in list(set(listGroups).difference(known_keys)):
it.__setattr__(lg, match[listGroups.index(lg)])
if 'itemHook' in args:
it = args['itemHook'](it)
itemlist.append(it)
checkHost(item, itemlist)
## if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \
## or (item.contentType == "movie" and action != "play"):
if (item.contentType == "tvshow" and (action != "findvideos" and action != "play")) \
or (item.contentType == "episode" and action != "play") \
or (item.contentType == "movie" and action != "play") :
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
else:
for it in itemlist:
it.infoLabels = item.infoLabels
if 'itemlistHook' in args:
itemlist = args['itemlistHook'](itemlist)
if patronNext:
nextPage(itemlist, item, data, patronNext, 2)
if anime:
from specials import autorenumber
autorenumber.renumber(itemlist)
if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
item.fulltitle = item.infoLabels["title"]
videolibrary(itemlist, item)
if 'fullItemlistHook' in args:
itemlist = args['fullItemlistHook'](itemlist)
return itemlist
return wrapper
def checkHost(item, itemlist):
@@ -398,13 +428,9 @@ def swzz_get_url(item):
return data
def menu(itemlist, title='', action='', url='', contentType='movie', args=[]):
def menuItem(itemlist, filename, title='', action='', url='', contentType='movie', args=[]):
# Function to simplify menu creation
frame = inspect.stack()[1]
filename = frame[0].f_code.co_filename
filename = os.path.basename(filename).replace('.py','')
# Call typo function
title = typo(title)
@@ -428,6 +454,51 @@ def menu(itemlist, title='', action='', url='', contentType='movie', args=[]):
return itemlist
def menu(func):
    """Decorator for channel ``mainlist`` functions.

    The decorated function is expected to return a dict (typically its
    ``locals()``) containing at least ``item``, plus any of the optional
    keys ``film``, ``filmSub``, ``tvshow``, ``tvshowSub`` that describe
    the channel's menu entries.  The wrapper builds the corresponding
    itemlist via ``menuItem`` and wires up autoplay for the channel.

    Returns the generated itemlist.
    """
    def wrapper(*args):
        # The decorated mainlist returns a dict of its locals; re-bind to it.
        args = func(*args)
        item = args['item']
        # Channel-level configuration lives in the decorated module's globals.
        host = func.__globals__['host']
        list_servers = func.__globals__['list_servers']
        list_quality = func.__globals__['list_quality']
        # e.g. 'channels.altadefinizione01' -> 'altadefinizione01'
        filename = func.__module__.split('.')[1]

        listUrls = ['film', 'filmSub', 'tvshow', 'tvshowSub']
        dictUrl = {}
        for name in listUrls:
            dictUrl[name] = args[name] if name in args else None

        autoplay.init(item.channel, list_servers, list_quality)

        # Main options
        itemlist = []
        if dictUrl['film'] is not None:
            menuItem(itemlist, filename, 'Film bold', 'peliculas', host + dictUrl['film'])
            # Guard with `or []`: a channel may declare 'film' without any
            # 'filmSub' entries; iterating None would raise TypeError.
            for sub, var in (dictUrl['filmSub'] or []):
                # var is [url_suffix, action, optional extra args]
                menuItem(itemlist, filename, sub + ' submenu', var[1],
                         host + var[0],
                         args=var[2] if len(var) > 2 else '')
            menuItem(itemlist, filename, 'Cerca submenu bold', 'search', host, args='film')
        if dictUrl['tvshow'] is not None:
            menuItem(itemlist, filename, 'Serie TV bold', 'peliculas',
                     host + dictUrl['tvshow'], contentType='tvshow')
            # Same guard for the TV-show sub-menu entries.
            for sub, var in (dictUrl['tvshowSub'] or []):
                menuItem(itemlist, filename, sub + ' submenu', var[1],
                         host + var[0], contentType='tvshow',
                         args=var[2] if len(var) > 2 else '')
            menuItem(itemlist, filename, 'Cerca submenu bold', 'search', host, args='serie')
        autoplay.show_option(item.channel, itemlist)
        return itemlist
    return wrapper
def typo(string, typography=''):
kod_color = '0xFF65B3DA' #'0xFF0081C2'
@@ -480,7 +551,7 @@ def typo(string, typography=''):
return string
def match(item, patron='', patron_block='', headers='', url=''):
def match(item, patron='', patronBlock='', headers='', url=''):
matches = []
url = url if url else item.url
data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data.replace("'", '"')
@@ -488,8 +559,8 @@ def match(item, patron='', patron_block='', headers='', url=''):
data = re.sub(r'>\s\s*<', '><', data)
log('DATA= ', data)
if patron_block:
block = scrapertoolsV2.find_single_match(data, patron_block)
if patronBlock:
block = scrapertoolsV2.find_single_match(data, patronBlock)
log('BLOCK= ',block)
else:
block = data
@@ -546,7 +617,8 @@ def nextPage(itemlist, item, data='', patron='', function_level=1, next_page='',
log('NEXT= ', next_page)
itemlist.append(
Item(channel=item.channel,
action=inspect.stack()[function_level][3],
#action=inspect.stack()[function_level][3],
action = item.action,
contentType=item.contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
url=next_page,
@@ -556,7 +628,7 @@ def nextPage(itemlist, item, data='', patron='', function_level=1, next_page='',
return itemlist
def pagination(itemlist, item, page, perpage, function_level=1):
if len(itemlist) >= perpage: # page * perpage
if len(itemlist) >= page * perpage:
itemlist.append(
Item(channel=item.channel,
action=inspect.stack()[function_level][3],