-Nuova ricerca globale
-migliorie prestazionali in generale
-fix vari ai server
This commit is contained in:
marco
2019-12-20 22:32:38 +01:00
parent c2c0ccf525
commit f4e9f29f40
192 changed files with 5763 additions and 48666 deletions

View File

@@ -39,8 +39,7 @@ def mainlist(item):
tvshow = ['/category/serie-tv/'
]
## Anime = [(support.typo('Anime', 'bullet bold'),['/category/anime-giapponesi/', 'peliculas', 'anime', 'tvshow'])
## ]
anime = ['/category/anime-giapponesi/']
## Sport = [(support.typo('Sport', 'bullet bold'), ['/category/sport/', 'peliculas', 'sport', 'tvshow'])
## ]
@@ -53,12 +52,7 @@ def mainlist(item):
@support.scrape
def peliculas(item):
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
GENERI = scrapertoolsV2.find_multiple_matches(data, r'<a class="dropdown-item" href="[^"]+" title="([A-z]+)"')
action = 'check'
patronBlock = r'<div class="container">.*?class="col-md-12[^"]*?">(?P<block>.*?)<div class=(?:"container"|"bg-dark ")>'
if item.args == 'newest':
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>.+?(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
@@ -69,49 +63,30 @@ def peliculas(item):
patronBlock = r'<section id="slider">(?P<block>.*?)</section>'
patron = r'<a href="(?P<url>(?:https:\/\/.+?\/(?P<title>[^\/]+[a-zA-Z0-9\-]+)(?P<year>\d{4})))/".+?url\((?P<thumb>[^\)]+)\)">'
elif item.contentType == 'tvshow':
action = 'episodios'
patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
if item.args == 'anime':
anime = True
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?:.+?[^fFiInNeE]+?\(?(?P<lang>[sSuUbBiItTaA]+)\)?.+?)<'
elif item.args == 'update':
# patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
# if item.args == 'anime':
# anime = True
# patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>\s?(?P<rating>[\d\.]+)?[^>]+>[^>]+>(?P<title>.+?)\(?(?P<year>\d+)?\)?<[^>]+>[^>]+>(?:.+?[^fFiInNeE]+?\(?(?P<lang>[sSuUbBiItTaA]+)\)?.+?)<'
if item.args == 'update':
patron = r'<a href="(?P<url>[^"]+)".+?url\((?P<thumb>.+?)\)">\s<div class="titolo">(?P<title>.+?)(?: &#8211; Serie TV)?(?:\([sSuUbBiItTaA\-]+\))?[ ]?(?P<year>\d{4})?</div>[ ]<div class="genere">(?:[\w]+?\.?\s?[\s|S]?[\dx\-S]+?\s\(?(?P<lang>[iItTaA]+|[sSuUbBiItTaA\-]+)\)?\s?(?P<quality>[HD]+)?|.+?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?</div>)'
pagination = 25
else:
patron = r'<a href="(?P<url>[^"]+)"\s*title="(?P<title>[^"\(]+)(?:"|\()(?:(?P<year>\d+)[^"]+)?.*?url\((?P<thumb>[^\)]+)\)(?:.*?<div class="voto">[^>]+>[^>]+>\s*(?P<rating>[^<]+))?.*?<div class="titolo">[^>]+>(?:<div class="genere">[^ ]*(?:\s\d+)?\s*(?:\()?(?P<lang>[^\)< ]+))?'
else:
#search
patron = r'<div class="col-lg-3">[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)".+?url\((?P<thumb>[^\)]+)\)">[^>]+>[^>]+>[^>]+>(?:[^>]+>)?\s?(?P<rating>[\d\.]+)?[^>]+>(?P<title>.+?)(?:[ ]\((?P<year>\d{4})\))?<[^>]+>[^>]+>(.?[\d\-x]+\s\(?(?P<lang>[sSuUbBiItTaA\-]+)?\)?\s?(?P<quality>[\w]+)?[|]?\s?(?:[fFiInNeE]+)?\s?\(?(?P<lang2>[sSuUbBiItTaA\-]+)?\)?)?'
def itemHook(item):
if 'sub' in item.contentLanguage.lower():
item.contentLanguage= 'Sub-ITA'
item.title = re.sub('[Ss]ub(?:-)?', item.contentLanguage, item.title)
if item.lang2:
if len(item.lang2)<3:
item.lang2 = 'ITA'
item.contentLanguage = item.lang2
item.title += support.typo(item.lang2, '_ [] color kod')
dataBlock = httptools.downloadpage(item.url, headers=headers).data
genere = scrapertoolsV2.find_single_match(dataBlock, r'rel="category tag">([a-zA-Z0-9]+).+?<')
if genere.lower() in str(GENERI).lower():
item.contentType = 'movie'
action = 'findvideos'
if item.args == 'update':
item.title = item.title.replace('-',' ')
elif genere.lower() == 'serie':
item.action = 'episodios'
item.contentType = 'tvshow'
elif genere.lower() == 'anime':
blockAnime = scrapertoolsV2.find_single_match(dataBlock, r'<div id="container" class="container">(.+?)<div style="margin-left')
if 'stagione' in blockAnime.lower() or 'episodio' in blockAnime.lower() or 'saga' in blockAnime.lower():
anime = True
item.action = 'episodios'
item.contentType = 'tvshow'
else:
item.contentType = 'movie'
item.action = 'findvideos'
item.url = scrapertoolsV2.find_single_match(blockAnime, r'<span class="txt_dow">(?:.+?)?Streaming:(?:.+?)?</span>(.*?)<div style="margin-left:')
else:
# Tutto il resto!!
pass
if item.args == 'update':
item.title = item.title.replace('-', ' ')
return item
@@ -121,22 +96,29 @@ def peliculas(item):
@support.scrape
def episodios(item):
data=item.data
# debug = True
support.log('EPISODIOS DATA',data)
if item.args == 'anime':
support.log("Anime :", item)
blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555', 'Cloudvideo']
patron = r'(Stagione (?P<season>\d+))?.+?<a target=(?P<url>[^>]+>(?P<title>.+?(?P<episode>\d+)))(?:[:]?.+?)(?:</a></p>|</a><br />)'
patronBlock = r'Stagione (?P<season>\d+)</span><br />(?P<block>.*?)(?:<div style="margin-left:|<span class="txt_dow">)'
# blacklist = ['Clipwatching', 'Verystream', 'Easybytez', 'Flix555', 'Cloudvideo']
patron = r'<a target=(?P<url>[^>]+>(?P<title>Episodio\s(?P<episode>\d+))(?::)?(?:(?P<title2>[^<]+))?.*?(?:<br|</p))'
patronBlock = r'(?:Stagione (?P<season>\d+))?(?:</span><br />|</span></p>|strong></p>)(?P<block>.*?)(?:<div style="margin-left|<span class="txt_dow">)'
item.contentType = 'tvshow'
item.contentSerieName = item.fulltitle
# item.contentSerieName = item.fulltitle
else:# item.extra == 'serie':
# debug = True
support.log("Serie :", item)
patron = r'(?P<episode>\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+)[;]?[ ]?(?:(?P<title>[^<]+)(?P<url>.*?)|(\2[ ])(?:<(\3.*?)))(?:</a><br />|</a></p>)'
patronBlock = r'<p><strong>(?P<block>(?:.+?[Ss]tagione.+?(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?(?:|.+?|</strong>)(/?:</span>)?</p>.*?</p>)'
patronBlock = r'<p><strong>(?:.+?[Ss]tagione\s)?(?:(?P<lang>iTA|ITA|Sub-ITA|Sub-iTA))?.*?</strong>(?P<block>.+?)(?:</span|</p)'
item.contentType = 'tvshow'
item.contentSerieName = item.fulltitle
# item.contentSerieName = item.fulltitle
def itemHook(item):
# support.dbg()
if not scrapertoolsV2.find_single_match(item.title, r'(\d+x\d+)'):
item.title = re.sub(r'(\d+) -', '1x\\1', item.title)
return item
#debug = True
return locals()
@@ -145,7 +127,7 @@ def genres(item):
action='peliculas'
patron_block=r'<div id="bordobar" class="dropdown-menu(?P<block>.*?)</li>'
patron=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
patronMenu=r'<a class="dropdown-item" href="(?P<url>[^"]+)" title="(?P<title>[A-z]+)"'
return locals()
@@ -154,7 +136,7 @@ def search(item, texto):
support.log(item.url,texto)
text = texto.replace(' ', '+')
item.url = host + "/?s=" + texto
item.contentType = 'episode'
item.contentType = 'tv'
item.args = 'search'
try:
return peliculas(item)
@@ -192,9 +174,41 @@ def newest(categoria):
return itemlist
def check(item):
    """Fetch the item's page and dispatch to episodios() or findvideos().

    The site mixes movies, TV series and anime under the same listing, so
    the page itself is inspected: the category tag decides series vs film,
    and a dedicated container block marks anime pages.  The downloaded
    HTML is cached on the item (item.data / item.url) so the next step
    does not re-download it.
    """
    data = httptools.downloadpage(item.url, headers=headers).data
    # support.log('cinemalibero DATA=',data)
    if data:
        # Category label from the WordPress tag, e.g. "Serie" / "Anime".
        genere = scrapertoolsV2.find_single_match(data, r'rel="category tag">([a-zA-Z0-9]+).+?<')
        # Anime pages have this container; empty string means not anime.
        blockAnime = scrapertoolsV2.find_single_match(data, r'<div id="container" class="container">(.+?<div style="margin-left)')
        support.log('GENRE', genere)
        if blockAnime:
            support.log('È un ANIME')
            if 'episodio' in blockAnime.lower() or 'saga' in blockAnime.lower():
                # Episode/saga keywords -> it is an anime series.
                item.contentType = 'tvshow'
                item.args = 'anime'
                item.data = blockAnime
                return episodios(item)
            elif scrapertoolsV2.find_single_match(blockAnime, r'\d+(?:&#215;|×)?\d+\-\d+|\d+(?:&#215;|×)\d+'):
                # A "1x01" / "1x01-10" episode pattern also marks a series.
                item.contentType = 'tvshow'
                item.data = blockAnime
                return episodios(item)
            else:
                # Anime movie: hand the raw HTML to findvideos via item.url.
                support.log('È un ANIME FILM')
                item.contentType = 'movie'
                item.url = data
                return findvideos(item)
        if genere.lower() == 'serie':
            item.contentType = 'tvshow'
            item.data = data
            return episodios(item)
        else:
            support.log('È un FILM')
            item.contentType = 'movie'
            # NOTE(review): item.url is overwritten with the page HTML here;
            # findvideos() appears to expect that — confirm against support.server.
            item.url = data
            return findvideos(item)
def findvideos(item):
    """Resolve playable server links for *item* via support.server().

    Plain movies still carry a page URL, so support.server() downloads it;
    anime and TV-show items already hold the scraped HTML in item.url, so
    it is passed along as pre-fetched data.

    Fix: the original body had two statements after the if/else (a
    rapidcrypt URL cleanup and a second return) that were unreachable —
    both branches return first — so they are removed as dead code.
    """
    support.log('findvideos ->', item)
    if item.contentType == 'movie' and item.args != 'anime':
        return support.server(item)
    # Anime / tvshow: item.url contains the already-downloaded HTML block.
    return support.server(item, data=item.url)

View File

@@ -1,37 +0,0 @@
{
"id": "mondolunatico",
"name": "Mondo Lunatico",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://mondolunatico.org/wp-content/uploads/2016/02/images-111.jpg",
"banner": "http://mondolunatico.org/wp-content/uploads/2016/02/images-111.jpg",
"categories": ["movie"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,449 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale mondolunatico
# ------------------------------------------------------------
import os
import re
import time
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config
from platformcode import logger
__channel__ = "mondolunatico"
host = config.get_channel_url(__channel__)
captcha_url = '%s/pass/CaptchaSecurityImages.php?width=100&height=40&characters=5' % host
PERPAGE = 25
def mainlist(item):
    """Build the channel root menu: Novità, Categorie and Cerca entries.

    Returns a list of Item objects.  The Serie TV entries are kept below,
    commented out, apparently pending reactivation.
    """
    logger.info("kod.mondolunatico mainlist")
    itemlist = [Item(channel=item.channel,
                     title="[COLOR azure]Novità[/COLOR]",
                     extra="movie",
                     action="peliculas",
                     url=host,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR azure]Categorie[/COLOR]",
                     extra="movie",
                     action="categorias",
                     url=host,
                     thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca...[/COLOR]",
                     extra="movie",
                     action="search",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
    # Disabled TV-show entries (left for future reactivation):
    #Item(channel=item.channel,
    #     title="[COLOR azure]Serie TV[/COLOR]",
    #     extra="tvshow",
    #     action="serietv",
    #     url="%s/serietv/lista-alfabetica/" % host,
    #     thumbnail="http://i.imgur.com/rO0ggX2.png"),
    #Item(channel=item.channel,
    #     title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
    #     extra="tvshow",
    #     action="search",
    #     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),]
    return itemlist
def categorias(item):
    """List the film genre categories scraped from the site's genre combo.

    Downloads item.url, isolates the <option> block of the category
    dropdown, and builds one "peliculas" Item per genre.  The visible
    label carries post counters and decorations that must be stripped
    before the name is reused in the category URL.

    Fix/cleanup: the original stripped digits with ten chained
    .replace("0".."9", "") calls; a single re.sub(r'\\d', '', ...) does
    the same in one pass.
    """
    logger.info("kod.mondolunatico categorias")
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Narrow the search by selecting only the combo block.
    bloque = scrapertools.find_single_match(data, '<option class="level-0" value="7">(.*?)<option class="level-0" value="8">')
    # The categories are the options of the combo.
    patron = '<option class=[^=]+="([^"]+)">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedurl, scrapedtitle in matches:
        # Drop "&nbsp;", parentheses and the digit counters from the label.
        scrapedtitle = scrapedtitle.replace("&nbsp;", "").replace("(", "").replace(")", "")
        scrapedtitle = scrapertools.decodeHtmlentities(re.sub(r'\d', '', scrapedtitle))
        # The site exposes genres under a fixed path named after the label.
        scrapedurl = host + "/category/film-per-genere/" + scrapedtitle
        scrapedthumbnail = ""
        scrapedplot = ""
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))
    return itemlist
def search(item, texto):
    """Run a site search for *texto*, dispatching on item.extra.

    Movies go straight to peliculas(); TV shows are matched against the
    alphabetical list via search_serietv().  Any error is logged and an
    empty list is returned so the global search can continue with the
    other channels.
    """
    logger.info("[mondolunatico.py] " + item.url + " search " + texto)
    item.url = host + "/?s=" + texto
    try:
        if item.extra == "movie":
            return peliculas(item)
        if item.extra == "tvshow":
            item.url = "%s/serietv/lista-alfabetica/" % host
            return search_serietv(item, texto)
    # Keep the global search going on failure.
    except:
        import sys
        exc_parts = sys.exc_info()
        for part in exc_parts:
            logger.error("%s" % part)
        return []
def peliculas(item):
    """Scrape the movie grid of the current page and append a pager Item.

    Each "boxentry" yields URL, thumbnail and title; titles are enriched
    with TMDb metadata before returning.
    """
    logger.info("kod.mondolunatico peliculas")
    itemlist = []
    # Load the page.
    data = httptools.downloadpage(item.url).data
    # Extract the entries.
    patron = '<div class="boxentry">\s*<a href="([^"]+)"[^>]+>\s*<img src="([^"]+)" alt="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapedplot = ""
    for scrapedurl, scrapedthumbnail, scrapedtitle, in matches:
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 contentType="movie",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))
    # Pagination: follow the "next" link if present.
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    # Enrich titles with TMDb info.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def serietv(item):
    """List TV shows from the alphabetical index, paginated client-side.

    The whole list lives on one page, so the current page number *p* is
    smuggled inside item.url after a '{}' separator and PERPAGE entries
    are shown per screen.
    """
    logger.info("kod.mondolunatico serietv")
    itemlist = []
    p = 1
    # Recover the page number encoded as "<url>{}<p>".
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    # Load the page.
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<h1>Lista Alfabetica</h1>(.*?)</div>')
    # Extract the entries.
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # Skip entries before the current page window, stop after it.
        if (p - 1) * PERPAGE > i: continue
        if i >= p * PERPAGE: break
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))
    # Add a "next page" Item while entries remain.
    if len(matches) >= p * PERPAGE:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="serietv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def search_serietv(item, texto):
    """Filter the alphabetical TV-show list by substring match on *texto*.

    The site has no server-side series search, so the full list is
    downloaded and filtered locally (case-insensitive).
    """
    logger.info("kod.mondolunatico serietv")
    texto = urllib.unquote_plus(texto).lower()
    itemlist = []
    # Load the page.
    data = httptools.downloadpage(item.url).data
    data = scrapertools.find_single_match(data, '<h1>Lista Alfabetica</h1>(.*?)</div>')
    # Extract the entries.
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapedplot = ""
    scrapedthumbnail = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # Keep only titles containing the query.
        if texto not in title.lower(): continue
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="episodios",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def episodios(item):
    """Collect episode links for a series page.

    Episodes may sit behind keeplinks.eu protected pages and/or behind the
    site's own captcha-protected /pass/ gateway; both are unwrapped here
    and the resulting HTML fragments are joined before the episode links
    are extracted (with de-duplication by title).
    """
    logger.info("kod.mondolunatico episodios")
    itemlist = []
    # Load the page.
    data = httptools.downloadpage(item.url).data
    html = []
    # NOTE(review): `data` is not refreshed between the two iterations, so
    # this pass runs the same extraction twice (duplicates are filtered
    # later via `encontrados`) — confirm whether the retry is intentional.
    for i in range(2):
        # Keeplinks-protected link lists: setting the flag cookie skips the wait page.
        patron = 'href="(https?://www\.keeplinks\.eu/p92/([^"]+))"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for keeplinks, id in matches:
            _headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                        ['Referer', keeplinks]]
            html.append(httptools.downloadpage(keeplinks, headers=_headers).data)
        # Site's own /pass/ gateway, possibly captcha-protected.
        patron = r'="(%s/pass/index\.php\?ID=[^"]+)"' % host
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl in matches:
            tmp = httptools.downloadpage(scrapedurl).data
            if 'CaptchaSecurityImages.php' in tmp:
                # Download the captcha image and ask the user to solve it.
                img_content = httptools.downloadpage(captcha_url).data
                captcha_fname = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
                with open(captcha_fname, 'wb') as ff:
                    ff.write(img_content)
                from platformcode import captcha
                keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
                keyb.doModal()
                if keyb.isConfirmed():
                    captcha_text = keyb.getText()
                    post_data = urllib.urlencode({'submit1': 'Invia', 'security_code': captcha_text})
                    tmp = httptools.downloadpage(scrapedurl, post=post_data).data
                # Best-effort cleanup of the temporary captcha file.
                try:
                    os.remove(captcha_fname)
                except:
                    pass
            html.append(tmp)
    data = '\n'.join(html)
    # De-duplicate episode titles across all collected fragments.
    encontrados = set()
    patron = '<p><a href="([^"]+?)">([^<]+?)</a></p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))
    # Alternative markup used by some series pages.
    patron = '<a href="([^"]+)" target="_blank" class="selecttext live">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show))
    return itemlist
def findvideos(item):
    """Resolve the playable links on a movie (or pre-fetched show) page.

    Collects (a) captcha-gated /pass/ links played via the custom
    'captcha' server, (b) /stream/links/ pages, (c) keeplinks.eu wrapped
    links, and (d) embedded iframes, then lets servertools identify the
    actual video hosts in the accumulated HTML.
    """
    logger.info("kod.mondolunatico findvideos")
    itemlist = []
    # Load the page; for tvshows item.url already holds the HTML.
    data = item.url if item.extra == "tvshow" else httptools.downloadpage(item.url).data
    # Captcha-protected /pass/ links: handled later by play() via server='captcha'.
    patron = r'noshade>(.*?)<br>.*?<a href="(%s/pass/index\.php\?ID=[^"]+)"' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('*', '').replace('Streaming', '').strip()
        title = '%s - [%s]' % (item.title, scrapedtitle)
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 server='captcha',
                 folder=False))
    # Inline /stream/links/ pages: append their HTML for the final scan.
    patron = 'href="(%s/stream/links/\d+/)"' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data += httptools.downloadpage(scrapedurl).data
    ### robalo fix obfuscator - start ####
    # Keeplinks wrapper: the flag cookie skips the interstitial page.
    patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p92/([^"]+))"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for keeplinks, id in matches:
        headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                   ['Referer', keeplinks]]
        html = httptools.downloadpage(keeplinks, headers=headers).data
        data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
    ### robalo fix obfuscator - end ####
    # Embedded iframes: pull their content into the scan buffer too.
    patron = 'src="([^"]+)" frameborder="0"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data += httptools.downloadpage(scrapedurl).data
    # Let servertools detect every known video host in the collected HTML.
    for videoitem in servertools.find_video_items(data=data):
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        itemlist.append(videoitem)
    return itemlist
def play(item):
    """Play an item; for the custom 'captcha' server, solve the gateway first.

    The /pass/ gateway may present a captcha: the image is downloaded,
    shown to the user via the platform keyboard, and the answer POSTed
    back before the video hosts are extracted from the unlocked page.
    Non-captcha items are returned unchanged.
    """
    logger.info("kod.mondolunatico play")
    itemlist = []
    if item.server == 'captcha':
        headers = [['Referer', item.url]]
        # Load the page.
        data = httptools.downloadpage(item.url, headers=headers).data
        if 'CaptchaSecurityImages.php' in data:
            # Download the captcha image and ask the user to solve it.
            img_content = httptools.downloadpage(captcha_url, headers=headers).data
            captcha_fname = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
            with open(captcha_fname, 'wb') as ff:
                ff.write(img_content)
            from platformcode import captcha
            keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
            keyb.doModal()
            if keyb.isConfirmed():
                captcha_text = keyb.getText()
                post_data = urllib.urlencode({'submit1': 'Invia', 'security_code': captcha_text})
                data = httptools.downloadpage(item.url, post=post_data, headers=headers).data
            # Best-effort cleanup of the temporary captcha file.
            try:
                os.remove(captcha_fname)
            except:
                pass
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = item.channel
    else:
        # Regular servers: the item is already playable as-is.
        itemlist.append(item)
    return itemlist

View File

@@ -1,65 +0,0 @@
{
"id": "mondolunatico2",
"name": "MondoLunatico 2.0",
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "mondolunatico2.png",
"banner": "mondolunatico2.png",
"categories": ["tvshow", "movie", "vos", "anime"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi in Ricerca Globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Numero di link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Italiano"
]
}
]
}

View File

@@ -1,447 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per MondoLunatico 2.0
# ------------------------------------------------------------
"""
WARNING:
questo sito è una kakatura di kazz...incredibile!!!
Per renderlo compatibile con support ci vuole MOLTA PAZIENZA!!!
Problemi noti che non superano il test del canale:
Nelle pagine dei "FILM", film e serie nel sito sono mischiate,
I titoli quindi non saranno nello stesso numero nelle pagine del canale.
Alcuni Titoli sono pagine informative e NON devono apparire nel CANALE!!!
Controllare:
-che nelle varie sezioni dei FILM appaiano solo FILM, stessa cosa per le serie.
-che effettivamente vengano tagliati solo gli avvisi.
Nella TOP FILM non ci sono le voci lingua, anno ( quindi niente TMDB o vari ) e qualità
Nella pagina delle serie potreste trovare solo il titolo senza stagione ed episodio
Nel menu contestuale potreste non trovare le voci:
-"Aggiungi in Videoteca"
-"Scarica" di qualunque tipo: stagione, serie, etc...
AVVISO:
i link 1fichier hanno bisogno dei DNS modificati
il server woof potrebbe rispondere con "connettore assente"
I titoli nella sezione SUB-ITA che non riportano Sub-ITA sono in lingua originale senza sottotitoli
"""
import re
import urlparse
import urllib
import urllib2
import time
from channelselector import thumb
from specials import autoplay, filtertools
from core import scrapertools, httptools, tmdb, servertools, support, scrapertoolsV2
from core.item import Item
from platformcode import config, platformtools #,logger
__channel__ = "mondolunatico2"
host = config.get_channel_url(__channel__)
headers = [['Referer', host]]
list_servers = ['verystream', 'wstream', 'openload', 'streamango']
list_quality = ['HD', 'default']
@support.menu
def mainlist(item):
    """Declare the channel menu consumed by the @support.menu decorator.

    Each tuple is (label, [url, action, args, (optional) contentType]);
    the decorator turns `top` into the rendered menu entries.
    """
    support.log()
    top = [('Film', ['/genre/film-aggiornati/', 'peliculas', 'movies']),
           ('Al Cinema', ['/genre/al-cinema/', 'peliculas', 'cinema']),
           ('Ultimi Aggiunti', ['/movies/', 'peliculas', 'latest']),
           ('Ultime Richieste', ['/genre/richieste/', 'peliculas', 'request']),
           ('Top ImDb', ['/top-imdb/', 'peliculas', 'top']),
           ('Sub-ITA', ['/genre/subita/', 'peliculas', 'sub']),
           ('Serie TV', ['/tvshows/', 'peliculas', '', 'tvshow']),
           ('Top ImDb', ['/top-imdb/', 'peliculas', 'top', 'tvshow']),
           ('Search...', ['', 'search', 'search'])
           ]
    # The decorator consumes the local bindings to build the menu.
    return locals()
@support.scrape
def peliculas(item):
    """Declare the scraping patterns consumed by the @support.scrape decorator.

    Selects block/entry regexes per listing (movies, cinema, latest,
    requests, sub, top-IMDb, series, search).  The decorator reads the
    returned locals(): action, patron, patronBlock, patronNext, blacklist,
    pagination, type_*_dict.
    """
    support.log()
    action = 'findvideos'
    # Informational posts that must not appear as titles.
    blacklist = ['Avviso Agli Utenti',]
    if item.args != 'search':
        if item.contentType == 'movie':
            action = 'findvideos'
            # Default movie-card pattern (thumb, rating, lang/quality, url,
            # title, year, duration, plot).
            patron = r'class="item movies"><div class="poster"><img src="(?P<thumb>[^"]+)"'\
                '[^>]+>(?:<div class="rating">)?[^>]+>.+?(?P<rating>\d+.\d+|\d+)'\
                '[^>]+>[^>]+>[^>]+>(:?(?P<lang>SubITA)?|(?P<quality>[^<]+)?)?'\
                '<.+?href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
                '[^>]+>(?P<title>.+?)</a>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>'\
                '[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<year>\d+)</span>'\
                '[^>]+>(?P<duration>\d+)?.+?<[^>]+>(?:[^>]+>[^>]+>[^>]+>[^>]+>)(?P<plot>.+?)<'
            # Pick the page block matching the requested listing.
            if item.args == 'movies':
                patronBlock = r'<h1>\*Film Aggiornati</h1>(?P<block>.*?)<div class="pagination">'
            elif item.args == 'cinema':
                patronBlock = r'<h1>\*Al Cinema</h1>(?P<block>.*?)<div class="pagination">'
            elif item.args == 'latest':
                patronBlock = r'<h1>Film</h1>(?P<block>.*?)<div class="pagination">'
            elif item.args == 'request':
                patronBlock = r'<h1>\*Richieste</h1>(?P<block>.*?)<div class="pagination">'
            elif item.args == 'sub':
                patronBlock = r'<h1>\*SubITA</h1>(?P<block>.*?)<div class="pagination">'
            elif item.args == 'top':
                patronBlock = r'<h3>Film</h3>(?P<block>.*?)<div class="top-imdb-list tright">'
                patron = r'<div class="image"><div class="[^"]+"><a href="(?P<url>[^"]+)"'\
                    '[^"]+"(?P<thumb>[^"]+)"[^"]+alt="(?P<title>[^"]+)">[^>]+>[^>]+>'\
                    '[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<'
                # The Top-IMDb list is one long page: paginate it locally.
                # NOTE(review): original indentation is lost in this dump —
                # confirm whether `pagination` applied to all movie args.
                pagination = 25
        else:
            action = 'episodios'
            if item.args == 'top':
                patronBlock = r'<h3>TVShows</h3>(?P<block>.*?)<h2 class="widget-title">'
                patron = r'<div class="image"><div class="[^"]+"><a href="(?P<url>[^"]+)"'\
                    '[^"]+"(?P<thumb>[^"]+)"[^"]+alt="(?P<title>[^"]+)">[^>]+>[^>]+>'\
                    '[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<'
            else:
                patronBlock = r'<h1>Serie T[v|V]</h1>(?P<block>.*?)<div class="pagination">'
                patron = r'class="item tvshows">[^>]+>.+?src="(?P<thumb>[^"]+)".+?>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)</h4>[^>]+>[^>]+> (?:<span class="imdb">IMDb: (?P<rating>\d+.\d+|\d+|N\/A)(?:</span>)?[^>]+>(?P<year>\d+))?<[^>]+>[^>]+>[^>]+>(?:[^>]+>[^>]+>(?P<plot>[^<]+)<)'
    else:
        # Search results mix movies and series: the <type> group plus the
        # dicts below let the decorator map each result to its action.
        patronBlock = r'<h1>Results found\:.+?</h1>(?P<block>.*?)<div class="sidebar scrolling">'
        patron = r'<div class="result-item">[^>]+>[^>]+>[^>]+>.+?href="(?P<url>[^"]+)">.+?src="(?P<thumb>[^"]+)" alt="(?P<title>[^"]+)"[^>]+>[^>]+>(?P<type>[^>]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>IMDb (?P<rating>\d+.\d+|\d+)[^>]+>[^>]+>(?P<year>\d+)<[^>]+>[^>]+>[^>]+>[^>]+>(:?[^>]+>[^>]+>)?(?P<plot>[^<]+)<'
        type_content_dict={'movie': ['film'], 'tvshow': ['tv']}
        type_action_dict={'findvideos': ['film'], 'episodios': ['tv']}
    patronNext = r'<span class="current">.*?href="([^"]+)" class="inactive">'
    ## debug = True
    return locals()
def search(item, texto):
    """Search the site for *texto*; errors are logged and swallowed so the
    global search can keep querying the other channels.

    Fix: the except handler called `logger.error`, but `logger` is not in
    scope in this module (its import is commented out:
    `from platformcode import config, platformtools #,logger`), so any
    scraping failure raised a NameError instead of being logged.  Use the
    channel's own support.log helper instead.
    """
    support.log('s-> '+texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    except:
        import sys
        for line in sys.exc_info():
            support.log("%s" % line)
        return []
def findvideos(item):
    """Route an item to the proper player based on its content type.

    TV shows first try the dooplay link list (falling back to the episode
    listing when none is found); movies go to videoplayer(); anything
    else goes through halfplayer().
    """
    support.log()
    if item.contentType == "tvshow":
        ret=support.dooplay_get_links(item, host)
        if ret == []:
            # No direct links on the page: show the episode list instead.
            return episodios(item)
        else:
            item.url = ret[0]["url"]
            return videoplayer(item)
    #if item.args == "movies" or "movie":
    if item.contentType == 'movie':
        return videoplayer(item)
    else:
        return halfplayer(item)
def episodios(item):
    """Build the episode list for a series page, handling its many layouts.

    The site mixes four page layouts: an explicit seasons/episodes list, a
    single-file page, a keeplinks page (marked by a hand.gif image), and a
    dooplay iframe list.  Each is detected from the HTML and dispatched to
    the matching handler.
    """
    support.log()
    itemlist = []
    data = httptools.downloadpage(item.url, headers=headers).data
    if "<h2>Stagioni ed Episodi</h2>" in data:
        # Layout 1: the page lists seasons with their episodes directly.
        block = scrapertools.find_single_match(data, r'<h2>Stagioni ed Episodi</h2>(.*?)<div class=\'sbox\'>')
        patron = r'episodiotitle.*?href=\'([^\']+)\'>([^<]+)'
        matches = re.compile(patron, re.DOTALL).findall(block)
        for scrapedurl, scrapedtitle in matches:
            itemlist.append(
                Item(channel=__channel__,
                     action="videoplayer",
                     contentType=item.contentType,
                     title=scrapedtitle,
                     thumbnail=item.thumbnail,
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     args=item.args,
                     show=item.show))
        support.videolibrary(itemlist, item, 'color kod')
        return itemlist
    if "File Unico..." in data:
        # Layout 2: a single-file page.
        return dooplayer(item)
    if "http://mondolunatico.org/stream/wp-content/uploads/2017/08/hand.gif" in data:
        # Layout 3: keeplinks-protected list.
        return keeplink(item)
    else:
        # Layout 4: dooplay iframe list; one iframe per episode when >1.
        patron = r'<div class="sp-head" title="Espandi">([^<]+).*?<iframe.*?src="([^"]+)'
        matches = re.compile(patron, re.DOTALL).findall(data)
        if len(matches) > 1:
            for scrapedtitle, scrapedurl in matches:
                itemlist.append(
                    Item(channel=__channel__,
                         action="player_list",
                         contentType=item.contentType,
                         title=scrapedtitle,
                         thumbnail=item.thumbnail,
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         show=item.show))
            return itemlist
        else:
            return dooplayer(item)
# ---------------------------------------------------------------------------------------------------------------------------------------------
def player(item):
    """Resolve the embedded player for an item into playable fvs.io links.

    Redirects to halfplayer() for mondolunatico.tk mirrors, to
    player_list() when the page hosts an episode playlist, otherwise
    POSTs to the player's /api/source/ endpoint and builds one play Item
    per returned file token.
    """
    support.log()
    data = httptools.downloadpage(item.url, headers=headers).data
    # Normalize the URL to scheme://host/seg1/seg2.
    item.url = scrapertools.find_single_match(item.url, r'([^/]+//[^/]+/[^/]+/[^/]+)')
    if "https://mondolunatico.tk" in data:
        data = httptools.downloadpage(item.url, headers=headers).data
        link = scrapertools.find_single_match(data, r'<p><iframe src="(.*?/.*?)[A-Z]')
        item.url = link
        return halfplayer(item)
    if "mondolunatico.tk" in item.url:
        return halfplayer(item)
    # Get the link of the video embedded in the page.
    ret=support.dooplay_get_links(item, host)
    # Take the first embedded-player link.
    url = ret[0]["url"]
    data = httptools.downloadpage(url, headers=headers).data
    if "zmdi zmdi-playlist-audio zmdi-hc-3x" in data:
        # The player hosts a playlist rather than a single video.
        return player_list(item)
    else:
        # Rewrite the view URL into the player's POST API endpoint.
        url = url.replace("/v/", "/api/source/").replace("/p/", "/api/source/")
        postData = urllib.urlencode({
            "r": "",
            "d": "modolunatico.tk",
        })
        block = httptools.downloadpage(url, post=postData).data
        patron = r'"file":".*?\/(r[^"]+)'
        matches = re.compile(patron, re.DOTALL).findall(block)
        itemlist = []
        for scrapedurl in matches:
            scrapedurl = "https://fvs.io/" + scrapedurl
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     contentType=item.contentType,
                     title=item.title,
                     thumbnail=item.thumbnail,
                     fulltitle=item.title,
                     url=scrapedurl,
                     show=item.show))
        autoplay.start(itemlist, item)
        return itemlist
# ---------------------------------------------------------------------------------------------------------------------------------------------
def player_list(item):
    """Build the episode list from a mondolunatico.tk playlist page.

    Falls back to player() when the page does not contain the
    'panel_toggle toggleable' episode panel.
    """
    support.log()
    page = httptools.downloadpage(item.url, headers=headers).data
    if "panel_toggle toggleable" not in page:
        # No episode panel on this page: treat it as a single video
        return player(item)

    # Grab the block holding the episode list
    block = scrapertools.find_single_match(page, r'panel_toggle toggleable.*?(<div.*?)<!-- Javascript -->')
    entries = re.compile(r'data-url="([^"]+)">.*?([A-Z].*?) ', re.DOTALL).findall(block)

    # Release-name junk stripped from raw titles, applied in this order
    junk_patterns = (
        'mp4|avi|mkv',
        'WebRip|WEBRip|x264|AC3|1080p|DLMux|XviD-|BDRip|BluRay|HD|WEBMux|H264|BDMux|720p|TV|NFMux|DVDRip|DivX|DVDip|Ac3|Dvdrip|Mux|NovaRip|DVD|SAT|Divx',
        'ITA|ENG|Italian|SubITA|SUBITA|iTALiAN|LiAN|Ita',
        'Pir8|UBi|M L|BEDLAM|REPACK|DD5.1|bloody|SVU',
    )
    itemlist = []
    for ep_url, ep_title in entries:
        for pattern in junk_patterns:
            ep_title = re.sub(pattern, '', ep_title)
        for old, new in ((".", " "), (" - ", " "), (" -", ""), (" ", "")):
            ep_title = ep_title.replace(old, new)
        itemlist.append(
            Item(channel=__channel__,
                 action="halfplayer",
                 contentType=item.contentType,
                 title=ep_title,
                 thumbnail=item.thumbnail,
                 fulltitle=ep_title,
                 url="https://mondolunatico.tk" + ep_url,
                 show=item.show))
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
# ---------------------------------------------------------------------------------------------------------------------------------------------
def dooplayer(item):
    """Resolve a single embedded mondolunatico.tk video into fvs.io links.

    If the embed page turns out to be an episode panel, delegates to
    player_list() instead of producing playable Items directly.
    """
    support.log()
    page = httptools.downloadpage(item.url, headers=headers).data
    embed = scrapertools.find_single_match(page, r'(https://mondolunatico.tk/./[^"]+)')
    embed_page = httptools.downloadpage(embed, headers=headers).data
    if "panel_toggle toggleable" in embed_page:
        # Episode panel instead of a single video
        item.url = embed
        return player_list(item)
    # Rewrite the player URL into the POST API endpoint
    api_url = embed.replace("/v/", "/api/source/").replace("/p/", "/api/source/")
    post_data = urllib.urlencode({
        "r": embed,
        "d": "modolunatico.tk",  # NOTE(review): possible typo for "mondolunatico.tk" -- confirm
    })
    response = httptools.downloadpage(api_url, post=post_data).data
    file_ids = re.compile(r'"file":".*?\/(r[^"]+)', re.DOTALL).findall(response)
    itemlist = [
        Item(channel=__channel__,
             action="play",
             contentType=item.contentType,
             title=item.title,
             thumbnail=item.thumbnail,
             fulltitle=item.title,
             url="https://fvs.io/" + file_id,
             show=item.show)
        for file_id in file_ids]
    autoplay.start(itemlist, item)
    support.videolibrary(itemlist, item, 'color kod')
    return itemlist
# ---------------------------------------------------------------------------------------------------------------------------------------------
def keeplink(item):
    """Expand keeplinks.co/.eu protected-link pages into playable items."""
    support.log()
    page = httptools.downloadpage(item.url).data
    # Visit each keeplinks page, bypassing its anti-popup cookie gate
    for kl_url, kl_id in re.compile(r'href="(https?://www\.keeplinks\.(?:co|eu)/p92/([^"]+))"', re.DOTALL).findall(page):
        kl_headers = [['Cookie', 'flag[' + kl_id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                      ['Referer', kl_url]]
        unlocked = httptools.downloadpage(kl_url, headers=kl_headers).data
        page += str(scrapertools.find_multiple_matches(unlocked, '</lable><a href="([^"]+)" target="_blank"'))
    # Follow embedded iframes so their hosters get discovered too
    for frame_url in re.compile(r'src="([^"]+)" frameborder="0"', re.DOTALL).findall(page):
        page += httptools.downloadpage(frame_url).data
    itemlist = []
    for videoitem in servertools.find_video_items(data=page):
        videoitem.title = item.title + " - " + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = item.channel
        itemlist.append(videoitem)
    return itemlist
# ---------------------------------------------------------------------------------------------------------------------------------------------
def videoplayer(item):
    """List the hoster links exposed by the dooplay player for this item."""
    support.log()
    itemlist = []
    for link in support.dooplay_get_links(item, host):
        raw_server = link['server']
        # Keep only the part before the first dot.
        # NOTE(review): when no dot is present find() returns -1 and this
        # drops the last character -- confirm that is intended.
        server = raw_server[:raw_server.find(".")]
        if server == "":
            server = "mondolunatico"
        # Self-hosted links go through player(); external hosters play directly
        itemlist.append(
            Item(channel=item.channel,
                 action="player" if "mondolunatico" in server else "play",
                 title=server + " [COLOR blue][" + link['title'] + "][/COLOR]",
                 url=link['url'],
                 server=server,
                 fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 quality=link['title'],
                 contentType=item.contentType,
                 folder=False))
    support.videolibrary(itemlist, item, 'color kod', function_level=2)
    autoplay.start(itemlist, item)
    return itemlist
# ---------------------------------------------------------------------------------------------------------------------------------------------
def halfplayer(item):
    """Resolve a mondolunatico.tk player URL and start direct playback.

    Converts the /v/ or /p/ player URL into the site's /api/source/
    POST endpoint, extracts the fvs.io file ids and plays each match.

    Returns the result of platformtools.play_video() for the last match,
    or an empty list when no file link could be extracted.
    """
    support.log()
    url = item.url
    # Rewrite the player URL into the POST API endpoint
    url = url.replace("/v/", "/api/source/").replace("/p/", "/api/source/")
    postData = urllib.urlencode({
        "r": "",
        "d": "modolunatico.tk",  # NOTE(review): looks like a typo for "mondolunatico.tk" -- confirm
    })
    block = httptools.downloadpage(url, post=postData).data
    patron = r'"file":".*?\/(r[^"]+)'
    matches = re.compile(patron, re.DOTALL).findall(block)
    # BUGFIX: itemlist was only assigned inside the loop, so an empty match
    # list raised UnboundLocalError on the final return. Initialize it first.
    itemlist = []
    for scrapedurl in matches:
        item.url = "https://fvs.io/" + scrapedurl
        item.server = ""
        itemlist = platformtools.play_video(item, force_direct=True, autoplay=True)
    return itemlist

View File

@@ -7,7 +7,10 @@
from core import scrapertoolsV2, httptools, support
from core.item import Item
host = 'https://www.seriehd.moda'
__channel__ = 'seriehd'
host = support.config.get_channel_url(__channel__)
# host = 'https://www.seriehd.watch'
headers = ''
def findhost():

View File

@@ -34,7 +34,7 @@ def mainlist(item):
@support.scrape
def peliculas(item):
patronBlock = r'<div class="wrap">\s+<h.>.*?</h.>(?P<block>.*?)<footer>'
patronBlock = r'<div class="wrap">\s*<h.>.*?</h.>(?P<block>.*?)<footer>'
if item.args != 'update':

View File

@@ -75,30 +75,6 @@ def peliculas(item):
if item.args != 'all' and item.args != 'search':
action = 'findvideos' if item.extra == 'movie' else 'episodios'
item.contentType = 'movie' if item.extra == 'movie' else 'tvshow'
else:
def itemHook(item):
item.action = 'episodios'
item.contentType = 'tvshow'
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
check = scrapertoolsV2.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
if 'sub' in check.lower():
item.contentLanguage = 'Sub-ITA'
item.title += support.typo('Sub-ITA', '_ [] color kod')
support.log("CHECK : ", check)
if 'anime' in check.lower():
support.log('select = ### è una anime ###')
item.action = 'episodios'
anime = True
args='anime'
elif 'serie' in check.lower():
pass
else:
support.log('select ELSE = ### è un film ###')
item.action = 'findvideos'
item.contentType='movie'
return item
#debug = True
return locals()
@@ -107,30 +83,38 @@ def peliculas(item):
def episodios(item):
log()
findhost()
data_check = httptools.downloadpage(item.url, headers=headers).data
data_check = re.sub('\n|\t', ' ', data_check)
data_check = re.sub(r'>\s+<', '> <', data_check)
if not item.data:
data_check = httptools.downloadpage(item.url, headers=headers).data
data_check = re.sub('\n|\t', ' ', data_check)
data_check = re.sub(r'>\s+<', '> <', data_check)
else:
data_check = item.data
patron_check = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">'
item.url = scrapertoolsV2.find_single_match(data_check, patron_check)
patronBlock = r'Episodio<\/a>.*?<ul class="nav navbar-nav">(?P<block>.*?)<\/ul>'
patronBlock = r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(?P<block>.*?)<\/ul>'
patron = r'<a href="(?P<url>[^"]+)"\s*>\s*<i[^>]+><\/i>\s*(?P<episode>\d+)<\/a>'
# debug = True
def itemHook(item):
item.contentType = 'tvshow'
url_season = item.url.rpartition('/')
support.log("ITEM URL: ", url_season[0])
seasons = support.match(item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>', r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>', headers=headers, url=url_season[0])[0]
for season_url, season in seasons:
support.log("ITEM URL2: ", url_season[0],' - ', item.url)
if season_url[0] in item.url:
item.title = support.typo(season+'x'+unify.remove_format(item.title), 'bold')
## item.infoLabels['title'] = item.fulltitle if item.infoLabels['title'] == '' else item.infoLabels['title']
## item.infoLabels['tvshowtitle'] = item.fulltitle if item.infoLabels['tvshowtitle'] == '' else item.infoLabels['tvshowtitle']
break
def itemlistHook(itemlist):
retItemlist = []
for item in itemlist:
item.contentType = 'episode'
return item
season = unify.remove_format(item.title)
season_data = httptools.downloadpage(item.url).data
season_data = re.sub('\n|\t', ' ', season_data)
season_data = re.sub(r'>\s+<', '> <', season_data)
block = scrapertoolsV2.find_single_match(season_data, 'Episodi.*?<ul class="nav navbar-nav">(.*?)</ul>')
episodes = scrapertoolsV2.find_multiple_matches(block, '<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>')
for url, episode in episodes:
i = item.clone()
i.action = 'findvideos'
i.url = url
i.title = str(season) + 'x' + str(episode)
retItemlist.append(i)
return retItemlist
#debug = True
return locals()
@@ -262,7 +246,23 @@ def findvideos(item):
## data = item.url
## else:
## data = httptools.downloadpage(item.url, headers=headers).data
data = item.url if item.contentType == "episode" else httptools.downloadpage(item.url, headers=headers).data
data = httptools.downloadpage(item.url, headers=headers).data
data = re.sub('\n|\t', ' ', data)
data = re.sub(r'>\s+<', '> <', data)
check = scrapertoolsV2.find_single_match(data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
if 'sub' in check.lower():
item.contentLanguage = 'Sub-ITA'
support.log("CHECK : ", check)
if 'anime' in check.lower():
item.contentType = 'tvshow'
item.data = data
support.log('select = ### è una anime ###')
return episodios(item)
elif 'serie' in check.lower():
item.contentType = 'tvshow'
item.data = data
return episodios(item)
if 'protectlink' in data:
urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
@@ -271,13 +271,13 @@ def findvideos(item):
url = url.decode('base64')
# tiro via l'ultimo carattere perchè non c'entra
url, c = unshorten_only(url)
data += '\t' + url
support.log("SONO QUI: ", url)
if 'nodmca' in data:
page = httptools.downloadpage(url, headers=headers).data
url += isturl.add('\t' + scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">'))
return support.server(item, data=listurl)#, headers=headers)
if 'nodmca' in url:
page = httptools.downloadpage(url, headers=headers).data
url = '\t' + scrapertoolsV2.find_single_match(page,'<meta name="og:url" content="([^=]+)">')
if url:
listurl.add(url)
support.dbg()
return support.server(item, data=listurl if listurl else data)#, headers=headers)
# return itemlist
##def findvideos(item):