|
|
|
|
# NOTE(review): stray unified-diff hunk marker left by a bad merge — disabled.
|
|
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
# ------------------------------------------------------------
|
|
|
|
|
# Ringraziamo Icarus crew
|
|
|
|
|
# Canale piratestreaming
|
|
|
|
|
# Canale per Tantifilm
|
|
|
|
|
# ------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
import re
|
|
|
|
|
import urlparse
|
|
|
|
|
|
|
|
|
|
from channels import autoplay
|
|
|
|
|
from channels import filtertools
|
|
|
|
|
from core import scrapertools, servertools, httptools
|
|
|
|
|
from channels import support, autorenumber
|
|
|
|
|
from channels.support import menu, log, aplay
|
|
|
|
|
from core import scrapertoolsV2, httptools, tmdb
|
|
|
|
|
from core.item import Item
|
|
|
|
|
from core import tmdb
|
|
|
|
|
from lib.unshortenit import unshorten_only
|
|
|
|
|
from platformcode import config
|
|
|
|
|
from platformcode import logger
|
|
|
|
|
from platformcode import config, logger
|
|
|
|
|
|
|
|
|
|
# Channel-wide configuration -------------------------------------------------

# Languages offered by the site (only Italian).
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()

# Servers/qualities used by autoplay and link filtering.
list_servers = ['verystream', 'openload', 'streamango', 'vidlox', 'youtube']
list_quality = ['default']

# User settings: whether (and how many) links to pre-check in findvideos.
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'tantifilm')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'tantifilm')

# Current site domain. (The merge left a dead duplicate assignment pointing
# at the old ".club" domain; only the ".cafe" value was ever effective.)
host = "https://www.tantifilm.cafe"

headers = [['Referer', host]]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def mainlist(item):
    """Build the channel's root menu.

    The merged file built the menu twice (a legacy hand-written Item list plus
    autoplay.init/show_option, AND the newer support.menu()/aplay() calls),
    duplicating every entry.  Only the newer path is kept.
    """
    log()
    itemlist = []

    # Film sections
    menu(itemlist, 'Film', 'peliculas', host + '/film/', 'movie', args='movie')
    menu(itemlist, 'Film Al Cinema submenu', 'peliculas', host + '/watch-genre/al-cinema/', 'movie')
    menu(itemlist, 'Film HD submenu', 'peliculas', host + '/watch-genre/altadefinizione/', 'movie')
    menu(itemlist, 'Film Per Categoria submenu', 'category', host, 'movie')
    menu(itemlist, 'Cerca film... submenu color kod', 'search', contentType='movie', args='findvideos')

    # TV / anime sections
    menu(itemlist, 'Serie TV', 'peliculas', host + '/watch-genre/serie-tv/', contentType='episode')
    menu(itemlist, 'Serie TV HD submenu', 'peliculas', host + '/watch-genre/serie-altadefinizione/', contentType='episode')
    menu(itemlist, 'Miniserie submenu', 'peliculas', host + '/watch-genre/miniserie/', contentType='episode', args='serie')
    menu(itemlist, 'Programmi TV submenu', 'peliculas', host + '/watch-genre/programmi-tv/', contentType='episode')
    menu(itemlist, 'Anime submenu', 'peliculas', host + '/watch-genre/anime/', contentType='episode', args='anime')
    menu(itemlist, 'Cerca Serie TV... submenu color kod', 'search', contentType='episode', args='episodios')

    # AutoPlay entry (wraps autoplay.init + show_option)
    aplay(item, itemlist, list_servers, list_quality)

    return itemlist
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def newest(categoria):
    """Return the newest items for *categoria* (only 'film' is supported).

    Delegates to latest() on the home page and strips the trailing
    "next page" pseudo-item.  Returns [] on any scraping error so the
    global novelties screen keeps working.
    """
    log()
    itemlist = []
    item = Item()
    try:
        if categoria == "film":
            item.url = host
            item.action = "latest"
            itemlist = latest(item)
            # Drop the pagination pseudo-item appended by latest(), if any.
            if itemlist and itemlist[-1].action == "latest":
                itemlist.pop()
    # Keep the aggregated search alive on error (logged, not raised)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def categorias(item):
    """List the site's genre categories from the <ul class="table-list"> block.

    The merged original was a SyntaxError (two interleaved loops and Item()
    calls with duplicate keyword arguments); this restores the coherent
    legacy implementation.
    """
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data
    bloque = scrapertools.find_single_match(data,
                                            '<ul class="table-list">(.*?)</ul>')

    # Extract each category link: (url, name)
    patron = '<li><a href=\'(.*?)\'><span></span>(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png",
                 folder=True))

    return itemlist
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def search(item, texto):
    """Dispatch a user search to the movie or TV-show result parser.

    The merged original contained a literal diff hunk marker inside the
    except block (SyntaxError) and set item.url twice; cleaned up here.
    Returns [] on any error so the global search keeps working.
    """
    log(texto)
    item.url = host + "/?s=" + texto
    try:
        if item.extra == "movie":
            return search_peliculas(item)
        if item.extra == "tvshow":
            return search_peliculas_tv(item)
        # Default to the movie parser when extra is unset.
        return search_peliculas(item)
    # Keep the aggregated search alive on error (logged, not raised)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def search_peliculas(item):
    """Parse a search-results page into playable items.

    The merged original was a SyntaxError (orphaned keyword-argument lines
    after a closed Item call, two interleaved implementations); this restores
    the newer implementation: action depends on item.extra, titles come from
    the "Permalink to <title> (year)" attribute, and TMDB info is attached.
    """
    log()
    itemlist = []

    # movie results open findvideos directly; shows go through episodios
    action = 'findvideos' if item.extra == 'movie' else 'episodios'

    # The search page may answer with an error status but still carry results.
    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data

    # One match per result: (url, title without year, thumbnail)
    patron = r'<a href="([^"]+)" title="Permalink to\s([^"]+) \([^<]+\).*?" rel="[^"]+">\s*<img[^s]+src="(.*?)"'
    matches = re.compile(patron, re.MULTILINE).findall(data)

    for url, title, thumb in matches:
        title = scrapertoolsV2.decodeHtmlentities(title)
        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 contentType=item.contentType,
                 fulltitle=title,
                 show=title,
                 title=title,
                 url=url,
                 thumbnail=thumb,
                 args=item.args))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def search_peliculas_tv(item):
    """Parse TV-show search results and return them as 'episodios' items."""
    logger.info("kod.tantifilm search_peliculas")
    results = []

    # Fetch the results page
    page = httptools.downloadpage(item.url, headers=headers).data

    # One match per result: (url, raw title, thumbnail)
    link_re = re.compile('<a href="([^"]+)" title="([^"]+)" rel="[^"]+">\s*<img[^s]+src="(.*?)"',
                         re.DOTALL)

    for show_url, raw_title, thumb in link_re.findall(page):
        # Clean up the anchor title before using it everywhere
        clean = scrapertools.decodeHtmlentities(raw_title)
        clean = clean.replace("streaming", "").replace("Permalink to ", "")

        results.append(
            Item(channel=item.channel,
                 action="episodios",
                 fulltitle=clean,
                 show=clean,
                 title="[COLOR azure]" + clean + "[/COLOR]",
                 url=show_url,
                 thumbnail=thumb,
                 plot="",
                 folder=True))

    return results
|
|
|
|
|
def category(item):
    """List genre categories via support.scrape, skipping non-genre entries."""
    # Section names that appear in the list but are not real genres.
    skip = ['Serie TV Altadefinizione', 'HD AltaDefinizione', 'Al Cinema',
            'Serie TV', 'Miniserie', 'Programmi Tv', 'Live', 'Trailers',
            'Serie TV Aggiornate', 'Aggiornamenti', 'Featured']

    link_patron = '<li><a href="([^"]+)"><span></span>([^<]+)</a></li>'
    block_patron = '<ul class="table-list">(.*?)</ul>'

    entries = support.scrape(item, link_patron, ['url', 'title'], headers,
                             skip, patron_block=block_patron,
                             action='peliculas')

    # Attach genre thumbnails before returning.
    return support.thumb(entries)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def peliculas(item):
    """Legacy movie-listing scraper: one page of results plus a pagination item.

    For every entry it fetches the detail page just to extract the plot
    (one extra HTTP request per movie), then appends a "next page" item and
    enriches everything with TMDB metadata.
    """
    logger.info("kod.tantifilm peliculas")
    itemlist = []

    # Load the listing page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract entries: (url, thumbnail, title)
    patron = '<div class="media3">[^>]+><a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+></a><[^>]+><a[^>]+><p>(.*?)</p></a></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # NOTE(review): a full page download per entry, only for the plot.
        html = httptools.downloadpage(scrapedurl, headers=headers).data
        start = html.find("<div class=\"content-left-film\">")
        end = html.find("</div>", start)
        scrapedplot = html[start:end]
        # Strip tags and decode entities from the plot snippet
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("streaming", "")

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Pagination: loosened pattern; originally '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
    patronvideos = '<a class="nextpostslink".*?href="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def peliculas_tv(item):
    """Legacy TV-show listing scraper (same layout as peliculas()).

    Entries point at episodios instead of findvideos; a per-entry detail
    page is fetched only to extract the plot, then a "next page" item is
    appended and TMDB metadata is attached.
    """
    logger.info("kod.tantifilm peliculas")
    itemlist = []

    # Load the listing page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract entries: (url, thumbnail, title)
    patron = '<div class="media3">[^>]+><a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+></a><[^>]+><a[^>]+><p>(.*?)</p></a></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # NOTE(review): a full page download per entry, only for the plot.
        html = httptools.downloadpage(scrapedurl, headers=headers).data
        start = html.find("<div class=\"content-left-film\">")
        end = html.find("</div>", start)
        scrapedplot = html[start:end]
        # Strip tags and decode entities from the plot snippet
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("streaming", "")

        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Pagination
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas_tv",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
|
|
|
|
|
|
|
|
|
|
def latest(item):
    """Legacy "latest additions" scraper for the home page.

    Uses the 'mediaWrap mediaWrapAlt' entry layout; fetches each detail page
    only for the plot, appends a "next page" item with action 'latest'
    (newest() relies on that action name to pop it), and attaches TMDB data.
    """
    logger.info("kod.tantifilm peliculas")
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract entries: (url, "Permalink to ..." title, thumbnail)
    patron = '<div class="mediaWrap mediaWrapAlt">\s*'
    patron += '<a href="([^"]+)" title="([^"]+)" rel="bookmark">\s*'
    patron += '<img[^s]+src="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        # NOTE(review): a full page download per entry, only for the plot.
        html = httptools.downloadpage(scrapedurl, headers=headers).data
        start = html.find("<div class=\"content-left-film\">")
        end = html.find("</div>", start)
        scrapedplot = html[start:end]
        # Strip tags and decode entities from the plot snippet
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedtitle = scrapedtitle.replace("Permalink to ", "")
        scrapedtitle = scrapedtitle.replace("streaming", "")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Pagination (action 'latest' is checked by newest() to drop this item)
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=item.channel,
                 action="latest",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
|
|
|
|
|
def peliculas(item):
    """support.scrape-based listing (movies and TV/anime pages).

    NOTE(review): this code was left dangling at module level by a bad merge —
    it is the body of the rewritten peliculas() and is restored as such here.
    Being defined after the legacy peliculas(), it intentionally overrides it,
    which matches the merge's intent (mainlist/menu routes point here).
    """
    log()

    # Movie pages open findvideos directly; shows/anime go through episodios.
    action = 'findvideos' if item.extra == 'movie' else 'episodios'

    if item.args == 'movie':
        # Home/"mediaWrap" layout: (url, title, thumb)
        patron = r'<div class="mediaWrap mediaWrapAlt">[^<]+<a href="([^"]+)" title="Permalink to\s([^"]+) \([^<]+\).*?"[^>]+>[^<]+<img[^s]+src="([^"]+)"[^>]+>[^<]+<\/a>'
        itemlist = support.scrape(item, patron, ['url', 'title', 'thumb'], headers,
                                  action=action,
                                  patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')
    else:
        # "media3" layout: (url, thumb, title)
        patron = r'<div class="media3">[^>]+><a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+><\/a><[^>]+><a[^>]+><p>([^(:?\(|>]+)'
        itemlist = support.scrape(item, patron, ['url', 'thumb', 'title'], headers,
                                  action=action,
                                  patronNext='<a class="nextpostslink" rel="next" href="([^"]+)">')

    # Anime listings need episode renumbering.
    return autorenumber.renumber(itemlist) if item.args == 'anime' else itemlist
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def episodios(item):
    """Build the episode list for a TV-show page.

    The merged original was a SyntaxError (orphaned keyword-argument lines
    after a closed Item(...) call, two interleaved implementations); this
    restores the newer implementation: detect the content category, find the
    embedded player iframe, then walk its season and episode nav menus via
    support.match.
    """
    log()
    itemlist = []

    # Anime pages have their own episode layout.
    if item.args == 'anime': return anime(item)

    data = httptools.downloadpage(item.url).data

    # Check the page's category; non-series pages fall through to findvideos.
    check = scrapertoolsV2.find_single_match(data.replace('\t', '').replace('\n', ''), r'<div class="category-film"><h3>([^<]+)<\/h3>')
    if 'serie tv' not in check.lower(): return findvideos(item)
    elif 'anime' in check.lower(): return findvideos(item)

    # The season/episode navigation lives inside the embedded player iframe.
    patron = r'<iframe src="([^"]+)" scrolling="no" frameborder="0" width="626" height="550" allowfullscreen="true" webkitallowfullscreen="true" mozallowfullscreen="true">'
    url = scrapertoolsV2.find_single_match(data, patron)
    log('URL =', url)

    # Seasons: (relative url, season number) pairs from the "Stagioni" menu.
    seasons = support.match(item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>', r'Stagioni<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>', headers=headers, url=url)[0]

    for season_url, season in seasons:
        season_url = urlparse.urljoin(url, season_url)
        # Episodes of this season from the "Episodio" menu.
        episodes = support.match(item, r'<a href="([^"]+)"\s*>\s*<i[^>]+><\/i>\s*(\d+)<\/a>', r'Episodio<\/a>.*?<ul class="nav navbar-nav">(.*?)<\/ul>', headers=headers, url=season_url)[0]
        for episode_url, episode in episodes:
            episode_url = urlparse.urljoin(url, episode_url)
            title = season + "x" + episode.zfill(2)

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title=support.typo(title + ' - ' + item.fulltitle, 'bold'),
                     url=episode_url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail))

    # "Add to video library" entry
    support.videolibrary(itemlist, item, 'color kod bold')

    return itemlist
|
|
|
|
|
|
|
|
|
|
def anime(item):
    """Build episode items for an anime page.

    Episodes sit inside <div class="sp-body..."> blocks; each line has a main
    link plus optional mirror links. All of an episode's links are joined into
    one newline-separated string in item.url for findvideos to use directly.
    """
    log()
    itemlist = []

    seasons = support.match(item, r'<div class="sp-body[^"]+">(.*?)<\/div>')[0]
    for season in seasons:
        # NOTE(review): '(:?br|\/p)' looks like a typo for the non-capturing
        # '(?:...)', but it is load-bearing: it is a capturing group and
        # supplies the 4th value unpacked into 'none' below. Do not "fix" it
        # without also changing the unpacking.
        episodes = scrapertoolsV2.find_multiple_matches(season, r'<a.*?href="([^"]+)"[^>]+>([^<]+)<\/a>(.*?)<(:?br|\/p)')
        for url, title, urls, none in episodes:
            # Collect any extra mirror links trailing the main one.
            urls = scrapertoolsV2.find_multiple_matches(urls, '<a.*?href="([^"]+)"[^>]+>')

            for url2 in urls:
                url += url2 + '\n'

            log('EP URL',url)

            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     contentType=item.contentType,
                     title=support.typo(title + ' - ' + item.fulltitle,'bold'),
                     url=url,
                     fulltitle=title + ' - ' + item.show,
                     show=item.show,
                     thumbnail=item.thumbnail,
                     args=item.args))

    # Renumber episodes (anime numbering) and add the video-library entry.
    autorenumber.renumber(itemlist, item,'bold')
    support.videolibrary(itemlist, item, 'color kod bold')

    return itemlist
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def findvideos(item):
    """Resolve playable server links for a movie/episode item.

    The merged original ran two full pipelines: the legacy
    servertools.find_video_items path built an itemlist that was then
    discarded wholesale by `itemlist = support.server(...)`, and the
    protectlink-decoding block executed twice. Only the effective (newer)
    path is kept.
    """
    log()
    itemlist = []

    # Anime items already carry their link list in item.url (see anime()).
    if item.args == 'anime':
        data = item.url
    else:
        data = httptools.downloadpage(item.url, headers=headers).data

    # Re-route series/anime pages reached directly to the proper parser.
    check = scrapertoolsV2.find_single_match(data.replace('\t', '').replace('\n', ''), r'<div class="category-film"><h3>([^<]+)<\/h3>')
    if 'serie tv' in check.lower(): return episodios(item)
    elif 'anime' in check.lower(): return anime(item)

    # Decode the base64-obfuscated "protectlink" iframes into plain URLs.
    if 'protectlink' in data:
        urls = scrapertoolsV2.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
        for url in urls:
            url = url.decode('base64')  # Python 2 only
            if '\t' in url:
                url = url[:-1]
            data += '\t' + url
            # 'nodmca' pages hide the real URL behind one more redirect page.
            if 'nodmca' in data:
                page = httptools.downloadpage(url, headers=headers).data
                data += '\t' + scrapertoolsV2.find_single_match(page, '<meta name="og:url" content="([^=]+)">')

    # Build server items (handles link checking/filtering/autoplay).
    itemlist = support.server(item, data, headers, True, True)
    support.videolibrary(itemlist, item, 'color kod bold')
    return itemlist
|
|
|
|
|
|