remove infoIca in favor of infoLabels

Author: mac12m99
Date: 2019-03-12 19:54:34 +01:00
parent 5922afb70c
commit d861c7c0b5
40 changed files with 280 additions and 516 deletions
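The pattern replaced across all 40 files is the same: channels used to wrap every appended Item in the per-item infoIca(...) helper from core.tmdb, passing tipo="movie" or tipo="tv"; they now append plain Item objects and enrich the finished list with a single call to tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) before returning it. The snippet below is a minimal sketch of that before/after, not code taken from any one channel; the Item constructor and the tmdb call follow their usage in the hunks below, while the function name and loop variables are placeholders.

# Minimal sketch of the change in this commit (hypothetical channel function;
# variable names are placeholders, Item and tmdb are the addon modules used below).
from core import tmdb
from core.item import Item

def elenco(item, matches):
    itemlist = []
    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        # Before: itemlist.append(infoIca(Item(...), tipo="movie"))
        # After: append a plain Item; infoLabels are filled later in one pass.
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="movie",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail))
    # One TMDb lookup pass over the whole list replaces the per-item infoIca calls.
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist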

.gitignore vendored
View File

@@ -1,3 +1,5 @@
*
!plugin.video.alfa
*.pyo
*.pyc
.DS_Store

View File

@@ -9,7 +9,7 @@ import re
from core import httptools, scrapertools, servertools
from platformcode import logger, config
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
@@ -119,14 +119,14 @@ def elenco(item):
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
logger.info("title=[" + scrapedtitle + "] url=[" + scrapedurl + "] thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="movie"))
thumbnail=scrapedthumbnail))
# Pagination
# ===========================================================================================================================
@@ -140,7 +140,8 @@ def elenco(item):
itemlist.append(Item(channel=item.channel, action="mainlist", title=ListTxt, folder=True))
# ===========================================================================================================================
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -158,13 +159,13 @@ def search(item, texto):
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="movie"))
thumbnail=scrapedthumbnail))
# Pagination
# ===========================================================================================================================
@@ -176,6 +177,7 @@ def search(item, texto):
else:
itemlist.append(Item(channel=item.channel, action="mainlist", title=ListTxt, folder=True))
# ===========================================================================================================================
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -9,7 +9,7 @@ import re
from platformcode import logger, config
from core import httptools, scrapertools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
host = "https://www.cineblog01.cloud"
@@ -154,7 +154,7 @@ def peliculas(item):
for url in matches:
if "scrolling" not in url: continue
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -163,7 +163,7 @@ def peliculas(item):
url=scrapedurl,
extra="movie",
thumbnail=scrapedthumbnail,
folder=True), tipo="movie"))
folder=True))
# Pages
patronvideos = r'<a href="([^"]+)">Avanti</a>'
@@ -184,6 +184,7 @@ def peliculas(item):
folder=True))
break
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# ================================================================================================================

View File

@@ -15,7 +15,7 @@ from platformcode import logger, config
from core.item import Item
from lib import unshortenit
from platformcode import config
from core.tmdb import infoIca
from core import tmdb
# Required for Autoplay
IDIOMAS = {'Italiano': 'IT'}
@@ -136,7 +136,7 @@ def video(item):
tipologia = 'movie'
action = 'select'
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action=action,
contentType=item.contentType,
@@ -146,7 +146,7 @@ def video(item):
url=url,
thumbnail=thumb,
infoLabels=year,
show=title), tipo=tipologia))
show=title))
# Next page
next_page = scrapertools.find_single_match(data, '<a class="next page-numbers".*?href="([^"]+)">')
@@ -160,6 +160,7 @@ def video(item):
contentType=item.contentType,
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -9,7 +9,7 @@ from channels import filtertools
from core import scrapertools, servertools, httptools
from core.item import Item
from platformcode import config
from core.tmdb import infoIca
from core import tmdb
host = 'https://cinemastreaming.info'
@@ -70,7 +70,7 @@ def video(item):
url=scrapedurl,
thumbnail=scrapedthumb,
infoLabels = infoLabels,
show=scrapedtitle,))
show=scrapedtitle))
return itemlist

View File

@@ -9,7 +9,8 @@ import re
from core import httptools, scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib.unshortenit import unshorten
from platformcode import logger, config
from lib import unshortenit
@@ -91,7 +92,7 @@ def peliculas(item):
#scrapedtitle = scrapedtitle.split("&#8211;")[0]
#scrapedtitle = scrapedtitle.split(" Download")[0]
scrapedthumbnail = ""
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos" if 'movie' in item.extra else 'episodes',
text_color="azure",
@@ -100,7 +101,7 @@ def peliculas(item):
title=scrapedtitle,
url="%s/%s" % (host, scrapedurl),
viewmode="movie_with_plot",
thumbnail=scrapedthumbnail), tipo=item.extra))
thumbnail=scrapedthumbnail))
nextpage_regex = '<a class="next page-numbers" href="([^"]+)">'
next_page = scrapertools.find_single_match(data, nextpage_regex)

View File

@@ -12,7 +12,7 @@ from core import httptools
from platformcode import logger, config
from core import scrapertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
@@ -90,14 +90,14 @@ def elenco_top(item):
# always here to check the log
logger.info("Url:" + scrapedurl + " thumbnail:" + scrapedimg + " title:" + scrapedtitle)
title = scrapedtitle.split("(")[0]
itemlist.append(infoIca(Item(channel=item.channel,
itemlist.append(Item(channel=item.channel,
action="findvideos",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedimg,
fanart=""
)))
))
return itemlist
@@ -118,7 +118,7 @@ def elenco(item):
scrapedtitle = scrapedtitle.replace(" streaming ita", "")
scrapedtitle = scrapedtitle.replace(" film streaming", "")
scrapedtitle = scrapedtitle.replace(" streaming gratis", "")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -128,9 +128,9 @@ def elenco(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
# Pagination
# Pagination
patronvideos = r'<a class="page dark gradient" href=["|\']+([^"]+)["|\']+>AVANTI'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -144,6 +144,7 @@ def elenco(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -188,14 +189,14 @@ def elenco_ten(item):
for scrapedurl, scrapedtitle in matches:
logger.info("Url:" + scrapedurl + " title:" + scrapedtitle)
itemlist.append(infoIca(Item(channel=item.channel,
itemlist.append(Item(channel=item.channel,
action="findvideos",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail="",
fanart=""
)))
))
return itemlist

View File

@@ -12,7 +12,7 @@ from core import httptools
from platformcode import logger
from core import scrapertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
@@ -66,7 +66,7 @@ def tvoggi(item):
scrapedurl = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="do_search",
extra=urllib.quote_plus(scrapedtitle) + '{}' + 'movie',
@@ -74,7 +74,7 @@ def tvoggi(item):
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
folder=True), tipo="movie"))
folder=True))
return itemlist

View File

@@ -13,7 +13,7 @@ from platformcode import logger, config
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
@@ -152,7 +152,7 @@ def peliculas(item):
scrapedtitle = scrapedtitle.title()
txt = "Serie Tv"
if txt in scrapedtitle: continue
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
@@ -161,9 +161,9 @@ def peliculas(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
# Pagination
# Pagination
patronvideos = '<span class=\'current\'>[^<]+</span><a class=[^=]+=[^=]+="(.*?)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -177,6 +177,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -10,7 +10,7 @@ import urlparse
from channels import autoplay
from core import scrapertools, servertools, httptools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib import unshortenit
from platformcode import config, logger
@@ -93,7 +93,7 @@ def peliculas(item):
for scrapedurl, scrapedthumbnail, scrapedtitle, scraprate in matches:
scrapedplot = ""
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -104,9 +104,9 @@ def peliculas(item):
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='movie'))
folder=True))
# Pagination
# Pagination
patronvideos = '<a href="([^"]+)"[^>]+>Pagina'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -121,6 +121,7 @@ def peliculas(item):
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -138,7 +139,7 @@ def peliculas_tv(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedplot = ""
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
fulltitle=title,
@@ -148,9 +149,9 @@ def peliculas_tv(item):
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='tv'))
folder=True))
# Pagination
# Pagination
patronvideos = '<a href="([^"]+)"[^>]+>Pagina'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -165,6 +166,7 @@ def peliculas_tv(item):
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def categorias(item):

View File

@@ -15,7 +15,7 @@ from core import scrapertools, servertools, httptools
from platformcode import logger, config
from core.item import Item
from platformcode import config
from core.tmdb import infoIca
from core import tmdb
__channel__ = 'filmsenzalimiti'
@@ -134,7 +134,7 @@ def video(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
scrapedrating = scrapertools.decodeHtmlentities(scrapedrating)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action='findvideos',
title=scrapedtitle + ' (' + scrapedrating + ')',
@@ -142,7 +142,7 @@ def video(item):
url=scrapedurl,
show=scrapedtitle,
contentType=item.contentType,
thumbnail=scrapedthumbnail), tipo='movie'))
thumbnail=scrapedthumbnail))
patron = '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"'
next_page = scrapertools.find_single_match(data, patron)
@@ -172,7 +172,7 @@ def cerca(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
scrapedrating = scrapertools.decodeHtmlentities(scrapedrating)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action='findvideos',
title=scrapedtitle + ' (' + scrapedrating + ')',
@@ -180,7 +180,7 @@ def cerca(item):
url=scrapedurl,
show=scrapedtitle,
contentType=item.contentType,
thumbnail=scrapedthumbnail), tipo='movie'))
thumbnail=scrapedthumbnail))
patron = '<a href="([^"]+)"><i class="glyphicon glyphicon-chevron-right"'
next_page = scrapertools.find_single_match(data, patron)

View File

@@ -14,7 +14,7 @@ from core import scrapertools, servertools, httptools
from platformcode import logger, config
from core.item import Item
from platformcode import config
from core.tmdb import infoIca
from core import tmdb
# Required for Autoplay
__channel__ = 'filmsenzalimiticc'
@@ -173,7 +173,7 @@ def video(item):
azione='episodios'
tipologia = 'tv'
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action=azione,
contentType=item.contentType,
@@ -183,7 +183,7 @@ def video(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
infoLabels=infolabels,
show=scrapedtitle), tipo=tipologia))
show=scrapedtitle))
# Next page
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">')
@@ -197,6 +197,7 @@ def video(item):
contentType=item.contentType,
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -11,7 +11,7 @@ from channels import autoplay
from channels import filtertools
from core import scrapertools, servertools, httptools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from platformcode import logger, config
IDIOMAS = {'Italiano': 'IT'}
@@ -99,7 +99,7 @@ def ultimifilm(item):
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -108,8 +108,9 @@ def ultimifilm(item):
url=scrapedurl,
extra="movie",
thumbnail=item.thumbnail,
folder=True), tipo="movie"))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -152,7 +153,7 @@ def loadfilms(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot.strip())
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
@@ -160,7 +161,7 @@ def loadfilms(item):
url=scrapedurl,
plot=scrapedplot,
thumbnail=scrapedthumbnail,
folder=True), tipo=item.extra))
folder=True))
patronvideos = '<link rel="next" href="([^"]+)"\s*/>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -175,6 +176,7 @@ def loadfilms(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -11,7 +11,7 @@ from channels import autoplay
from channels import filtertools
from core import scrapertools, servertools, httptools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
@@ -81,16 +81,16 @@ def peliculas(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace("Streaming ", "")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="movie"))
thumbnail=scrapedthumbnail))
# Pagination
# Pagination
patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -104,6 +104,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def peliculas_tv(item):
@@ -125,16 +126,16 @@ def peliculas_tv(item):
scrapedtitle = scrapedtitle.replace(" Streaming", "")
scrapedtitle = scrapedtitle.title()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="tv",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="tv"))
thumbnail=scrapedthumbnail))
# Pagination
# Pagination
patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -148,6 +149,7 @@ def peliculas_tv(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
@@ -166,7 +168,7 @@ def episodios(item):
scrapedthumbnail = ""
scrapedtitle = scraped_1 + " " + scraped_2
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
@@ -174,7 +176,7 @@ def episodios(item):
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="tv"))
thumbnail=scrapedthumbnail))
# Service commands
if config.get_videolibrary_support() and len(itemlist) != 0:
@@ -186,6 +188,7 @@ def episodios(item):
extra="episodios",
show=item.show))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def peliculas_src_tv(item):
@@ -205,16 +208,16 @@ def peliculas_src_tv(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace("Streaming ", "")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="tv",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="tv"))
thumbnail=scrapedthumbnail))
# Pagination
# Pagination
patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -228,6 +231,7 @@ def peliculas_src_tv(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def peliculas_src(item):
@@ -247,16 +251,16 @@ def peliculas_src(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace("Streaming ", "")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="movie"))
thumbnail=scrapedthumbnail))
# Pagination
# Pagination
patronvideos = '<span class="current">[^>]+</span><a href=\'(.*?)\' class="inactive">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -270,6 +274,7 @@ def peliculas_src(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def categorias(item):

View File

@@ -11,7 +11,7 @@ from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from platformcode import logger, config
host = "https://www.guardarefilm.video"
@@ -152,7 +152,7 @@ def peliculas(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios" if item.extra == "tvshow" else "findvideos",
contentType="movie",
@@ -162,7 +162,7 @@ def peliculas(item):
url=scrapedurl,
thumbnail=urlparse.urljoin(host, scrapedthumbnail),
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
# Pagination
patronvideos = '<div class="pages".*?<span>.*?<a href="([^"]+)">'
@@ -196,7 +196,7 @@ def peliculas_tv(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios" if item.extra == "tvshow" else "findvideos",
fulltitle=scrapedtitle,
@@ -205,7 +205,7 @@ def peliculas_tv(item):
url=scrapedurl,
thumbnail=urlparse.urljoin(host, scrapedthumbnail),
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
# Pagination
patronvideos = '<div class="pages".*?<span>.*?<a href="([^"]+)">'

View File

@@ -8,7 +8,7 @@ import re
from core import httptools, scrapertools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib import unshortenit
from platformcode import logger, config
@@ -70,14 +70,14 @@ def lista_serie(item):
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapedtitle.split("(")[0]
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle, viewmode="movie"), tipo='tv'))
show=scrapedtitle, viewmode="movie"))
# Pagination
# ===========================================================
@@ -95,6 +95,7 @@ def lista_serie(item):
thumbnail=thumbnail_successivo,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -209,15 +210,16 @@ def ricerca(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapedtitle.split("(")[0]
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle, viewmode="movie"), tipo='tv'))
show=scrapedtitle, viewmode="movie"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -245,15 +247,16 @@ def search(item, texto):
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedtitle = scrapedtitle.split("(")[0]
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="stagione",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle, viewmode="movie"), tipo='tv'))
show=scrapedtitle, viewmode="movie")))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -9,7 +9,7 @@ import re
from core import httptools, scrapertools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from platformcode import logger, config
@@ -109,7 +109,7 @@ def nuoveserie(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodi",
contentType="tv",
@@ -119,8 +119,9 @@ def nuoveserie(item):
extra="tv",
show=scrapedtitle,
thumbnail=scrapedthumbnail,
folder=True), tipo="tv"))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -145,7 +146,7 @@ def serietvaggiornate(item):
title = "%s %s" % (scrapedtitle, scrapedep)
extra = r'<span\s*.*?meta-stag="%s" meta-ep="%s" meta-embed="([^"]+)"[^>]*>' % (
episode[0][0], episode[0][1].lstrip("0"))
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findepvideos",
contentType="tv",
@@ -155,7 +156,8 @@ def serietvaggiornate(item):
url=scrapedurl,
extra=extra,
thumbnail=scrapedthumbnail,
folder=True), tipo="tv"))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -201,7 +203,7 @@ def lista_serie(item):
for scrapedurl, scrapedimg, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodi",
title=scrapedtitle,
@@ -210,7 +212,8 @@ def lista_serie(item):
thumbnail=scrapedimg,
extra=item.extra,
show=scrapedtitle,
folder=True), tipo="tv"))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -12,7 +12,7 @@ from platformcode import logger, config
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
__channel__ = "guardogratis"
@@ -84,7 +84,7 @@ def list_titles(item):
rate=' IMDb: [[COLOR orange]%s[/COLOR]]' % match.group(4) if match.group(4)!='N/A'else ''
scrapedtitle = scrapertools.unescape(match.group(3))
#scrapedtitle = scrapertools.unescape(match.group(3))+rate
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos" if not 'tvshow' in item.extra else 'serietv',
contentType="movie" if not 'tvshow' in item.extra else 'serie',
@@ -94,7 +94,7 @@ def list_titles(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot"), tipo=tipo))
viewmode="movie_with_plot"))
nextpage_regex=''
if item.extra in "movies,tvshow":
@@ -113,6 +113,7 @@ def list_titles(item):
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
@@ -176,7 +177,7 @@ def serietv(item):
scrapedtitle = scrapedtitle.replace("/", "")
scrapedtitle = scrapedtitle.replace("-", " ")
scrapedtitle = scrapedtitle.title()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
@@ -185,7 +186,7 @@ def serietv(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
@@ -196,6 +197,7 @@ def serietv(item):
extra="serietv",
show=item.show))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):

View File

@@ -10,7 +10,7 @@ from core import scrapertools, servertools, httptools
from core.item import Item
from channels import autoplay
from channels import filtertools
from core.tmdb import infoIca
from core import tmdb
__channel__ = "ilgeniodellostreaming"
@@ -146,7 +146,7 @@ def peliculas_src(item):
logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
if scrapedtipo == "TV":
itemlist.append(infoIca(
itemlist.append(
Item(channel=__channel__,
action="episodios",
fulltitle=scrapedtitle,
@@ -154,9 +154,9 @@ def peliculas_src(item):
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
folder=True), tipo='tv'))
folder=True))
else:
itemlist.append(infoIca(
itemlist.append(
Item(channel=__channel__,
action="findvideos",
contentType="movie",
@@ -165,8 +165,9 @@ def peliculas_src(item):
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
folder=True), tipo='movie'))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -184,7 +185,7 @@ def peliculas(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=__channel__,
action="findvideos",
contentType="movie",
@@ -194,9 +195,9 @@ def peliculas(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
# Pagination
# Pagination
patronvideos = '<span class="current">[^<]+<[^>]+><a href=\'(.*?)\''
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -210,6 +211,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -236,7 +238,7 @@ def nuoviep(item):
if i >= p * PERPAGE: break
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=__channel__,
action="findvideos",
fulltitle=scrapedtitle,
@@ -245,7 +247,7 @@ def nuoviep(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
@@ -258,6 +260,7 @@ def nuoviep(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -275,7 +278,7 @@ def serie(item):
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=__channel__,
action="episodios",
fulltitle=scrapedtitle,
@@ -284,9 +287,9 @@ def serie(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
# Pagination
# Pagination
patronvideos = '<span class="current">[^<]+<[^>]+><a href=\'(.*?)\''
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -300,6 +303,7 @@ def serie(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -11,7 +11,7 @@ from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib.unshortenit import unshorten_only
from platformcode import logger, config
@@ -171,14 +171,14 @@ def latestep(item):
continue
if 'completa' in scrapedtitle.lower():
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
title=completetitle,
contentSerieName=completetitle,
fulltitle=scrapedtitle,
url=scrapedurl,
folder=True), tipo='tv'))
folder=True))
else:
if 'episodio' not in scrapedepisode:
replace = re.compile(r'(\d+)x(\d+)')
@@ -186,7 +186,7 @@ def latestep(item):
else:
ep_pattern = r'%s(.*?(?:<br\s*/>|</p>))' % scrapedepisode
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos_single_ep",
title=completetitle,
@@ -194,8 +194,9 @@ def latestep(item):
fulltitle=scrapedtitle,
url=scrapedurl,
extra=ep_pattern,
folder=True), tipo='tv'))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -216,7 +217,7 @@ def peliculas(item):
plot = ""
thumbnail = scrapertools.find_single_match(match, 'data-echo="([^"]+)"')
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action='findvideos',
@@ -228,7 +229,7 @@ def peliculas(item):
thumbnail=thumbnail,
plot=plot,
viewmode="movie_with_plot",
folder=True), tipo='movie'))
folder=True))
# Next page
try:
@@ -244,6 +245,7 @@ def peliculas(item):
except:
pass
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -292,7 +294,7 @@ def peliculas_tv(item):
plot = ""
thumbnail = scrapertools.find_single_match(match, 'data-echo="([^"]+)"')
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action='episodios',
@@ -303,7 +305,7 @@ def peliculas_tv(item):
thumbnail=thumbnail,
plot=plot,
viewmode="movie_with_plot",
folder=True), tipo='tv'))
folder=True))
# Next
try:
@@ -319,6 +321,7 @@ def peliculas_tv(item):
except:
pass
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -336,7 +339,7 @@ def pel_tv(item):
thumbnail = ""
url = scrapedurl
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action='episodios',
@@ -347,7 +350,7 @@ def pel_tv(item):
thumbnail=thumbnail,
plot=plot,
viewmode="movie_with_plot",
folder=True), tipo='tv'))
folder=True))
# Next
try:
@@ -363,6 +366,7 @@ def pel_tv(item):
except:
pass
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -12,7 +12,7 @@ from channels import autoplay
from channels import filtertools
from core import scrapertools, servertools, httptools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from platformcode import logger, config
IDIOMAS = {'Italiano': 'IT'}
@@ -158,7 +158,7 @@ def fichas(item):
# ------------------------------------------------
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
# ------------------------------------------------
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -166,7 +166,7 @@ def fichas(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=scrapedtitle), tipo='movie'))
show=scrapedtitle))
# Pagination
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')
@@ -180,6 +180,7 @@ def fichas(item):
text_color="orange",
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -195,7 +196,7 @@ def tv_series(item):
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="seasons",
contentType="tv",
@@ -204,7 +205,7 @@ def tv_series(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle), tipo='tv'))
show=scrapedtitle))
# Pages
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')
@@ -218,6 +219,7 @@ def tv_series(item):
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -9,7 +9,7 @@ import urlparse
from core import httptools, scrapertools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib import unshortenit
from platformcode import config, logger
@@ -87,7 +87,7 @@ def latestep(item):
seasonandep = scrapertools.find_single_match(ep, r'(\d+x[0-9\-?]+)')
completetitle = "%s %s" % (scrapedtitle, ep)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findepvideos",
title=completetitle,
@@ -95,7 +95,7 @@ def latestep(item):
fulltitle=fulltitle,
url=scrapedurl,
extra=extra % seasonandep.replace('x', '×'),
folder=True), tipo='tv'))
folder=True))
continue
# Single episode
@@ -103,7 +103,7 @@ def latestep(item):
extra = extra % (correct_scraped_number)
completetitle = ("%s %s %s" % (
scrapedtitle, scraped_number_and_title, "(%s)" % scrapedlang if scrapedlang else scrapedlang)).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findepvideos",
title=completetitle,
@@ -111,8 +111,9 @@ def latestep(item):
fulltitle=fulltitle,
url=scrapedurl,
extra=extra,
folder=True), tipo='tv'))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -132,7 +133,7 @@ def peliculas(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedurl = scrapedurl.replace("-1/", "-links/")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
fulltitle=scrapedtitle,
@@ -141,9 +142,9 @@ def peliculas(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
# Pagination
# Pagination
patronvideos = '<a class="next page-numbers" href="(.*?)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -157,6 +158,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):

View File

@@ -11,7 +11,7 @@ import urlparse
from core import scrapertools, httptools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from platformcode import logger, config
@@ -133,7 +133,7 @@ def searchfilm(item):
# ------------------------------------------------
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
# ------------------------------------------------
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
@@ -141,7 +141,7 @@ def searchfilm(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle), tipo='movie'))
show=scrapedtitle))
# Pagination
next_page = scrapertools.find_single_match(data, "href='([^']+)'>Seguente &rsaquo;")
@@ -153,6 +153,7 @@ def searchfilm(item):
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -268,14 +269,14 @@ def fichas(item):
# ------------------------------------------------
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
# ------------------------------------------------
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle), tipo='movie'))
show=scrapedtitle))
# Pagination
next_page = scrapertools.find_single_match(data, "href='([^']+)'>Seguente &rsaquo;")
@@ -287,6 +288,7 @@ def fichas(item):
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -8,7 +8,7 @@ import re
from core import scrapertools, httptools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib import unshortenit
from platformcode import logger, config
@@ -66,7 +66,7 @@ def peliculas(item):
for scrapedurl, scrapedtitle in matches:
scrapedthumbnail = ""
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
@@ -76,7 +76,7 @@ def peliculas(item):
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot",
Folder=True), tipo='movie'))
Folder=True))
nextpage_regex = '<a class="nextpostslink".*?href="([^"]+)".*?<\/a>'
next_page = scrapertools.find_single_match(data, nextpage_regex)
@@ -88,6 +88,7 @@ def peliculas(item):
url="%s" % next_page,
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -138,7 +139,7 @@ def peliculas_tv(item):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
@@ -148,7 +149,7 @@ def peliculas_tv(item):
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
@@ -161,6 +162,7 @@ def peliculas_tv(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -12,7 +12,7 @@ from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb

View File

@@ -16,7 +16,7 @@ from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
__channel__ = "mondolunatico"
@@ -135,7 +135,7 @@ def peliculas(item):
scrapedplot = ""
for scrapedurl, scrapedthumbnail, scrapedtitle, in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="findvideos",
@@ -146,9 +146,9 @@ def peliculas(item):
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
# Pagination
# Pagination
patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -163,6 +163,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -190,7 +191,7 @@ def serietv(item):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
@@ -200,7 +201,7 @@ def serietv(item):
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
@@ -213,6 +214,7 @@ def serietv(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -236,7 +238,7 @@ def search_serietv(item, texto):
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
title = scrapertools.decodeHtmlentities(scrapedtitle)
if texto not in title.lower(): continue
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
@@ -246,8 +248,9 @@ def search_serietv(item, texto):
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):

View File

@@ -9,7 +9,7 @@ import urlparse
from core import httptools, scrapertools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from platformcode import logger, config
host = "http://mondolunatico.org"
@@ -89,7 +89,7 @@ def pelis_movie_src(item):
scrapedplot = ""
for scrapedurl, scrapedthumbnail, scrapedtitle, in matches:
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="findvideos",
@@ -100,8 +100,9 @@ def pelis_movie_src(item):
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -121,7 +122,7 @@ def peliculas(item):
scrapedplot = ""
scrapedthumbnail = ""
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="findvideos",
@@ -132,9 +133,9 @@ def peliculas(item):
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
# Pagination
# Pagination
patronvideos = '<span class="current">[^<]+</span><a href=\'(.*?)\''
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -149,6 +150,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -11,7 +11,7 @@ from channels import autoplay
from channels import filtertools
from core import scrapertools, servertools, httptools, scrapertoolsV2
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib.unshortenit import unshorten
from platformcode import logger, config
@@ -101,7 +101,7 @@ def search_peliculas(item):
scrapedthumbnail = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
@@ -111,8 +111,9 @@ def search_peliculas(item):
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='movie'))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search_peliculas_tv(item):
@@ -131,7 +132,7 @@ def search_peliculas_tv(item):
scrapedthumbnail = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
fulltitle=scrapedtitle,
@@ -141,8 +142,9 @@ def search_peliculas_tv(item):
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='tv'))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def peliculas(item):
@@ -164,7 +166,7 @@ def peliculas(item):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(Item(channel=item.channel,
itemlist.append(Item(channel=item.channel,
contentType="movie",
action="findvideos",
title=scrapedtitle,
@@ -172,7 +174,7 @@ def peliculas(item):
url=scrapedurl,
fanart=item.fanart if item.fanart != "" else item.scrapedthumbnail,
show=item.fulltitle,
folder=True), tipo='movie'))
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
@@ -185,6 +187,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -208,14 +211,14 @@ def lista_serie(item):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(Item(channel=item.channel,
itemlist.append(Item(channel=item.channel,
action="episodios",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
fanart=item.fanart if item.fanart != "" else item.scrapedthumbnail,
show=item.fulltitle,
folder=True), tipo='tv'))
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
@@ -228,6 +231,7 @@ def lista_serie(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -11,7 +11,7 @@ from channels import autoplay
from channels import filtertools
from core import httptools, scrapertools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib import unshortenit
from platformcode import logger, config
@@ -85,7 +85,7 @@ def peliculas(item):
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -96,9 +96,9 @@ def peliculas(item):
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='movie'))
folder=True))
# Pagination
# Pagination
patronvideos = '<a\s*class="nextpostslink" rel="next" href="([^"]+)">Avanti'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -112,6 +112,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -130,7 +131,7 @@ def peliculas_tv(item):
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
fulltitle=scrapedtitle,
@@ -140,9 +141,9 @@ def peliculas_tv(item):
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='tv'))
folder=True))
# Pagination
# Pagination
patronvideos = '<a\s*class="nextpostslink" rel="next" href="([^"]+)">Avanti'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -156,6 +157,7 @@ def peliculas_tv(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):

View File

@@ -1,23 +0,0 @@
{
"id": "saghe",
"name": "Saghe",
"language": ["ita"],
"active": false,
"adult": false,
"thumbnail": null,
"banner": null,
"categories": [
null
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,281 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Kodi on Demand - Kodi Addon
# Ricerca "Saghe"
# https://alfa-addon.com/categories/kod-addon.50/
# ------------------------------------------------------------
import datetime
import re
import urllib
from core import httptools, tmdb
from core import scrapertools
from core.item import Item
from core.tmdb import infoIca
from platformcode import logger, config
PERPAGE = 15
tmdb_key = tmdb.tmdb_auth_key # tmdb_key = '92db8778ccb39d825150332b0a46061d'
# tmdb_key = '92db8778ccb39d825150332b0a46061d'
dttime = (datetime.datetime.utcnow() - datetime.timedelta(hours=5))
systime = dttime.strftime('%Y%m%d%H%M%S%f')
today_date = dttime.strftime('%Y-%m-%d')
month_date = (dttime - datetime.timedelta(days=30)).strftime('%Y-%m-%d')
month2_date = (dttime - datetime.timedelta(days=60)).strftime('%Y-%m-%d')
year_date = (dttime - datetime.timedelta(days=365)).strftime('%Y-%m-%d')
tmdb_image = 'http://image.tmdb.org/t/p/original'
tmdb_poster = 'http://image.tmdb.org/t/p/w500'
def mainlist(item):
logger.info(" mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR yellow]Cult IMDB[/COLOR]",
action="movies",
url='https://www.imdb.com/list/ls000571226/',
thumbnail="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTVTW_L9vDQY0sjdlpfiOZdI0Nvi_NxSBpxmltDOFUYlctVxzX0Qg"),
Item(channel=item.channel,
title="[COLOR yellow]The Marvel Universe[/COLOR]",
action="tmdb_saghe_alt",
url='http://api.themoviedb.org/3/list/50941077760ee35e1500000c?api_key=%s&language=it' % tmdb_key,
thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/6t3KOEUtrIPmmtu1czzt6p2XxJy.jpg"),
Item(channel=item.channel,
title="[COLOR yellow]The DC Comics Universe[/COLOR]",
action="tmdb_saghe_alt",
url='http://api.themoviedb.org/3/list/5094147819c2955e4c00006a?api_key=%s&language=it' % tmdb_key,
thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/xWlaTLnD8NJMTT9PGOD9z5re1SL.jpg"),
Item(channel=item.channel,
title="[COLOR yellow]iMDb Top 250 Movies[/COLOR]",
action="tmdb_saghe_alt",
url='http://api.themoviedb.org/3/list/522effe419c2955e9922fcf3?api_key=%s&language=it' % tmdb_key,
thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/9O7gLzmreU0nGkIB6K3BsJbzvNv.jpg"),
Item(channel=item.channel,
title="[COLOR yellow]Rotten Tomatoes top 100 movies of all times[/COLOR]",
action="tmdb_saghe_alt",
url='http://api.themoviedb.org/3/list/5418c914c3a368462c000020?api_key=%s&language=it' % tmdb_key,
thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/zGadcmcF48gy8rKCX2ubBz2ZlbF.jpg"),
Item(channel=item.channel,
title="[COLOR yellow]Reddit top 250 movies[/COLOR]",
action="tmdb_saghe_alt",
url='http://api.themoviedb.org/3/list/54924e17c3a3683d070008c8?api_key=%s&language=it' % tmdb_key,
thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/dM2w364MScsjFf8pfMbaWUcWrR.jpg"),
Item(channel=item.channel,
title="[COLOR yellow]Sci-Fi Action[/COLOR]",
action="tmdb_saghe_alt",
url='http://api.themoviedb.org/3/list/54408e79929fb858d1000052?api_key=%s&language=it' % tmdb_key,
thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/5ig0kdWz5kxR4PHjyCgyI5khCzd.jpg"),
Item(channel=item.channel,
title="[COLOR yellow]007 - Movies[/COLOR]",
action="tmdb_saghe_alt",
url='http://api.themoviedb.org/3/list/557b152bc3a36840f5000265?api_key=%s&language=it' % tmdb_key,
thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/zlWBxz2pTA9p45kUTrI8AQiKrHm.jpg"),
Item(channel=item.channel,
title="[COLOR yellow]Disney Classic Collection[/COLOR]",
action="tmdb_saghe_alt",
url='http://api.themoviedb.org/3/list/51224e42760ee3297424a1e0?api_key=%s&language=it' % tmdb_key,
thumbnail="https://image.tmdb.org/t/p/w180_and_h270_bestv2/vGV35HBCMhQl2phhGaQ29P08ZgM.jpg"),
Item(channel=item.channel,
title="[COLOR yellow]Bad Movies[/COLOR]",
action="badmovies",
url='http://www.badmovies.org/movies/',
thumbnail="http://www.badmovies.org/mainpage/badmovielogo_600.jpg")]
return itemlist
def tmdb_saghe_alt(item):
itemlist = []
alphabet = dict()
# Load the page
data = httptools.downloadpage(item.url).data
# Extract the contents
patron = '"title":"(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle in matches:
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(scrapedtitle)
for letter in sorted(alphabet):
itemlist.append(
Item(channel=item.channel,
action="tmdb_saghe",
url='\n\n'.join(alphabet[letter]),
title=letter,
fulltitle=letter))
return itemlist
def tmdb_saghe(item):
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
matches = item.url.split('\n\n')
for i, (scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
Item(channel=item.channel,
action="do_search",
contentType="movie",
extra=urllib.quote_plus(scrapedtitle),
title=scrapedtitle,
fulltitle=scrapedtitle,
plot=scrapedplot,
thumbnail=scrapedthumbnail), tipo="movie"))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="tmdb_saghe",
title=config.get_localized_string(30992),
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def badmovies(item):
itemlist = []
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
# Load the page
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data,
'<table width="100%" cellpadding="6" cellspacing="1" class="listtab">(.*?)<tr><td align="center" valign="top">')
# Extract the contents
patron = r'">([^<]+)\s*</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapedurl = ""
scrapedplot = ""
scrapedthumbnail = ""
for i, (scrapedtitle) in enumerate(matches):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
Item(channel=item.channel,
extra=urllib.quote_plus(title),
action="do_search",
title=title,
url=title,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True), tipo='movie'))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="badmovies",
title=config.get_localized_string(30992),
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def do_search(item):
from channels import search
return search.do_search(item)
def movies(item):
logger.info("[saghe.py]==> movies")
itemlist = []
data = httptools.downloadpage(item.url).data
groups = scrapertools.find_multiple_matches(data,
r'<div class="lister-item-image ribbonize"(.*?)<div class="wtw-option-standalone"')
for group in groups:
infos = \
{
'title': single_scrape(group, r'<a[^>]+>([^<]+)</a>'),
'year': single_scrape(group, r'unbold">\((\d+)\)</span>'),
'rating': single_scrape(group, r'star__rating">(\d+,?\d*)</span>'),
'plot': single_scrape(group, r'<p class="">\s*([^<]+)</p>'),
'genres': single_scrape(group, r'genre">\s*([^<]+)</span>'),
'age': single_scrape(group, r'certificate">([^<]+)</span>'),
'metascore': single_scrape(group, r'metascore[^>]*>\s*(\d+)[^>]+>'),
'image': single_scrape(group, r'loadlate="([^"]+)"[^>]+>')
}
infos['title'] = scrapertools.decodeHtmlentities(infos['title']).strip()
infos['plot'] = scrapertools.decodeHtmlentities(infos['plot']).strip()
title = "%s (%s)%s[%s]" % (infos['title'], color(infos['year'], "gray"),
(" [%s]" % age_color("%s" % infos['age'])) if infos['age'] else "",
color(infos['rating'], "orange"))
plot = "Anno: %s%s\nVoto: %s\nGeneri: %s\nMetascore: %s\nDescrizione:\n%s" % \
(infos['year'], "\nPubblico: %s" % age_color(infos['age']) if infos['age'] else "", infos['rating'],
infos['genres'], infos['metascore'], infos['plot'])
itemlist.append(
Item(channel=item.channel,
text_color="azure",
action="do_search",
contentTitle=infos['title'],
infoLabels={'year': infos['year']},
title=title,
plot=plot,
extra="%s{}%s" % (urllib.quote_plus(infos['title']), "movie"),
thumbnail=infos['image']))
tmdb.set_infoLabels_itemlist(itemlist, True)
return itemlist
def age_color(age):
logger.info("[saghe.py]==> age_color")
if age.lower() == "t":
age = color(age, "green")
elif age.lower() == "pg":
age = color(age, "yellow")
elif age.lower() == "vm14":
age = color(age, "yellow")
elif age.lower() == "vm18":
age = color(age, "red")
elif 'banned' in age.lower():
age = color(age.replace('(', '').replace(')', '').strip(), "red")
return age
def single_scrape(text, patron):
logger.info("[saghe.py]==> single_scrape")
return scrapertools.find_single_match(text, patron)
def color(text, color):
logger.info("[saghe.py]==> color")
return "[COLOR %s]%s[/COLOR]" % (color, text)

View File

@@ -14,7 +14,7 @@ from core import scrapertools, servertools, httptools
from platformcode import logger, config
from core.item import Item
from platformcode import config
from core.tmdb import infoIca
from core import tmdb
__channel__ = "seriehd"
@@ -111,14 +111,14 @@ def fichas(item):
for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=__channel__,
action="episodios",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
fulltitle=scrapedtitle,
url=scrapedurl,
show=scrapedtitle,
thumbnail=scrapedthumbnail), tipo='tv'))
thumbnail=scrapedthumbnail))
patron = "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
next_page = scrapertools.find_single_match(data, patron)
@@ -129,6 +129,7 @@ def fichas(item):
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=next_page))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -12,7 +12,7 @@ from platformcode import logger, config
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
@@ -89,7 +89,7 @@ def lista_serie(item):
scrapedthumbnail = scrapedthumbnail.replace(" ", "%20")
scrapedtitle = scrapertools.unescape(match.group(2)).replace("[", "").replace("]", "")
scrapedurl = urlparse.urljoin(item.url, match.group(3))
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="serietv",
contentType="serietv",
@@ -99,7 +99,7 @@ def lista_serie(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot"), tipo='serie'))
viewmode="movie_with_plot"))
next_page = scrapertools.find_single_match(dataoriginale, '<div class="pagination">.*?href="([^"]+)".*?</div>')
if next_page != "":
@@ -111,6 +111,7 @@ def lista_serie(item):
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def serietv(item):
@@ -222,7 +223,7 @@ def topimdb(item):
scrapedurl = scrapertools.unescape(match.group(5))
scrapedtitle = scrapertools.unescape(match.group(6))
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="serietv",
contentType="serietv",
@@ -232,8 +233,9 @@ def topimdb(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
extra=item.extra,
viewmode="movie_with_plot"), tipo='serie'))
viewmode="movie_with_plot")))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):

View File

@@ -9,7 +9,7 @@ import urlparse
from core import httptools, scrapertools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib import unshortenit
from platformcode import logger, config
@@ -78,7 +78,7 @@ def search(item, texto):
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
fulltitle=scrapedtitle,
@@ -88,9 +88,9 @@ def search(item, texto):
thumbnail=scrapedthumbnail,
plot=scrapedplot,
extra=item.extra,
folder=True), tipo='tv'))
folder=True))
# Paginazione
patronvideos = '<div class="siguiente"><a href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -105,6 +105,7 @@ def search(item, texto):
extra=item.extra,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -128,14 +129,14 @@ def lista_serie(item):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoIca(Item(channel=item.channel,
itemlist.append(Item(channel=item.channel,
action="episodios",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
fanart=item.fanart if item.fanart != "" else item.scrapedthumbnail,
show=item.fulltitle,
folder=True), tipo='tv'))
folder=True))
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
@@ -148,6 +149,7 @@ def lista_serie(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -9,7 +9,7 @@ import re, urlparse
from platformcode import logger, config
from core import scrapertools, httptools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
@@ -75,7 +75,7 @@ def episodios(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace(scraped_1, "")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scraped_1,
@@ -84,7 +84,7 @@ def episodios(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
# paginación
patron = '<div id="navigation">.*?\d+</a> <a href="([^"]+)"'
@@ -98,6 +98,7 @@ def episodios(item):
extra=item.extra,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -8,7 +8,7 @@ import re
from core import httptools, scrapertools, servertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from platformcode import logger, config
@@ -137,7 +137,7 @@ def latestep(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
episodio = re.compile(r'(\d+)x(\d+)', re.DOTALL).findall(scrapedinfo)
title = "%s %s" % (scrapedtitle, scrapedinfo)
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findepisodevideo",
title=title,
@@ -146,7 +146,8 @@ def latestep(item):
extra=episodio,
thumbnail=scrapedimg,
show=title,
folder=True), tipo="tv"))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -165,7 +166,7 @@ def lista_serie(item):
for scrapedurl, scrapedimg, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
title=scrapedtitle,
@@ -173,7 +174,7 @@ def lista_serie(item):
url=scrapedurl,
thumbnail=scrapedimg,
show=scrapedtitle,
folder=True), tipo="tv"))
folder=True))
# Pagine
patron = '<a href="([^"]+)"[^>]+>Pagina'
@@ -186,6 +187,7 @@ def lista_serie(item):
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -7,7 +7,7 @@
from core import httptools, scrapertools, servertools, listtools
from core.item import Item
from platformcode import logger
from core.tmdb import infoIca
from core import tmdb
import re
__channel__ = "streaminghd"
@@ -191,7 +191,7 @@ def peliculas_src(item):
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -199,8 +199,9 @@ def peliculas_src(item):
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="movie"))
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def peliculas_tv_src(item):
@@ -218,7 +219,7 @@ def peliculas_tv_src(item):
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="episode",
@@ -226,8 +227,9 @@ def peliculas_tv_src(item):
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail), tipo="tv"))
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def by_anno_or_by_genere(item):

View File

@@ -11,7 +11,7 @@ from channels import autoplay
from channels import filtertools
from core import scrapertools, servertools, httptools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
from lib.unshortenit import unshorten_only
from platformcode import config
from platformcode import logger
@@ -217,7 +217,7 @@ def peliculas(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace("streaming", "")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -227,9 +227,9 @@ def peliculas(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
# Paginazione
patronvideos = '<a class="nextpostslink".*?href="([^"]+)"' ### <- Fix Pagina successiva '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -243,6 +243,7 @@ def peliculas(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -267,7 +268,7 @@ def peliculas_tv(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace("streaming", "")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodios",
fulltitle=scrapedtitle,
@@ -276,9 +277,9 @@ def peliculas_tv(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='tv'))
folder=True))
# Paginazione
patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -292,6 +293,7 @@ def peliculas_tv(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def latest(item):
@@ -317,7 +319,7 @@ def latest(item):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = scrapedtitle.replace("Permalink to ", "")
scrapedtitle = scrapedtitle.replace("streaming", "")
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="movie",
@@ -327,9 +329,9 @@ def latest(item):
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='movie'))
folder=True))
# Paginazione
patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -343,6 +345,7 @@ def latest(item):
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist

View File

@@ -10,7 +10,7 @@ import urlparse
from platformcode import logger, config
from core import servertools, httptools, scrapertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
host = "https://toonitalia.org"
@@ -79,7 +79,7 @@ def src_list(item):
scrapedtitle = scrapertools.decodeHtmlentities(url_title[0][1])
scrapedurl = url_title[0][0]
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="links",
text_color="azure",
@@ -89,8 +89,9 @@ def src_list(item):
url=scrapedurl,
show=scrapedtitle,
extra=item.extra,
folder=True), tipo=item.extra))
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def lista_anime(item):

View File

@@ -12,7 +12,7 @@ from core import httptools
from platformcode import logger
from core import scrapertools
from core.item import Item
from core.tmdb import infoIca
from core import tmdb
@@ -93,7 +93,7 @@ def lista_anime(item):
for scrapedthumbnail, scrapeddetails, scrapedurl, scrapedtitle in matches:
scrapedurl = item.url.replace(item.url.split("/")[-1], scrapedurl)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoIca(
itemlist.append(
Item(channel=item.channel,
action="episodi",
title="%s %s %s" % (
@@ -102,7 +102,7 @@ def lista_anime(item):
show=scrapedtitle,
url=scrapedurl,
thumbnail=makeurl(scrapedthumbnail),
folder=True), tipo='tv'))
folder=True))
return itemlist