Merge pull request #9 from kodiondemand/master

riorganizzazione (reorganization)
This commit is contained in:
greko
2019-05-23 19:00:39 +02:00
committed by GitHub
81 changed files with 1130 additions and 5372 deletions
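The change that recurs across these files is the reorganization named in the title: helper modules such as autoplay, filtertools, autorenumber and renumbertools move from the channels package to the new specials package, and the shared support helpers move from channels.support to core.support (added below as core/support.py). A minimal sketch of the import pattern after the move, assuming the add-on's package layout; the try/except fallback is illustrative only and is not part of this commit:

try:
    # layout introduced by this PR
    from specials import autoplay, filtertools
    from core import support
except ImportError:
    # hypothetical fallback for code still written against the old layout
    from channels import autoplay, filtertools, support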

View File

@@ -5,8 +5,8 @@
import re
import urlparse
from channels import filtertools, autoplay, support
from core import servertools, httptools, tmdb, scrapertoolsV2
from specials import filtertools, autoplay
from core import servertools, httptools, tmdb, scrapertoolsV2, support
from core.item import Item
from platformcode import logger, config

View File

@@ -14,7 +14,7 @@
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"label": "Cerca informazioni extra",
"default": true,
"enabled": true,
"visible": true

View File

@@ -5,13 +5,9 @@
# -*- last change: 04/05/2019
from channels import autoplay, support, filtertools
from specials import autoplay, filtertools
from channelselector import get_thumb
from core import httptools
from core import channeltools
from core import scrapertools
from core import servertools
from core import tmdb
from core import httptools, channeltools, scrapertools, servertools, tmdb, support
from core.item import Item
from platformcode import config, logger

View File

@@ -5,8 +5,8 @@
import re
from channels import autoplay, filtertools, support
from core import servertools
from specials import autoplay, filtertools
from core import servertools, support
from core.item import Item
from platformcode import logger, config

View File

@@ -7,7 +7,7 @@ import re
from core import httptools, scrapertools, servertools, tmdb
from platformcode import logger, config
from core.item import Item
from channels import autoplay
from specials import autoplay
from channelselector import thumb

View File

@@ -5,9 +5,9 @@
import re
from channels import filtertools, autoplay, support, autorenumber
from channels.support import log, menu
from core import servertools, httptools, scrapertoolsV2, scrapertools, tmdb
from specials import filtertools, autoplay, autorenumber
from core.support import log, menu
from core import servertools, httptools, scrapertoolsV2, scrapertools, tmdb, support
from platformcode import logger, config
from core.item import Item
from lib.js2py.host import jsfunctions

View File

@@ -1,7 +1,7 @@
{
"id": "animesaturn",
"name": "AnimeSaturn",
"active": false,
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "animesaturn.png",

View File

@@ -8,9 +8,9 @@ import re
import time
import urlparse
import channelselector
from channels import autoplay, support, filtertools
from core import httptools, tmdb, scrapertools, servertools
import channelselector, filtertools
from core import httptools, tmdb, scrapertools, servertools, support
from specials import autoplay
from core.item import Item
from platformcode import logger, config
__channel__ = "animesaturn"
@@ -30,11 +30,10 @@ def mainlist(item):
support.log(item.channel + 'mainlist')
itemlist = []
support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host,'anime')
# support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host,'anime')
itemlist.append(
Item(channel=item.channel,
action="ultimiep",
url="%s/fetch_pages.php?request=episodes" % host,
url="%s/fetch_pages.php?request=episodios" % host,
title=support.typo("Novità submenu"),
extra="",
contentType='anime',
@@ -124,29 +123,36 @@ def lista_anime(item):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle).replace('(ita)','(ITA)')
movie = False
showtitle = title
if '(ITA)' in title:
title = title.replace('(ITA)','').strip()
showtitle = title
title += ' '+support.typo(' [ITA] color kod')
title += ' '+support.typo(' (ITA)')
infoLabels = {}
# if 'Akira' in title:
# movie = True
# infoLabels['year']= 1988
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
action="episodios" if movie == False else 'findvideos',
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=showtitle,
show=showtitle,
contentTitle=showtitle,
plot=scrapedplot,
contentType='episode',
contentType='episode' if movie == False else 'movie',
originalUrl=scrapedurl,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist)
# Paginazione
if len(matches) >= p * PERPAGE:
@@ -175,6 +181,10 @@ def episodios(item):
anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)')
#movie or series
movie = scrapertools.find_single_match(data, r'\Episodi:</b>\s(\d*)\sMovie')
data = httptools.downloadpage(
host + "/loading_anime?anime_id=" + anime_id,
headers={
@@ -188,7 +198,8 @@ def episodios(item):
for scrapedtitle, scrapedurl in matches:
scrapedtitle = cleantitle(scrapedtitle)
scrapedtitle = re.sub(r'<[^>]*?>', '', scrapedtitle)
scrapedtitle = '[COLOR azure][B]' + scrapedtitle + '[/B][/COLOR]'
scrapedtitle = '[B]' + scrapedtitle + '[/B]'
itemlist.append(
Item(
channel=item.channel,
@@ -202,10 +213,16 @@ def episodios(item):
fanart=item.thumbnail,
thumbnail=item.thumbnail))
if((len(itemlist) == 1 and 'Movie' in itemlist[0].title) or movie):
item.url = itemlist[0].url
item.contentType = 'movie'
return findvideos(item)
# tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# support.videolibrary(itemlist,item,'bold color kod')
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
autorenumber.renumber(itemlist, item)
support.videolibrary(itemlist,item,'bold color kod')
return itemlist
@@ -214,24 +231,32 @@ def episodios(item):
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
originalItem = item
if(item.contentType == 'movie'):
episodes = episodios(item)
if(len(episodes)>0):
item.url = episodes[0].url
itemlist = []
data = httptools.downloadpage(item.url).data
patron = r'<a href="([^"]+)"><div class="downloadestreaming">'
url = scrapertools.find_single_match(data, patron)
data = httptools.downloadpage(url).data
patron = r"""<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
matches = re.compile(patron, re.DOTALL).findall(data)
for video in matches:
itemlist.append(
Item(
channel=item.channel,
action="play",
fulltitle=item.fulltitle,
title="".join([item.title, ' ', support.typo(video.title, 'color kod []')]),
url=video,
contentType=item.contentType,
folder=False))
# patron = r"""<source\s*src=(?:"|')([^"']+?)(?:"|')\s*type=(?:"|')video/mp4(?:"|')>"""
# matches = re.compile(patron, re.DOTALL).findall(data)
# for video in matches:
# itemlist.append(
# Item(
# channel=item.channel,
# action="play",
# fulltitle=item.fulltitle,
# title="".join([item.title, ' ', support.typo(video.title, 'color kod []')]),
# url=video,
# contentType=item.contentType,
# folder=False))
itemlist = support.server(item, data=data)
# itemlist = filtertools.get_links(itemlist, item, list_language)
@@ -295,7 +320,7 @@ def ultimiep(item):
channel=item.channel,
action="ultimiep",
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=host + "/fetch_pages?request=episodes",
url=host + "/fetch_pages?request=episodios",
thumbnail= support.thumb(),
extra=next_page,
folder=True))
@@ -317,7 +342,7 @@ def newest(categoria):
item.extra = ''
try:
if categoria == "anime":
item.url = "%s/fetch_pages?request=episodes" % host
item.url = "%s/fetch_pages?request=episodios" % host
item.action = "ultimiep"
itemlist = ultimiep(item)

View File

@@ -13,9 +13,9 @@ from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
from channels import renumbertools
from specials import autoplay
from specials import filtertools
from specials import renumbertools
host = "https://animespace.tv/"

View File

@@ -7,10 +7,9 @@
import re, urllib, urlparse
from core import servertools, httptools, scrapertools, tmdb
from core import servertools, httptools, scrapertools, tmdb, support
from platformcode import logger, config
from core.item import Item
from channels import support

View File

@@ -4,10 +4,10 @@
# ----------------------------------------------------------
import re, urlparse
from core import httptools, scrapertoolsV2, servertools, tmdb, tvdb
from core import httptools, scrapertoolsV2, servertools, tmdb, tvdb, support
from core.item import Item
from platformcode import logger, config
from channels import autoplay, filtertools, support, autorenumber
from specials import autoplay, filtertools, autorenumber
from channelselector import thumb

View File

@@ -5,8 +5,8 @@
# ------------------------------------------------------------
import re, urlparse
from core import scrapertools, scrapertoolsV2, httptools, servertools, tmdb
from channels import autoplay, filtertools, support
from core import scrapertools, scrapertoolsV2, httptools, servertools, tmdb, support
from specials import autoplay, filtertools
from core.item import Item
from platformcode import logger, config
from channelselector import thumb, get_thumb

View File

@@ -4,8 +4,8 @@
# ------------------------------------------------------------
import re, urlparse, base64
from core import scrapertoolsV2, httptools, servertools, tmdb
from channels import autoplay, support
from core import scrapertoolsV2, httptools, servertools, tmdb, support
from specials import autoplay
from core.item import Item
from platformcode import logger, config

View File

@@ -6,8 +6,8 @@
import re
import urlparse
from channels import autoplay, filtertools, support
from core import scrapertoolsV2, httptools, servertools, tmdb
from specials import autoplay, filtertools
from core import scrapertoolsV2, httptools, servertools, tmdb, support
from core.item import Item
from lib import unshortenit
from platformcode import logger, config
@@ -53,6 +53,7 @@ def mainlist(item):
support.menu(itemlist, 'Cerca film... submenu', 'search', host, args='film')
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serietv/', contentType='episode')
support.menu(itemlist, 'Aggiornamenti serie tv', 'last', host + '/serietv/aggiornamento-quotidiano-serie-tv/', contentType='episode')
support.menu(itemlist, 'Per Lettera submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Lettera")
support.menu(itemlist, 'Per Genere submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Anno")
@@ -118,21 +119,31 @@ def last(item):
infoLabels = {}
quality = ''
matches = support.match(item, r'<ahref=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>', r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>', headers)[0]
if item.contentType == 'episode':
matches = support.match(item, r'<a href="([^">]+)".*?>([^(:(|[)]+)([^<]+)<\/a>', '<article class="sequex-post-content.*?</article>', headers)[0]
else:
matches = support.match(item, r'<ahref=([^>]+)>([^(:(|[)]+)([^<]+)<\/a>', r'<strong>Ultimi 100 film Aggiornati:<\/a><\/strong>(.*?)<td>', headers)[0]
for url, title, info in matches:
add = True
title = title.rstrip()
infoLabels['year'] = scrapertoolsV2.find_single_match(info, r'\(([0-9]+)\)')
quality = scrapertoolsV2.find_single_match(info, r'\[([A-Z]+)\]')
if item.contentType == 'episode':
for i in itemlist:
if i.url == url: # togliamo i doppi
add = False
else:
infoLabels['year'] = scrapertoolsV2.find_single_match(info, r'\(([0-9]+)\)')
quality = scrapertoolsV2.find_single_match(info, r'\[([A-Z]+)\]')
if quality:
if quality:
longtitle = title + support.typo(quality,'_ [] color kod')
else:
longtitle = title
itemlist.append(
if add:
itemlist.append(
Item(channel=item.channel,
action='findvideos',
action='findvideos' if item.contentType != 'episode' else 'episodios',
contentType=item.contentType,
title=longtitle,
fulltitle=title,

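The reworked last() now also lists the daily series updates, keeping a single entry per show and routing it to episodios instead of findvideos. A minimal, equivalent sketch of that duplicate filter (sample data and names are illustrative):

# sample of what support.match() returns for the update page
matches = [('http://example/serie-a', 'Serie A', ' (2019) [HD]'),
           ('http://example/serie-a', 'Serie A', ' (2019) [HD]'),
           ('http://example/serie-b', 'Serie B', ' (2018) [HD]')]

seen_urls = []
unique = []
for url, title, info in matches:
    if url in seen_urls:
        continue  # the feed repeats a show once per new episode
    seen_urls.append(url)
    unique.append((url, title, info))
# unique now holds one entry per show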
View File

@@ -6,9 +6,8 @@ import base64
import re
import urlparse
from channels import autoplay, support
from channels import filtertools
from core import scrapertools, servertools, httptools
from specials import autoplay, filtertools
from core import scrapertools, servertools, httptools, support
from platformcode import logger, config
from core.item import Item
from lib import unshortenit

View File

@@ -4,8 +4,8 @@
# ------------------------------------------------------------
import re
from channels import filtertools, support, autoplay
from core import scrapertools, servertools, httptools, scrapertoolsV2
from specials import filtertools, autoplay
from core import scrapertools, servertools, httptools, scrapertoolsV2, support
from core.item import Item
host = 'https://cinemastreaming.icu'

View File

@@ -15,8 +15,8 @@ from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config, platformtools
from channels import autoplay
from channels import filtertools
from specials import autoplay
from specials import filtertools
list_data = {}

View File

@@ -19,7 +19,7 @@
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"label": "Cerca informazioni extra",
"default": true,
"enabled": true,
"visible": true

View File

@@ -17,8 +17,8 @@
import re
from channels import autoplay, filtertools, support
from core import scrapertoolsV2, httptools, servertools, tmdb
from specials import autoplay, filtertools
from core import scrapertoolsV2, httptools, servertools, tmdb, support
from core.item import Item
from platformcode import logger, config

View File

@@ -4,10 +4,18 @@
"language": ["ita"],
"active": true,
"adult": false,
"thumbnail": "http://fastsubita.ml/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
"banner": "http://fastsubita.ml/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
"thumbnail": "http://fastsubita.com/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
"banner": "http://fastsubita.com/wp-content/uploads/2017/10/Untitled-222255xxx.jpg",
"categories": ["tvshow", "vosi"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "http://fastsubita.com",
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",

View File

@@ -1,25 +1,25 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Thanks Icarus crew & Alfa addon
# Canale per fastsubita
# ------------------------------------------------------------
import re, urlparse
from channels import autoplay, filtertools
from specials import autoplay, filtertools
from core import scrapertools, servertools, httptools, tmdb
from core.item import Item
from platformcode import config, logger
host = "http://fastsubita.com"
__channel__ = 'fastsubita'
host = config.get_setting("channel_host", __channel__)
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome']
list_quality = ['default']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'fastsubita')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'fastsubita')
# __comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'fastsubita')
# __comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'fastsubita')
headers = [
['Host', 'fastsubita.com'],
@@ -34,45 +34,52 @@ headers = [
['Cache-Control', 'max-age=0']
]
PERPAGE = 14
PERPAGE = 15
def mainlist(item):
logger.info("[fastsubita.py] mainlist")
logger.info(item.channel+" mainlist")
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'pelicuals_tv', host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Aggiornamenti[/COLOR]",
action="serietv",
extra='serie',
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Tutte le Serie TV[/COLOR]",
action="all_quick",
extra='serie',
url="%s/elenco-serie-tv/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra='serie',
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
return itemlist
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('', '\'').replace('&#215;','x').replace('×','x')
return scrapedtitle.strip()
# ================================================================================================================
def newest(categoria):
logger.info("[fastsubita.py]==> newest" + categoria)
logger.info(__channel__+" newest" + categoria)
itemlist = []
item = Item()
try:
if categoria == "series":
item.url = host
item.action = "serietv"
itemlist = serietv(item)
# item.action = "serietv"
itemlist = pelicuals_tv(item)
if itemlist[-1].action == "serietv":
itemlist.pop()
@@ -87,13 +94,12 @@ def newest(categoria):
return itemlist
def serietv(item):
logger.info("[fastsubita.py] peliculas")
def pelicuals_tv(item):
logger.info(item.channel+" pelicuals_tv")
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
logger.info("[fastsubita.py] peliculas")
# Estrae i contenuti
patron = r'<h3 class="entry-title title-font"><a href="([^"]+)" rel="bookmark">(.*?)<'
@@ -103,9 +109,11 @@ def serietv(item):
scrapedplot = ""
scrapedthumbnail = ""
scraped_1 = scrapedtitle.split("&#215;")[0][:-2]
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedtitle = cleantitle(scrapedtitle)
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
scrapedtitle = scrapedtitle.replace(scraped_1, "")
if "http:" in scrapedurl:
scrapedurl = scrapedurl
else:
@@ -115,37 +123,64 @@ def serietv(item):
Item(channel=item.channel,
action="findvideos",
contentType="tvshow",
title="[COLOR azure]" + scraped_1 + "[/COLOR]" + " " + scrapedtitle,
fulltitle=scraped_1,
title=scraped_1 + " " + scrapedtitle,
fulltitle=scraped_1 + " " + scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scraped_1,
extra=item.extra,
contentSerieName=scraped_1+" ("+episode[0]+" Sub-Ita)",
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginazione
patronvideos = r'<a class="next page-numbers" href="(.*?)">Successivi'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="serietv",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
extra=item.extra,
folder=True))
# Paginazione
support.nextPage(itemlist,item,data,'<a class="next page-numbers" href="(.*?)">Successivi')
return itemlist
def serietv():
logger.info(__channel__+" serietv")
def all_quick(item):
logger.info("[fastsubita.py] peliculas")
itemlist = []
data = httptools.downloadpage("%s/" % host, headers=headers).data
# block = scrapertools.find_single_match(data, r'<div class="entry-content">(.*?)</div>')
block = scrapertools.find_single_match(data, r"<select\s*?name='cat'\s*?id='cat'\s*?class='postform'\s*?>(.*?)</select>")
# block = data
# Estrae i contenuti
# patron = r'<a style.*?href="([^"]+)">([^<]+)<\/a>'
# patron = r'<a.*?href="([^"]+)">([^<]+)<\/a>'
# matches = re.compile(patron, re.DOTALL).findall(block)
matches = re.compile(r'<option class="level-([0-9]?)" value="([^"]+)">([^<]+)</option>', re.DOTALL).findall(block)
index = 0
# for scrapedurl, scrapedtitle in matches:
# scrapedtitle = cleantitle(scrapedtitle)
# if "http:" not in scrapedurl:
# scrapedurl = "http:" + scrapedurl
#
# if ('S' in scrapedtitle.strip().upper()[0] and len(scrapedtitle.strip()) == 3) or '02' == scrapedtitle:
# # itemlist[index -1][0]+='{|}'+scrapedurl
# continue
#
# itemlist.append([scrapedurl,scrapedtitle])
# index += 1
for level, cat, title in matches:
title = cleantitle(title)
url = '%s?cat=%s' % (host, cat)
if int(level) > 0:
itemlist[index - 1][0] += '{|}' + url
continue
itemlist.append([url, title])
index += 1
logger.debug(itemlist)
return itemlist
def lista_serie(item):
logger.info(item.channel+" lista_serie")
itemlist = []
p = 1
@@ -153,57 +188,65 @@ def all_quick(item):
item.url, p = item.url.split('{}')
p = int(p)
# logger.debug(p)
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# data = httptools.downloadpage(item.url, headers=headers).data
#
# block = scrapertools.find_single_match(data,r'<div class="entry-content">(.*?)</div>')
#
# # Estrae i contenuti
# # patron = r'<a style.*?href="([^"]+)">([^<]+)<\/a>'
# patron = r'<a.*?href="([^"]+)">([^<]+)<\/a>'
# matches = re.compile(patron, re.DOTALL).findall(block)
if '||' in item.url:
series = item.url.split('\n\n')
matches = []
for i, serie in enumerate(series):
matches.append(serie.split('||'))
series = matches
else:
series = serietv()
# Estrae i contenuti
patron = r'<a style.*?href="([^"]+)">([^<]+)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
for i, (scrapedurl, scrapedtitle) in enumerate(series):
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
if 'S' in scrapedtitle.lower(): continue
if "http:" in scrapedurl:
scrapedurl = scrapedurl
else:
scrapedurl = "http:" + scrapedurl
itemlist.append(
Item(channel=item.channel,
action="serietv",
contentType="tvshow",
action="episodios",
title=scrapedtitle,
fulltitle=scrapedtitle,
text_color="azure",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=scrapedtitle,
extra=item.extra,
contentType='episode',
originalUrl=scrapedurl,
folder=True))
# ii += 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if len(matches) >= p * PERPAGE:
if len(series) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="all_quick",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
action='lista_serie',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
args=item.args,
extra=item.extra,
thumbnail=support.thumb()))
return itemlist
def findvideos(item):
logger.info("[fastsubita.py] findvideos")
logger.info(item.channel+" findvideos")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
@@ -217,30 +260,33 @@ def findvideos(item):
scrapedurl, follow_redirects=False)
data += resp.headers.get("location", "") + '\n'
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
videoitem.language = IDIOMAS['Italiano']
itemlist = support.server(item,data)
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
# itemlist = servertools.find_video_items(data=data)
#
# for videoitem in itemlist:
# videoitem.title = item.title + videoitem.title
# videoitem.fulltitle = item.fulltitle
# videoitem.thumbnail = item.thumbnail
# videoitem.show = item.show
# videoitem.plot = item.plot
# videoitem.channel = item.channel
# videoitem.contentType = item.contentType
# videoitem.language = IDIOMAS['Italiano']
#
# # Requerido para Filtrar enlaces
#
# if __comprueba_enlaces__:
# itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
#
# # Requerido para FilterTools
#
# # itemlist = filtertools.get_links(itemlist, item, list_language)
#
# # Requerido para AutoPlay
#
# autoplay.start(itemlist, item)
@@ -248,10 +294,30 @@ def findvideos(item):
def search(item, texto):
logger.info("[fastsubita.py] " + item.url + " search " + texto)
item.url = "%s/?s=%s" % (host, texto)
logger.info(item.channel + " " + item.url + " search " + texto)
# item.url = "%s/?s=%s" % (host, texto)
# item.url = "%s/elenco-serie-tv/" % host
itemlist = []
try:
return serietv(item)
series = serietv()
for i, (scrapedurl, scrapedtitle) in enumerate(series):
if texto.upper() in scrapedtitle.upper():
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl,
folder=True))
return itemlist
# Continua la ricerca in caso di errore
except:
import sys
@@ -259,4 +325,103 @@ def search(item, texto):
logger.error("%s" % line)
return []
# ----------------------------------------------------------------------------------------------------------------
def list_az(item):
support.log(item.channel + " list_az")
itemlist = []
alphabet = dict()
for i, (scrapedurl, scrapedtitle) in enumerate(serietv()):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
for letter in sorted(alphabet):
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
url='\n\n'.join(alphabet[letter]),
title=letter,
fulltitle=letter))
return itemlist
# ================================================================================================================
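list_az() above groups the whole series archive by first letter and packs each group into item.url as url||title pairs separated by blank lines; lista_serie() detects the '||' marker and unpacks the same pairs before paginating with PERPAGE. A minimal round-trip sketch of that payload (entries are illustrative):

entries = [('http://fastsubita.com/?cat=12', 'Arrow'),
           ('http://fastsubita.com/?cat=34', 'Atlanta')]

# what list_az stores in item.url for one letter
payload = '\n\n'.join('%s||%s' % (url, title) for url, title in entries)

# what lista_serie rebuilds from that payload
series = [chunk.split('||') for chunk in payload.split('\n\n')]
assert series == [list(e) for e in entries]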
# ----------------------------------------------------------------------------------------------------------------
def episodios(item,itemlist = []):
support.log(item.channel + " episodios")
urls = item.url.split('{|}')
# logger.debug(urls)
# Carica la pagina
data = httptools.downloadpage(urls[0], headers=headers).data
urls.pop(0)
# Estrae i contenuti
patron = r'<h3 class="entry-title title-font"><a href="([^"]+)" rel="bookmark">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
# logger.debug(matches)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = cleantitle(scrapedtitle)
episode = scrapertools.find_multiple_matches(scrapedtitle,r'((\d*)x(\d*))')[0]
season = episode[1].lstrip('0')
# if season in seasons and '/page/' not in item.url: break
# logger.debug(scrapedtitle)
# logger.debug(episode)
# return False
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode[2]
title = infoLabels['season']+'x'+infoLabels['episode']
if "http:" not in scrapedurl:
scrapedurl = "http:" + scrapedurl
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="tvshow",
title=title,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
show=item.show,
extra=item.extra,
infoLabels=infoLabels,
folder=True))
next_page = scrapertools.find_single_match(data,r'<a class="next page-numbers" href="(.*?)">Successivi')
if next_page != "":
urls.insert(0,next_page)
# logger.debug(urls)
if(len(urls) > 0):
item.url = '{|}'.join(urls)
itemlist = episodios(item, itemlist)
else:
cleanItemlist = []
episodes = []
for episode in itemlist:
if episode.title in episodes: continue
cleanItemlist.append(episode)
episodes.append(episode.title)
itemlist = cleanItemlist
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
item.url = item.originalUrl
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
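episodios() above consumes a small queue of URLs joined with '{|}': the archive scraper appends extra-season category URLs to a show with that separator, and any "next page" link found while crawling is pushed back onto the front of the queue. A minimal sketch of the convention (URLs are illustrative):

season_urls = ['http://fastsubita.com/?cat=12', 'http://fastsubita.com/?cat=98']
item_url = '{|}'.join(season_urls)   # what the archive listing stores in item.url
pending = item_url.split('{|}')      # what episodios() unpacks
page = pending.pop(0)                # page to download now; pagination links are re-inserted at index 0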

View File

@@ -7,8 +7,8 @@ import re
import urlparse
from channelselector import get_thumb
from channels import filtertools, support, autoplay
from core import scrapertools, servertools, httptools, tmdb
from specials import filtertools, autoplay
from core import scrapertools, servertools, httptools, tmdb, support
from platformcode import logger, config
from core.item import Item

View File

@@ -5,8 +5,8 @@
import re
import urlparse
from channels import autoplay, support
from core import scrapertoolsV2, servertools, httptools, tmdb
from specials import autoplay
from core import scrapertoolsV2, servertools, httptools, tmdb, support
from core.item import Item
from lib import unshortenit
from platformcode import config, logger

View File

@@ -7,9 +7,8 @@ import re
import urlparse
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools, support
from core import scrapertools, servertools, httptools
from specials import filtertools, autoplay
from core import scrapertools, servertools, httptools, support
from platformcode import logger, config
from core.item import Item
from platformcode import config

View File

@@ -6,8 +6,8 @@ import base64
import re
import urlparse
from channels import autoplay
from channels import filtertools
from specials import autoplay
from specials import filtertools
from core import scrapertools, servertools, httptools
from platformcode import logger, config
from core.item import Item

View File

@@ -6,11 +6,10 @@
import re
from core import httptools, scrapertools, servertools
from core import httptools, scrapertools, servertools, support
from core.item import Item
from core import tmdb
from platformcode import logger, config
from channels import support
host = "http://www.guardaserie.watch"

View File

@@ -8,8 +8,8 @@ import re, urlparse
from platformcode import config, logger
from core import scrapertools, servertools, httptools
from core.item import Item
from channels import autoplay
from channels import filtertools
from specials import autoplay
from specials import filtertools
from core import tmdb
__channel__ = "ilgeniodellostreaming"

View File

@@ -7,11 +7,10 @@ import base64
import re
import urlparse
from channels import autoplay
from channels import filtertools, support
from core import scrapertools, servertools, httptools
from specials import autoplay
from specials import filtertools
from core import scrapertools, servertools, httptools, tmdb, support
from core.item import Item
from core import tmdb
from platformcode import logger, config
IDIOMAS = {'Italiano': 'IT'}

View File

@@ -7,12 +7,9 @@ import base64
import re
import urlparse
from core import scrapertools, httptools
from core import servertools
from core import scrapertools, httptools, servertools, tmdb, support
from core.item import Item
from core import tmdb
from platformcode import logger, config
from channels import support

View File

@@ -7,8 +7,8 @@
import re
import urllib
from channels import autoplay
from channels import filtertools
from specials import autoplay
from specials import filtertools
from core import scrapertools, servertools, httptools, scrapertoolsV2
from core.item import Item
from core import tmdb

View File

@@ -6,8 +6,8 @@
import re
import urlparse
from channels import autoplay
from channels import filtertools
from specials import autoplay
from specials import filtertools
from core import httptools, scrapertools, servertools
from core.item import Item
from core import tmdb

View File

@@ -4,9 +4,9 @@
# ------------------------------------------------------------
import urlparse
from channels import autoplay, filtertools, support
from channels.support import menu, log
from core import scrapertoolsV2, servertools, httptools, tmdb
from specials import autoplay, filtertools
from core.support import menu, log
from core import scrapertoolsV2, servertools, httptools, tmdb, support
from core.item import Item
from platformcode import logger, config
from channelselector import thumb

View File

@@ -4,11 +4,11 @@
# ----------------------------------------------------------
import re
from core import httptools, scrapertoolsV2, servertools, tmdb
from core import httptools, scrapertoolsV2, servertools, tmdb, support
from core.item import Item
from lib import unshortenit
from platformcode import logger, config
from channels import autoplay, support
from specials import autoplay
from channelselector import thumb
host = "https://serietvonline.live"

View File

@@ -8,8 +8,8 @@ import re
import time
import channelselector
from channels import autoplay, support, filtertools
from core import httptools, tmdb, scrapertools, servertools
from specials import autoplay, filtertools
from core import httptools, tmdb, scrapertools, servertools, support
from core.item import Item
from platformcode import logger, config
__channel__ = "serietvsubita"
@@ -30,7 +30,7 @@ def mainlist(item):
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
@@ -293,9 +293,13 @@ def peliculas_tv(item):
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')[0]
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
itemlist.append(
Item(channel=item.channel,
action="findvideos",
@@ -304,7 +308,7 @@ def peliculas_tv(item):
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
contentSerieName=title+" ("+episode[0]+" Sub-Ita)",
plot=scrapedplot,
folder=True))

View File

@@ -6,8 +6,8 @@
import re
import channelselector
from channels import autoplay, support, filtertools
from core import httptools, tmdb, scrapertools, servertools
from specials import autoplay, filtertools
from core import httptools, tmdb, scrapertools, servertools, support
from core.item import Item
from platformcode import logger, config
__channel__ = 'serietvu'

View File

@@ -1,7 +1,13 @@
from channels import support, autoplay
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Streaming Altadefinizione
# ------------------------------------------------------------
from core import support
from specials import autoplay
from core.item import Item
host = 'https://www.streamingaltadefinizione.world/'
host = 'https://www.streamingaltadefinizione.space'
list_servers = ['verystream', 'openload', 'wstream']
list_quality = ['1080p', 'HD', 'DVDRIP', 'SD', 'CAM']

View File

@@ -7,7 +7,7 @@ import urllib
from lib import unshortenit
from platformcode import logger, config
from channelselector import thumb
from channels import autoplay
from specials import autoplay
def hdpass_get_servers(item):
@@ -379,7 +379,8 @@ def menu(itemlist, title='', action='', url='', contentType='movie', args=[]):
def typo(string, typography=''):
kod_color = '0xFF0081C2'
kod_color = '0xFF65B3DA' #'0xFF0081C2'
# Check if the typographic attributes are in the string or outside
if typography:
@@ -388,7 +389,7 @@ def typo(string, typography=''):
string = string + ' >'
# If there are no attributes, it applies the default ones
attribute = ['[]','()','{}','submenu','color','bold','italic','_','[B]','[I]','[COLOR]']
attribute = ['[]','()','{}','submenu','color','bold','italic','_','--','[B]','[I]','[COLOR]']
movie_word_list = ['film', 'serie', 'tv', 'anime', 'cinema', 'sala']
search_word_list = ['cerca']
@@ -422,6 +423,8 @@ def typo(string, typography=''):
string = '[I]' + re.sub(r'\sitalic','',string) + '[/I]'
if '_' in string:
string = ' ' + re.sub(r'\s_','',string)
if '--' in string:
string = ' - ' + re.sub(r'\s--','',string)
return string
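These hunks add the '--' attribute (a leading dash separator) to typo() and change kod_color to 0xFF65B3DA. A few illustrative calls, with outputs traced from the code above; the last call mirrors support.typo(quality, '_ [] color kod') as used by cb01:

support.typo('Novità submenu')        # -> ' > Novità'
support.typo('Film bold')             # -> '[B]Film[/B]'
support.typo('2019 --')               # -> ' - 2019'
support.typo('HD', '_ [] color kod')  # -> ' [COLOR 0xFF65B3DA][HD][/COLOR]'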
@@ -455,7 +458,7 @@ def videolibrary(itemlist, item, typography=''):
action = 'add_serie_to_library'
extra = 'episodios'
contentType = 'tvshow'
if not typography: typography = 'color kod bold'
title = typo(config.get_localized_string(30161) + ' ' + typography)
if inspect.stack()[1][3] == 'findvideos' and contentType == 'movie' or inspect.stack()[1][3] != 'findvideos' and contentType != 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0:
@@ -500,11 +503,8 @@ def server(item, data='', headers='', AutoPlay=True, CheckLinks=True):
if not data:
data = httptools.downloadpage(item.url, headers=headers).data
## fix by greko
data = str(item.url)
## FINE fix by greko
itemlist = servertools.find_video_items(data=data)
itemlist = servertools.find_video_items(data=str(data))
for videoitem in itemlist:
videoitem.title = "".join([item.title, ' ', typo(videoitem.title, 'color kod []')])
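This hunk removes the earlier data = str(item.url) override, so server() once again extracts links from whatever the caller passes in (pre-fetched HTML, or a plain URL string coerced with str()). A sketch of the typical call from a channel, as cb01 and fastsubita now do; the channel code here is hypothetical:

from core import httptools, support

def findvideos(item):
    data = httptools.downloadpage(item.url).data  # page that contains the embed links
    return support.server(item, data=data)        # servertools picks the playable items out of data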

View File

@@ -6,9 +6,9 @@
import re
import urlparse
from channels import support, autorenumber
from channels.support import menu, log, aplay
from core import scrapertoolsV2, httptools, tmdb
from specials import autorenumber
from core.support import menu, log, aplay
from core import scrapertoolsV2, httptools, tmdb, support
from core.item import Item
from platformcode import config, logger

View File

@@ -14,7 +14,7 @@
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"label": "Cerca informazioni extra",
"default": true,
"enabled": true,
"visible": true

View File

@@ -7,8 +7,8 @@
import re
import urlparse
from channels import autoplay, filtertools, support
from core import scrapertools, scrapertoolsV2, httptools, tmdb, servertools
from specials import autoplay, filtertools
from core import scrapertools, scrapertoolsV2, httptools, tmdb, servertools, support
from core.item import Item
from platformcode import logger, config

View File

@@ -6,8 +6,8 @@
import re
import urlparse
from channels import autoplay, support
from core import scrapertoolsV2, httptools, servertools
from specials import autoplay
from core import scrapertoolsV2, httptools, servertools, support
from core.item import Item
from platformcode import logger
from channelselector import thumb

View File

@@ -14,7 +14,7 @@
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"label": "Cerca informazioni extra",
"default": true,
"enabled": true,
"visible": true

View File

@@ -27,11 +27,11 @@ def getmainlist(view="thumb_"):
thumbnail=get_thumb("channels.png", view), view=view,
category=config.get_localized_string(30119), viewmode="thumbnails"))
itemlist.append(Item(title=config.get_localized_string(70527), channel="alfavorites", action="mainlist",
itemlist.append(Item(title=config.get_localized_string(70527), channel="kodfavorites", action="mainlist",
thumbnail=get_thumb("mylink.png", view), view=view,
category=config.get_localized_string(70527), viewmode="thumbnails"))
itemlist.append(Item(title=config.get_localized_string(30103), channel="search", action="mainlist",
itemlist.append(Item(title=config.get_localized_string(30103), channel="search", path='special', action="mainlist",
thumbnail=get_thumb("search.png", view),
category=config.get_localized_string(30119), viewmode="list",
context=[{"title": config.get_localized_string(70286), "channel": "search", "action": "opciones",

View File

@@ -133,6 +133,8 @@ def get_channel_json(channel_name):
channel_json = None
try:
channel_path = filetools.join(config.get_runtime_path(), "channels", channel_name + ".json")
if not os.path.isfile(channel_path):
channel_path = filetools.join(config.get_runtime_path(), "specials", channel_name + ".json")
if filetools.isfile(channel_path):
# logger.info("channel_data=" + channel_path)
channel_json = jsontools.load(filetools.read(channel_path))
@@ -149,7 +151,7 @@ def get_channel_json(channel_name):
def get_channel_controls_settings(channel_name):
# logger.info("channel_name=" + channel_name)
dict_settings = {}
# import web_pdb; web_pdb.set_trace()
list_controls = get_channel_json(channel_name).get('settings', list())
for c in list_controls:

core/support.py (new file, 534 additions)
View File

@@ -0,0 +1,534 @@
# support functions that are needed by many channels, to avoid repeating the same code
import base64, urlparse, re, os, inspect
from core import httptools, scrapertoolsV2, servertools, tmdb
from core.item import Item
import urllib
from lib import unshortenit
from platformcode import logger, config
from channelselector import thumb
from specials import autoplay
def hdpass_get_servers(item):
# Carica la pagina
data = httptools.downloadpage(item.url).data.replace('\n', '')
patron = r'<iframe(?: id="[^"]+")? width="[^"]+" height="[^"]+" src="([^"]+)"[^>]+><\/iframe>'
url = scrapertoolsV2.find_single_match(data, patron).replace("?alta", "")
url = url.replace("&download=1", "")
if 'https' not in url:
url = 'https:' + url
if 'hdpass' in url or 'hdplayer' in url:
data = httptools.downloadpage(url).data
start = data.find('<div class="row mobileRes">')
end = data.find('<div id="playerFront">', start)
data = data[start:end]
patron_res = '<div class="row mobileRes">(.*?)</div>'
patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed"\s*value="([^"]+)"\s*/>'
res = scrapertoolsV2.find_single_match(data, patron_res)
itemlist = []
for res_url, res_video in scrapertoolsV2.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):
data = httptools.downloadpage(urlparse.urljoin(url, res_url)).data.replace('\n', '')
mir = scrapertoolsV2.find_single_match(data, patron_mir)
for mir_url, server in scrapertoolsV2.find_multiple_matches(mir, '<option.*?value="([^"]+?)">([^<]+?)</value>'):
data = httptools.downloadpage(urlparse.urljoin(url, mir_url)).data.replace('\n', '')
for media_label, media_url in scrapertoolsV2.find_multiple_matches(data, patron_media):
itemlist.append(Item(channel=item.channel,
action="play",
title=item.title+" ["+color(server, 'orange')+"]"+" - "+color(res_video, 'limegreen'),
fulltitle=item.fulltitle,
quality=res_video,
show=item.show,
thumbnail=item.thumbnail,
contentType=item.contentType,
server=server,
url=url_decode(media_url)))
log("video -> ", res_video)
return itemlist
def url_decode(url_enc):
lenght = len(url_enc)
if lenght % 2 == 0:
len2 = lenght / 2
first = url_enc[0:len2]
last = url_enc[len2:lenght]
url_enc = last + first
reverse = url_enc[::-1]
return base64.b64decode(reverse)
last_car = url_enc[lenght - 1]
url_enc = url_enc[:lenght - 1]  # strings are immutable: drop the last char rather than overwriting it
len1 = len(url_enc)
len2 = len1 / 2
first = url_enc[0:len2]
last = url_enc[len2:len1]
url_enc = last + first
reverse = url_enc[::-1]
reverse = reverse + last_car
return base64.b64decode(reverse)
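# Illustrative inverse of url_decode (hypothetical helper, not part of this file):
# base64-encode the URL, reverse it, then swap the two halves. Base64 output always
# has even length, so url_decode takes its even-length branch on the result.
def url_encode_sketch(url):
    enc = base64.b64encode(url)
    rev = enc[::-1]
    half = len(rev) // 2
    return rev[half:] + rev[:half]
# Round trip:
# url_decode(url_encode_sketch('https://example.com/video.mp4')) == 'https://example.com/video.mp4'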
def color(text, color):
return "[COLOR " + color + "]" + text + "[/COLOR]"
def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data="", patron_block="",
patronNext="", action="findvideos", addVideolibrary = True):
# patron: the patron to use for scraping page, all capturing group must match with listGroups
# listGroups: a list containing the scraping info obtained by your patron, in order
# accepted values are: url, title, thumb, quality, year, plot, duration, genre, rating
# header: values to pass to request header
# blacklist: titles that you want to exclude (service articles, for example)
# data: if you want to pass data manually, maybe because you need some custom replacement
# patron_block: patron to get parts of the page (to scrape with patron attribute),
# if you need a "block inside another block" you can create a list, please note that all matches
# will be packed as string
# patronNext: patron for scraping next page link
# action: the action the resulting items should perform, if different from "findvideos"; useful when scraping films by genre
# url_host: string to prepend to scrapedurl, useful when the url doesn't contain the host
# example usage:
# import support
# itemlist = []
# patron = 'blablabla'
# headers = [['Referer', host]]
# blacklist = 'Request a TV serie!'
# return support.scrape(item, itemlist, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot'],
# headers=headers, blacklist=blacklist)
itemlist = []
if not data:
data = httptools.downloadpage(item.url, headers=headers).data.replace("'", '"')
data = re.sub('\n|\t', ' ', data)
# replace all ' with " and strip newlines/tabs, so the patterns don't have to account for quote style or line breaks
log('DATA =', data)
block = data
if patron_block:
if type(patron_block) == str:
patron_block = [patron_block]
for n, regex in enumerate(patron_block):
blocks = scrapertoolsV2.find_multiple_matches(block, regex)
block = ""
for b in blocks:
block += "\n" + b
log('BLOCK ', n, '=', block)
else:
block = data
if patron and listGroups:
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES =', matches)
known_keys = ['url', 'title', 'episode', 'thumb', 'quality', 'year', 'plot', 'duration', 'genere', 'rating'] #by greko aggiunto episode
for match in matches:
if len(listGroups) > len(match): # to fix a bug
match = list(match)
match.extend([''] * (len(listGroups) - len(match)))
scraped = {}
for kk in known_keys:
val = match[listGroups.index(kk)] if kk in listGroups else ''
if val and (kk == "url" or kk == 'thumb') and 'http' not in val:
val = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + val
scraped[kk] = val
title = scrapertoolsV2.decodeHtmlentities(scraped["title"]).strip()
plot = scrapertoolsV2.htmlclean(scrapertoolsV2.decodeHtmlentities(scraped["plot"]))
if scraped["quality"] and scraped["episode"]: # by greko aggiunto episode
longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B][COLOR blue][' + scraped["quality"] + '][/COLOR]' # by greko aggiunto episode
elif scraped["episode"]: # by greko aggiunto episode
longtitle = '[B]' + title + '[/B] - [B]' + scraped["episode"] + '[/B]' # by greko aggiunto episode
else:
longtitle = '[B]' + title + '[/B]'
if item.infoLabels["title"] or item.fulltitle: # if title is set, probably this is a list of episodes or video sources
infolabels = item.infoLabels
else:
infolabels = {}
if scraped["year"]:
infolabels['year'] = scraped["year"]
if scraped["plot"]:
infolabels['plot'] = plot
if scraped["duration"]:
matches = scrapertoolsV2.find_multiple_matches(scraped["duration"],r'([0-9])\s*?(?:[hH]|:|\.|,|\\|\/|\||\s)\s*?([0-9]+)')
for h, m in matches:
scraped["duration"] = int(h) * 60 + int(m)
if not matches:
scraped["duration"] = scrapertoolsV2.find_single_match(scraped["duration"], r'(\d+)')
infolabels['duration'] = int(scraped["duration"]) * 60
if scraped["genere"]:
genres = scrapertoolsV2.find_multiple_matches(scraped["genere"], '[A-Za-z]+')
infolabels['genere'] = ", ".join(genres)
if scraped["rating"]:
infolabels['rating'] = scrapertoolsV2.decodeHtmlentities(scraped["rating"])
if scraped["title"] not in blacklist:
it = Item(
channel=item.channel,
action=action,
contentType=item.contentType,
title=longtitle,
fulltitle=title,
show=title,
quality=scraped["quality"],
url=scraped["url"],
infoLabels=infolabels,
thumbnail=scraped["thumb"],
args=item.args
)
for lg in list(set(listGroups).difference(known_keys)):
it.__setattr__(lg, match[listGroups.index(lg)])
itemlist.append(it)
if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \
or (item.contentType == "movie" and action != "play"):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
else:
for it in itemlist:
it.infoLabels = item.infoLabels
if patronNext:
nextPage(itemlist, item, data, patronNext, 2)
if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
item.fulltitle = item.infoLabels["title"]
videolibrary(itemlist, item)
return itemlist
def dooplay_get_links(item, host):
# get links from websites using dooplay theme and dooplay_player
# return a list of dict containing these values: url, title and server
data = httptools.downloadpage(item.url).data.replace("'", '"')
patron = r'<li id="player-option-[0-9]".*?data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)".*?<span class="title".*?>([^<>]+)</span>(?:<span class="server">([^<>]+))?'
matches = scrapertoolsV2.find_multiple_matches(data, patron)
ret = []
for type, post, nume, title, server in matches:
postData = urllib.urlencode({
"action": "doo_player_ajax",
"post": post,
"nume": nume,
"type": type
})
dataAdmin = httptools.downloadpage(host + 'wp-admin/admin-ajax.php', post=postData,headers={'Referer': item.url}).data
link = scrapertoolsV2.find_single_match(dataAdmin, "<iframe.*src='([^']+)'")
ret.append({
'url': link,
'title': title,
'server': server
})
return ret
def dooplay_get_episodes(item):
itemlist = []
item.contentType = "episode"
data = httptools.downloadpage(item.url).data.replace("'", '"')
patron = '<li class="mark-[0-9]">.*?<img.*?data-lazy-src="([^"]+).*?([0-9] - [0-9]).*?<a href="([^"]+)">([^<>]+).*?([0-9]{4})'
for scrapedthumb, scrapedep, scrapedurl, scrapedtitle, scrapedyear in scrapertoolsV2.find_multiple_matches(data, patron):
scrapedep = scrapedep.replace(' - ', 'x')
infoLabels = {}
infoLabels['year'] = scrapedyear
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
title=scrapedep + " " + scrapedtitle,
fulltitle=scrapedtitle,
show=item.fulltitle,
url=scrapedurl,
thumbnail=scrapedthumb,
infoLabels=infoLabels
)
)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
videolibrary(itemlist, item)
return itemlist
def dooplay_films(item, blacklist=""):
if item.contentType == 'movie':
action = 'findvideos'
patron = '<article id="post-[0-9]+" class="item movies">.*?<img src="(?!data)([^"]+)".*?<span class="quality">([^<>]+).*?<a href="([^"]+)">([^<>]+)</a></h3>.*?(?:<span>([0-9]{4})</span>|</article>).*?(?:<span>([0-9]+) min</span>|</article>).*?(?:<div class="texto">([^<>]+)|</article>).*?(?:genres">(.*?)</div>|</article>)'
else:
action = 'episodios'
patron = '<article id="post-[0-9]+" class="item tvshows">.*?<img src="(?!data)([^"]+)".*?(?:<span class="quality">([^<>]+))?.*?<a href="([^"]+)">([^<>]+)</a></h3>.*?(?:<span>([0-9]{4})</span>|</article>).*?(?:<span>([0-9]+) min</span>|</article>).*?(?:<div class="texto">([^<>]+)|</article>).*?(?:genres">(.*?)</div>|</article>)'
# patronNext = '<a class="arrow_pag" href="([^"]+)"><i id="nextpagination"'
patronNext = '<div class="pagination">.*?class="current".*?<a href="([^"]+)".*?<div class="resppages">'
itemlist = scrape(item, patron, ['thumb', 'quality', 'url', 'title', 'year', 'duration', 'plot', 'genre'], blacklist=blacklist, patronNext=patronNext, action=action, addVideolibrary=False)
if itemlist and 'Successivo' in itemlist[-1].title:
itemlist[-1].action = 'peliculas'
return itemlist
def dooplay_search(item, blacklist=""):
if item.contentType == 'movie':
type = 'movies'
action = 'findvideos'
else:
type = 'tvshows'
action = 'episodios'
patron = '<div class="result-item">.*?<img src="([^"]+)".*?<span class="' + type + '">([^<>]+).*?<a href="([^"]+)">([^<>]+)</a>.*?<span class="year">([0-9]{4}).*?<div class="contenido"><p>([^<>]+)'
patronNext = '<a class="arrow_pag" href="([^"]+)"><i id="nextpagination"'
return scrape(item, patron, ['thumb', 'quality', 'url', 'title', 'year', 'plot'], blacklist=blacklist, patronNext=patronNext, action=action)
def swzz_get_url(item):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0'}
if "/link/" in item.url:
data = httptools.downloadpage(item.url, headers=headers).data
if "link =" in data:
data = scrapertoolsV2.find_single_match(data, 'link = "([^"]+)"')
if 'http' not in data:
data = 'https:' + data
else:
match = scrapertoolsV2.find_single_match(data, r'<meta name="og:url" content="([^"]+)"')
match = scrapertoolsV2.find_single_match(data, r'URL=([^"]+)">') if not match else match
if not match:
from lib import jsunpack
try:
data = scrapertoolsV2.find_single_match(data.replace('\n', ''), r"(eval\s?\(function\(p,a,c,k,e,d.*?)</script>")
data = jsunpack.unpack(data)
logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
except:
logger.debug("##### The content is yet unpacked ##\n%s\n##" % data)
data = scrapertoolsV2.find_single_match(data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
data, c = unshortenit.unwrap_30x_only(data)
else:
data = match
if data.startswith('/'):
data = urlparse.urljoin("http://swzz.xyz", data)
if not "vcrypt" in data:
data = httptools.downloadpage(data).data
logger.debug("##### play /link/ data ##\n%s\n##" % data)
else:
data = item.url
return data
def menu(itemlist, title='', action='', url='', contentType='movie', args=[]):
# Function to simplify menu creation
frame = inspect.stack()[1]
filename = frame[0].f_code.co_filename
filename = os.path.basename(filename).replace('.py','')
# Call typo function
title = typo(title)
if contentType == 'movie': extra = 'movie'
else: extra = 'tvshow'
itemlist.append(Item(
channel = filename,
title = title,
action = action,
url = url,
extra = extra,
args = args,
contentType = contentType
))
# Apply auto Thumbnails at the menus
from channelselector import thumb
thumb(itemlist)
return itemlist
def typo(string, typography=''):
kod_color = '0xFF65B3DA' #'0xFF0081C2'
# Check if the typographic attributes are in the string or outside
if typography:
string = string + ' ' + typography
if config.get_localized_string(30992) in string:
string = string + ' >'
# If there are no attributes, it applies the default ones
attribute = ['[]','()','{}','submenu','color','bold','italic','_','--','[B]','[I]','[COLOR]']
movie_word_list = ['film', 'serie', 'tv', 'anime', 'cinema', 'sala']
search_word_list = ['cerca']
categories_word_list = ['genere', 'categoria', 'categorie', 'ordine', 'lettera', 'anno', 'alfabetico', 'a-z', 'menu']
if not any(word in string for word in attribute):
if any(word in string.lower() for word in search_word_list):
string = '[COLOR '+ kod_color +']' + string + '[/COLOR]'
elif any(word in string.lower() for word in categories_word_list):
string = ' > ' + string
elif any(word in string.lower() for word in movie_word_list):
string = '[B]' + string + '[/B]'
# Otherwise it uses the typographical attributes of the string
else:
if '[]' in string:
string = '[' + re.sub(r'\s\[\]','',string) + ']'
if '()' in string:
string = '(' + re.sub(r'\s\(\)','',string) + ')'
if '{}' in string:
string = '{' + re.sub(r'\s\{\}','',string) + '}'
if 'submenu' in string:
string = ' > ' + re.sub(r'\ssubmenu','',string)
if 'color' in string:
color = scrapertoolsV2.find_single_match(string,'color ([a-z]+)')
if color == 'kod' or color == '': color = kod_color
string = '[COLOR '+ color +']' + re.sub(r'\scolor\s([a-z]+)','',string) + '[/COLOR]'
if 'bold' in string:
string = '[B]' + re.sub(r'\sbold','',string) + '[/B]'
if 'italic' in string:
string = '[I]' + re.sub(r'\sitalic','',string) + '[/I]'
if '_' in string:
string = ' ' + re.sub(r'\s_','',string)
if '--' in string:
string = ' - ' + re.sub(r'\s--','',string)
return string
def match(item, patron='', patron_block='', headers='', url=''):
matches = []
url = url if url else item.url
data = httptools.downloadpage(url, headers=headers).data.replace("'", '"')
data = re.sub('\n|\t', '', data)
log('DATA= ', data)
if patron_block:
block = scrapertoolsV2.find_single_match(data, patron_block)
log('BLOCK= ',block)
else:
block = data
if patron:
matches = scrapertoolsV2.find_multiple_matches(block, patron)
log('MATCHES= ',matches)
return matches, data
def videolibrary(itemlist, item, typography=''):
if item.contentType != 'episode':
action = 'add_pelicula_to_library'
extra = 'findvideos'
contentType = 'movie'
else:
action = 'add_serie_to_library'
extra = 'episodios'
contentType = 'tvshow'
if not typography: typography = 'color kod bold'
title = typo(config.get_localized_string(30161) + ' ' + typography)
if inspect.stack()[1][3] == 'findvideos' and contentType == 'movie' or inspect.stack()[1][3] != 'findvideos' and contentType != 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel,
title=title,
contentType=contentType,
contentSerieName=item.fulltitle if contentType == 'tvshow' else '',
url=item.url,
action=action,
extra=extra,
contentTitle=item.fulltitle))
def nextPage(itemlist, item, data, patron, function_level=1):
# function_level selects which caller in the stack is re-invoked for the next page.
# Leave the default (1) when calling nextPage directly; increase it by one for each wrapper function in between.
next_page = scrapertoolsV2.find_single_match(data, patron)
if next_page != "":
if 'http' not in next_page:
next_page = scrapertoolsV2.find_single_match(item.url, 'https?://[a-z0-9.-]+') + next_page
log('NEXT= ', next_page)
itemlist.append(
Item(channel=item.channel,
action=inspect.stack()[function_level][3],
contentType=item.contentType,
title=typo(config.get_localized_string(30992), 'color kod bold'),
url=next_page,
args=item.args,
thumbnail=thumb()))
return itemlist
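# --- Illustrative usage sketch, not part of the module: a listing function passes
# --- the page HTML and a regex capturing the "next page" href (both patterns below
# --- are hypothetical). With the default function_level=1 the next-page item
# --- re-invokes the caller itself, here peliculas().
# def peliculas(item):
#     matches, data = support.match(item, patron='<a href="([^"]+)">([^<]+)</a>')
#     itemlist = [...]  # one Item per match
#     support.nextPage(itemlist, item, data, 'class="next" href="([^"]+)"')
#     return itemlist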
def server(item, data='', headers='', AutoPlay=True, CheckLinks=True):
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', item.channel)
log(__comprueba_enlaces__ )
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', item.channel)
log(__comprueba_enlaces_num__ )
if not data:
data = httptools.downloadpage(item.url, headers=headers).data
itemlist = servertools.find_video_items(data=str(data))
for videoitem in itemlist:
videoitem.title = "".join([item.title, ' ', typo(videoitem.title, 'color kod []')])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
if __comprueba_enlaces__ and CheckLinks:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
if AutoPlay:
autoplay.start(itemlist, item)
return itemlist
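# --- Illustrative usage sketch, not part of the module. server() extracts the
# --- known hosters from the page (or from the "data" string when it was already
# --- scraped), optionally checks the links and hands the list to autoplay.
# itemlist = support.server(item)                         # download item.url and parse it
# itemlist = support.server(item, data=scraped_html)      # parse links already scraped (hypothetical variable)
# itemlist = support.server(item, AutoPlay=False, CheckLinks=False)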
def aplay(item, itemlist, list_servers='', list_quality=''):
if inspect.stack()[1][3] == 'mainlist':
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
else:
autoplay.start(itemlist, item)
def log(stringa1="", stringa2="", stringa3="", stringa4="", stringa5=""):
# Helper to simplify logging
# Automatically prefixes the message with the caller's file name and function name
frame = inspect.stack()[1]
filename = frame[0].f_code.co_filename
filename = os.path.basename(filename)
logger.info("[" + filename + "] - [" + inspect.stack()[1][3] + "] " + str(stringa1) + str(stringa2) + str(stringa3) + str(stringa4) + str(stringa5))

View File

@@ -11,6 +11,10 @@ import xbmc
from platformcode import config, logger
logger.info("init...")
if os.path.isfile(os.path.join(config.get_data_path(), 'alfavorites-default.json')):
os.rename(os.path.join(config.get_data_path(), 'alfavorites-default.json'), os.path.join(config.get_data_path(), 'kodfavorites-default.json'))
if os.path.isfile(os.path.join(config.get_data_path(), 'alfa_db.sqlite')):
os.rename(os.path.join(config.get_data_path(), 'alfa_db.sqlite'), os.path.join(config.get_data_path(), 'kod_db.sqlite'))
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
sys.path.append(librerias)

View File

@@ -93,19 +93,19 @@ def faq(item):
if item.extra == "onoff_canales":
respuesta = platformtools.dialog_yesno(config.get_localized_string(60457), config.get_localized_string(60458))
if respuesta == 1:
from channels import setting
from specials import setting
setting.conf_tools(Item(extra='channels_onoff'))
elif item.extra == "trakt_sync":
respuesta = platformtools.dialog_yesno(config.get_localized_string(60457), config.get_localized_string(60459))
if respuesta == 1:
from channels import videolibrary
from specials import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))
elif item.extra == "tiempo_enlaces":
respuesta = platformtools.dialog_yesno(config.get_localized_string(60457), config.get_localized_string(60460))
if respuesta == 1:
from channels import videolibrary
from specials import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))
elif item.extra == "prob_busquedacont":
@@ -128,7 +128,7 @@ def faq(item):
config.get_localized_string(60465))
if respuesta == 1:
itemlist = []
from channels import setting
from specials import setting
new_item = Item(channel="setting", action="submenu_tools", folder=True)
itemlist.extend(setting.submenu_tools(new_item))
return itemlist
@@ -142,7 +142,7 @@ def faq(item):
elif item.extra == "buscador_juntos":
respuesta = platformtools.dialog_yesno(config.get_localized_string(60457), config.get_localized_string(60466))
if respuesta == 1:
from channels import search
from specials import search
search.settings("")
elif item.extra == "report_error":

View File

@@ -1229,7 +1229,7 @@ def busqueda_global(item, infoLabels, org_title=False):
new_item.extra = infoLabels.get("originaltitle", "")
new_item.category = item.contentType
from channels import search
from specials import search
return search.do_search(new_item, cat)
@@ -2188,7 +2188,7 @@ class images(xbmcgui.WindowDialog):
class Trailer(xbmcgui.WindowXMLDialog):
def Start(self, item, trailers):
self.item = item
from channels import trailertools
from specials import trailertools
self.video_url, self.windows = trailertools.buscartrailer(self.item.clone(), trailers=trailers)
self.doModal()

View File

@@ -56,7 +56,7 @@ def run(item=None):
category = dictCategory[config.get_setting("category")]
item = Item(channel="news", action="novedades", extra=category, mode = 'silent')
else:
from channels import side_menu
from specials import side_menu
item= Item()
item = side_menu.check_user_home(item)
item.start = True
@@ -140,18 +140,22 @@ def run(item=None):
# updater.update_channel(item.channel)
# Checks if channel exists
channel_file = os.path.join(config.get_runtime_path(),
'channels', item.channel + ".py")
logger.info("channel_file=%s" % channel_file)
if os.path.isfile(os.path.join(config.get_runtime_path(), 'channels', item.channel + ".py")):
CHANNELS = 'channels'
else:
CHANNELS ='specials'
channel_file = os.path.join(config.get_runtime_path(), CHANNELS, item.channel + ".py")
logger.info("channel_file= " + channel_file)
channel = None
if os.path.exists(channel_file):
try:
channel = __import__('channels.%s' % item.channel, None,
None, ["channels.%s" % item.channel])
channel = __import__(CHANNELS + '.' + item.channel, None, None, [CHANNELS + '.' + item.channel])
except ImportError:
exec "import channels." + item.channel + " as channel"
importer = "import " + CHANNELS + "." + item.channel + " as channel"
exec(importer)
logger.info("Running channel %s | %s" % (channel.__name__, channel.__file__))
@@ -223,7 +227,7 @@ def run(item=None):
# Special action for downloading all episodes from a serie
elif item.action == "download_all_episodes":
from channels import downloads
from specials import downloads
item.action = item.extra
del item.extra
downloads.save_download(item)
@@ -244,7 +248,7 @@ def run(item=None):
tecleado = platformtools.dialog_input(last_search)
if tecleado is not None:
if last_search_active and not tecleado.startswith("http"):
from channels import search
from specials import search
search.save_search(tecleado)
itemlist = channel.search(item, tecleado)
@@ -253,8 +257,9 @@ def run(item=None):
platformtools.render_items(itemlist, item)
# For all other actions
# For all other actions
else:
# import web_pdb; web_pdb.set_trace()
logger.info("Executing channel '%s' method" % item.action)
itemlist = getattr(channel, item.action)(item)
if config.get_setting('trakt_sync'):
@@ -291,8 +296,7 @@ def run(item=None):
import traceback
logger.error(traceback.format_exc())
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
"\\\\") + '([^.]+)\.py"'
patron = 'File "' + os.path.join(config.get_runtime_path(), CHANNELS, "").replace("\\", "\\\\") + '([^.]+)\.py"'
canal = scrapertools.find_single_match(traceback.format_exc(), patron)
platformtools.dialog_ok(
@@ -302,8 +306,7 @@ def run(item=None):
import traceback
logger.error(traceback.format_exc())
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
"\\\\") + '([^.]+)\.py"'
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\", "\\\\") + '([^.]+)\.py"'
canal = scrapertools.find_single_match(traceback.format_exc(), patron)
try:
@@ -430,7 +433,7 @@ def play_from_library(item):
else:
# Ventana emergente
from channels import videolibrary
from specials import videolibrary
p_dialog = platformtools.dialog_progress_bg(config.get_localized_string(20000), config.get_localized_string(70004))
p_dialog.update(0, '')
@@ -488,6 +491,6 @@ def play_from_library(item):
item = videolibrary.play(itemlist[seleccion])[0]
platformtools.play_video(item)
from channels import autoplay
from specials import autoplay
if (platformtools.is_playing() and item.action) or item.server == 'torrent' or autoplay.is_active(item.contentChannel):
break

View File

@@ -180,7 +180,7 @@ def render_items(itemlist, parent_item):
item.thumbnail = get_thumb("videolibrary_tvshow.png")
if unify_enabled and parent_item.channel != 'alfavorites':
if unify_enabled and parent_item.channel != 'kodfavorites':
# Formatear titulo con unify
item = unify.title_format(item)
else:
@@ -268,11 +268,11 @@ def render_items(itemlist, parent_item):
# ...forzamos segun el viewcontent
xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent)
elif parent_item.channel not in ["channelselector", "", "alfavorites"]:
elif parent_item.channel not in ["channelselector", "", "kodfavorites"]:
# ... o segun el canal
xbmcplugin.setContent(int(sys.argv[1]), "movies")
elif parent_item.channel == "alfavorites" and parent_item.action == 'mostrar_perfil':
elif parent_item.channel == "kodfavorites" and parent_item.action == 'mostrar_perfil':
xbmcplugin.setContent(int(sys.argv[1]), "movies")
# Fijamos el "breadcrumb"
@@ -470,7 +470,7 @@ def set_context_commands(item, parent_item):
# Si no se está dentro de Alfavoritos y hay los contextos de alfavoritos, descartarlos.
# (pasa al ir a un enlace de alfavoritos, si este se clona en el canal)
if parent_item.channel != 'alfavorites' and 'i_perfil' in command and 'i_enlace' in command:
if parent_item.channel != 'kodfavorites' and 'i_perfil' in command and 'i_enlace' in command:
continue
if "goto" in command:
@@ -481,7 +481,7 @@ def set_context_commands(item, parent_item):
(command["title"], "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(**command).tourl())))
# No añadir más opciones predefinidas si se está dentro de Alfavoritos
if parent_item.channel == 'alfavorites':
if parent_item.channel == 'kodfavorites':
return context_commands
# Opciones segun criterios, solo si el item no es un tag (etiqueta), ni es "Añadir a la videoteca", etc...
@@ -548,7 +548,7 @@ def set_context_commands(item, parent_item):
# Añadir a Alfavoritos (Mis enlaces)
if item.channel not in ["favorites", "videolibrary", "help", ""] and parent_item.channel != "favorites":
context_commands.append(('[COLOR blue]%s[/COLOR]' % config.get_localized_string(70557), "XBMC.RunPlugin(%s?%s)" %
(sys.argv[0], item.clone(channel="alfavorites", action="addFavourite",
(sys.argv[0], item.clone(channel="kodfavorites", action="addFavourite",
from_channel=item.channel,
from_action=item.action).tourl())))
@@ -596,7 +596,7 @@ def set_context_commands(item, parent_item):
if item.channel != "downloads" and downloadenabled != "false":
# Descargar pelicula
if item.contentType == "movie" and item.contentTitle:
if item.contentType == "movie":
context_commands.append((config.get_localized_string(60354), "XBMC.RunPlugin(%s?%s)" %
(sys.argv[0], item.clone(channel="downloads", action="save_download",
from_channel=item.channel, from_action=item.action)
@@ -966,7 +966,7 @@ def set_opcion(item, seleccion, opciones, video_urls):
# "Descargar"
elif opciones[seleccion] == config.get_localized_string(30153):
from channels import downloads
from specials import downloads
import xbmcaddon
import xbmcgui
__addon__ = xbmcaddon.Addon()
@@ -984,13 +984,13 @@ def set_opcion(item, seleccion, opciones, video_urls):
# "Quitar de favoritos"
elif opciones[seleccion] == config.get_localized_string(30154):
from channels import favorites
from specials import favorites
favorites.delFavourite(item)
salir = True
# "Añadir a favoritos":
elif opciones[seleccion] == config.get_localized_string(30155):
from channels import favorites
from specials import favorites
item.from_channel = "favorites"
favorites.addFavourite(item)
salir = True

View File

@@ -174,12 +174,12 @@ class SettingsWindow(xbmcgui.WindowXMLDialog):
channelpath = inspect.currentframe().f_back.f_back.f_code.co_filename
self.channel = os.path.basename(channelpath).replace(".py", "")
self.ch_type = os.path.basename(os.path.dirname(channelpath))
logger.info('PATH= ' + channelpath)
# Si no tenemos list_controls, hay que sacarlos del json del canal
if not self.list_controls:
# Si la ruta del canal esta en la carpeta "channels", obtenemos los controles y valores mediante chaneltools
if os.path.join(config.get_runtime_path(), "channels") in channelpath:
if os.path.join(config.get_runtime_path(), "channels") in channelpath or os.path.join(config.get_runtime_path(), "specials") in channelpath:
# La llamada se hace desde un canal
self.list_controls, default_values = channeltools.get_channel_controls_settings(self.channel)
@@ -544,7 +544,7 @@ class SettingsWindow(xbmcgui.WindowXMLDialog):
default = ""
c["default"] = c.get("default", default)
c["color"] = c.get("color", "0xFF0066CC")
c["color"] = c.get("color", "0xFFFFFFFF")
c["visible"] = c.get("visible", True)
c["enabled"] = c.get("enabled", True)

View File

@@ -51,7 +51,7 @@ def mark_auto_as_watched(item):
logger.debug("marcado")
item.playcount = 1
sync_with_trakt = True
from channels import videolibrary
from specials import videolibrary
videolibrary.mark_content_as_watched2(item)
break
@@ -326,7 +326,7 @@ def mark_season_as_watched_on_kodi(item, value=1):
def mark_content_as_watched_on_alfa(path):
from channels import videolibrary
from specials import videolibrary
from core import videolibrarytools
from core import scrapertools
from core import filetools

File diff suppressed because it is too large

View File

@@ -721,6 +721,14 @@ msgctxt "#60055"
msgid "Error of provider configuration in BD."
msgstr "Errore di configurazione del provider in BD."
msgctxt "#60056"
msgid "Videolibrary %s not configured"
msgstr "Videoteca %s non configurata"
msgctxt "#60057"
msgid "Videolibrary %s configured"
msgstr "Videoteca %s configurata"
msgctxt "#60058"
msgid "You need to restart Kodi for the changes to take effect."
msgstr "E' necessario riavviare Kodi affinchè le modifiche abbiano effetto."
@@ -729,6 +737,10 @@ msgctxt "#60059"
msgid "Congratulations, Kodi's video library has been configured correctly."
msgstr "Complimenti, la videoteca di Kodi è stata configurata correttamente."
msgctxt "#60060"
msgid "KOD Auto-configuration"
msgstr "KOD Auto-configurazione"
msgctxt "#60062"
msgid "Adding movies to your video library..."
msgstr "Aggiunta film alla videoteca..."
@@ -786,8 +798,8 @@ msgid "No coincidence"
msgstr "Nessuna coincidenza"
msgctxt "#60076"
msgid "New quality/server available in \nConfiguration"
msgstr "Nuova qualità/server disponibile in \nConfigurazione"
msgid "New quality/server available in configuration"
msgstr "Nuova qualità/server disponibile in configurazione"
msgctxt "#60077"
msgid "AutoPlay initialization error"
@@ -1886,7 +1898,7 @@ msgid "Uploading new data"
msgstr "Caricamento nuovi dati"
msgctxt "#60470"
msgid "Buscando en Tmdb......."
msgid "Searching in Tmdb......."
msgstr "Ricerca in Tmdb......."
msgctxt "#60471"
@@ -2334,7 +2346,7 @@ msgid " - Settings created"
msgstr "- Impostazioni create"
msgctxt "#60589"
msgid "- - No correction necessary"
msgid " - No correction necessary"
msgstr " - Nessuna correzione necessaria"
msgctxt "#60590"
@@ -2754,7 +2766,7 @@ msgid "Search for actor"
msgstr "Cerca attore"
msgctxt "#70012"
msgid "Beginnin"
msgid "Beginning"
msgstr "Inizio"
msgctxt "#70013"
@@ -2902,23 +2914,23 @@ msgid " My Account"
msgstr " Il Mio Account"
msgctxt "#70049"
msgid " Most Popular"
msgid "Most Popular"
msgstr "Più Popolari"
msgctxt "#70050"
msgid " Recommended Now"
msgid "Recommended Now"
msgstr "Da Vedere"
msgctxt "#70051"
msgid " Most Anticipated "
msgid "Most Anticipated "
msgstr "Più Attesi"
msgctxt "#70052"
msgid " Custom recommendations"
msgid "Custom recommendations"
msgstr "Raccomandazioni personalizzate"
msgctxt "#70053"
msgid " Most Viewed"
msgid "Most Viewed"
msgstr "Più Visti"
msgctxt "#70054"
@@ -2982,11 +2994,11 @@ msgid "In my Collection"
msgstr "Nella mia Collezione"
msgctxt "#70069"
msgid "Search %s in kod: %s"
msgstr "Cerca %s in kod: %s"
msgid "Search %s in KOD: %s"
msgstr "Cerca %s in KOD: %s"
msgctxt "#70070"
msgid " Search original title: %s"
msgid "Search original title: %s"
msgstr "Cerca il titolo originale: %s"
msgctxt "#70071"
@@ -2994,8 +3006,8 @@ msgid "Cast"
msgstr "Vedi Cast"
msgctxt "#70072"
msgid " Most Viewed"
msgstr " Più Viste"
msgid "Most Viewed"
msgstr "Più Viste"
msgctxt "#70073"
msgid "Most Anticipated"
@@ -3014,7 +3026,7 @@ msgid "Top rated"
msgstr "Più Votate"
msgctxt "#70077"
msgid " Most Viewed"
msgid "Most Viewed"
msgstr "Più Viste"
msgctxt "#70078"
@@ -3495,7 +3507,7 @@ msgstr "Vuoi annullare il processo?"
msgctxt "#70200"
msgid "Finishing and deleting data"
msgstr Fine e cancellazione dati"
msgstr "Fine e cancellazione dati"
msgctxt "#70201"
msgid "Mass Testing Tools"
@@ -3841,7 +3853,6 @@ msgctxt "#70291"
msgid "Error, during conversion"
msgstr "Errore, in conversione"
# Servers ----
msgctxt "#70292"
msgid "[%s] File no longer exist on this server."
msgstr "[%s] Il file non è più presente nel server."
@@ -3893,7 +3904,6 @@ msgstr "[%s] Questo server non funziona con la tua versione di Plex, prova ad ag
msgctxt "#70304"
msgid "[%s] This server requires updating python to version 2.7.9 or higher."
msgstr "[%s] Questo server richiede la versione 2.7.9 (O Maggiore) di Python."
# ============
msgctxt "#70305"
msgid "Search in channels"
@@ -4060,7 +4070,7 @@ msgid "[Trakt] Remove %s from your collection"
msgstr "[Trakt] Rimuovi %s dalla tua collezione"
msgctxt "#70346"
msgid "[Trakt] Add %s to your collection
msgid "[Trakt] Add %s to your collection"
msgstr "[Trakt] Aggiungi %s alla tua collezione"
msgctxt "#70347"
@@ -5017,7 +5027,7 @@ msgstr "Configura la rinumerazione della serie..."
msgctxt "#70587"
msgid "Set up"
msgstr ""
msgstr "Configurazione"
msgctxt "#70588"
msgid "No series found, look for a series and click on contextual menu"
@@ -5100,8 +5110,8 @@ msgid "List informations"
msgstr "Informazioni lista"
msgctxt "#70608"
msgid "Liste dei link"
msgstr "Listas de enlaces"
msgid "Link lists"
msgstr "Liste dei link"
msgctxt "#70609"
msgid "File_id of tinyupload link"
@@ -5388,7 +5398,7 @@ msgstr "Aggiungi un canale"
msgctxt "#70677"
msgid "Episode"
msgstr "%sx%s - Episodio %s"
msgstr "Episodio"
msgctxt "#70678"
msgid "From local file"

View File

@@ -137,7 +137,7 @@
<setting id="addon_update_timer" type="labelenum" values="0|6|12|24" label="70581" default="12"/>
<setting id="addon_update_message" type="bool" label="70582" default="false"/>
<setting label="Lista activa" type="text" id="lista_activa" default="alfavorites-default.json" visible="false"/>
<setting label="Lista activa" type="text" id="lista_activa" default="kodfavorites-default.json" visible="false"/>
<setting type="sep"/>
<setting label="70583" type="lsep"/>
@@ -275,7 +275,7 @@
<setting id="addon_update_timer" type="labelenum" values="0|6|12|24" label="Intervalo entre actualizaciones automáticas (horas)" default="12"/>
<setting id="addon_update_message" type="bool" label="¿Quiere ver mensajes de las actualizaciones?" default="false"/>
<setting label="Lista activa" type="text" id="lista_activa" default="alfavorites-default.json" visible="false"/>
<setting label="Lista activa" type="text" id="lista_activa" default="kodfavorites-default.json" visible="false"/>
<setting type="sep"/>
<setting label="Gestión de actualizaciones de otros addon relacionados con Alfa:" type="lsep"/>

View File

@@ -23,7 +23,7 @@
<height>34</height>
<width>725</width>
<font>font12_title</font>
<textcolor>0xFFFFA500</textcolor>
<textcolor>0xFFFFFFFF</textcolor>
<align>center</align>
<aligny>center</aligny>
<label>$ADDON[plugin.video.kod 70000]</label>

12
specials/__init__.py Normal file
View File

@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
import os
import sys
# Appends the main plugin dir to the PYTHONPATH if an internal package cannot be imported.
# Examples: In Plex Media Server all modules are under "Code.*" package, and in Enigma2 under "Plugins.Extensions.*"
try:
# from core import logger
import core
except:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

View File

@@ -14,7 +14,7 @@ from platformcode import config
from core import jsontools, tvdb
from core.item import Item
from platformcode import platformtools
from channels.support import typo, log
from core.support import typo, log
TAG_TVSHOW_RENUMERATE = "TVSHOW_AUTORENUMBER"
TAG_SEASON_EPISODE = "season_episode"
@@ -86,7 +86,7 @@ def write_data(channel, show, data):
log()
dict_series = jsontools.get_node_from_file(channel, TAG_TVSHOW_RENUMERATE)
tvshow = show.strip()
list_season_episode = dict_series.get(tvshow, {}).get(TAG_SEASON_EPISODE, [])
# list_season_episode = dict_series.get(tvshow, {}).get(TAG_SEASON_EPISODE, [])
if data:
dict_renumerate = {TAG_SEASON_EPISODE: data}
@@ -94,7 +94,7 @@ def write_data(channel, show, data):
else:
dict_series.pop(tvshow, None)
result, json_data = jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE)
result = jsontools.update_node(dict_series, channel, TAG_TVSHOW_RENUMERATE)[0]
if result:
if data:
@@ -103,7 +103,7 @@ def write_data(channel, show, data):
message = config.get_localized_string(60444)
else:
message = config.get_localized_string(70593)
heading = show.strip()
platformtools.dialog_notification(heading, message)

View File

@@ -128,9 +128,9 @@
"label": "@70238",
"lvalues": [
"@70244",
"Reordenar"
"@70245"
],
"default": 1,
"default": 0,
"enabled": true,
"visible": true
},
@@ -139,13 +139,13 @@
"type": "list",
"label": "@70246",
"lvalues": [
"Esp, Lat, Sub, Eng, Vose",
"Esp, Sub, Lat, Eng, Vose",
"Eng, Sub, Vose, Esp, Lat",
"Vose, Eng, Sub, Esp, Lat"
"Ita, Sub, Eng, Vos, Vosi",
"Eng, Ita, Sub, Vos, Vosi",
"Sub, Ita, Eng, Vos, Vosi",
"Eng, Sub, Ita, Vos, Vosi"
],
"default": 0,
"enabled": "eq(-1,'Reordenar')",
"enabled": "eq(-1,'@70245')",
"visible": true
},
{

View File

@@ -9,12 +9,7 @@ import time
import unicodedata
from core import filetools
from core import jsontools
from core import scraper
from core import scrapertools
from core import servertools
from core import videolibrarytools
from core import filetools, jsontools, scraper, scrapertools, servertools, videolibrarytools, support
from core.downloader import Downloader
from core.item import Item
from platformcode import config, logger
@@ -90,7 +85,7 @@ def mainlist(item):
estados = [i.downloadStatus for i in itemlist]
# Si hay alguno completado
# Si hay alguno completado
if 2 in estados:
itemlist.insert(0, Item(channel=item.channel, action="clean_ready", title=config.get_localized_string(70218),
contentType=item.contentType, contentChannel=item.contentChannel,
@@ -104,22 +99,20 @@ def mainlist(item):
# Si hay alguno pendiente
if 1 in estados or 0 in estados:
itemlist.insert(0, Item(channel=item.channel, action="download_all", title=config.get_localized_string(70220),
itemlist.insert(0, Item(channel=item.channel, action="download_all", title=support.typo(config.get_localized_string(70220),'bold'),
contentType=item.contentType, contentChannel=item.contentChannel,
contentSerieName=item.contentSerieName, text_color="green"))
contentSerieName=item.contentSerieName))
if len(itemlist):
itemlist.insert(0, Item(channel=item.channel, action="clean_all", title=config.get_localized_string(70221),
itemlist.insert(0, Item(channel=item.channel, action="clean_all", title=support.typo(config.get_localized_string(70221),'bold'),
contentType=item.contentType, contentChannel=item.contentChannel,
contentSerieName=item.contentSerieName, text_color="red"))
contentSerieName=item.contentSerieName))
if not item.contentType == "tvshow" and config.get_setting("browser", "downloads") == True:
itemlist.insert(0, Item(channel=item.channel, action="browser", title=config.get_localized_string(70222),
url=DOWNLOAD_PATH, text_color="yellow"))
itemlist.insert(0, Item(channel=item.channel, action="browser", title=support.typo(config.get_localized_string(70222),'bold'),url=DOWNLOAD_PATH))
if not item.contentType == "tvshow":
itemlist.insert(0, Item(channel=item.channel, action="settings", title=config.get_localized_string(70223),
text_color="blue"))
itemlist.insert(0, Item(channel=item.channel, action="settings", title= support.typo(config.get_localized_string(70223),'bold color kod')))
return itemlist
@@ -274,8 +267,7 @@ def move_to_libray(item):
library_path = filetools.join(config.get_videolibrary_path(), *filetools.split(item.downloadFilename))
final_path = download_path
if config.get_setting("library_add", "downloads") == True and config.get_setting("library_move",
"downloads") == True:
if config.get_setting("library_add", "downloads") == True and config.get_setting("library_move", "downloads") == True:
if not filetools.isdir(filetools.dirname(library_path)):
filetools.mkdir(filetools.dirname(library_path))

View File

@@ -7,8 +7,7 @@
import re
import urllib
from channels import support
from core import httptools, scrapertools, tmdb
from core import httptools, scrapertools, tmdb, support
from core.item import Item
from platformcode import logger
@@ -85,5 +84,5 @@ def tvoggi(item):
def do_search(item):
from channels import search
from specials import search
return search.do_search(item)

View File

@@ -30,7 +30,7 @@ def fechahora_actual():
# Helpers para listas
# -------------------
PREFIJO_LISTA = 'alfavorites-'
PREFIJO_LISTA = 'kodfavorites-'
# Devuelve el nombre de la lista activa (Ej: alfavorites-default.json)
def get_lista_activa():

View File

@@ -257,7 +257,7 @@ def novedades(item):
list_canales, any_active = get_channels_list()
if config.is_xbmc():
from channels import side_menu
from specials import side_menu
if mode=='silent' and any_active and len(list_canales[item.extra]) > 0:
side_menu.set_menu_settings(item)
aux_list=[]

View File

@@ -13,7 +13,6 @@ from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from core import tmdb
import xbmc, xbmcaddon
addon = xbmcaddon.Addon('metadata.themoviedb.org')
def_lang = addon.getSetting('language')
@@ -437,7 +436,7 @@ def show_result(item):
def channel_search(search_results, channel_parameters, tecleado):
try:
exec "from channels import " + channel_parameters["channel"] + " as module"
exec("from specials import " + channel_parameters["channel"] + " as module")
mainlist = module.mainlist(Item(channel=channel_parameters["channel"]))
search_items = [item for item in mainlist if item.action == "search"]
if not search_items:

View File

@@ -624,7 +624,7 @@ def channel_search(queue, channel_parameters, category, title_year, tecleado):
title_search = urllib.unquote_plus(tecleado)
exec "from channels import " + channel_parameters["channel"] + " as module"
exec "from specials import " + channel_parameters["channel"] + " as module"
mainlist = module.mainlist(Item(channel=channel_parameters["channel"]))
for item in mainlist:

View File

@@ -89,7 +89,7 @@ def get_start_page():
if custom_start == False:
item = Item(channel="news", action="novedades", extra=category, mode='silent')
else:
from channels import side_menu
from specials import side_menu
item = Item()
item = side_menu.check_user_home(item)
return item

View File

@@ -15,7 +15,7 @@ from platformcode import config, logger
from platformcode import platformtools
import xbmc, xbmcaddon
from channelselector import get_thumb
from channels.support import typo, thumb
from core.support import typo, thumb
addon = xbmcaddon.Addon('metadata.themoviedb.org')
def_lang = addon.getSetting('language')
@@ -139,7 +139,7 @@ def busqueda(item):
new_item.extra = item.contentTitle.replace("+", " ")
new_item.category = item.extra
from channels import search
from specials import search
return search.do_search(new_item, cat)
@@ -1975,7 +1975,7 @@ def imagenes(item):
return itemlist
if item.images:
from channels import infoplus
from specials import infoplus
for key, value in item.images.iteritems():
if key == "tmdb" and "Tmdb" in item.title:
if item.folder:

View File

@@ -422,7 +422,7 @@ def get_episodes(item):
def findvideos(item):
from channels import autoplay
from specials import autoplay
logger.info()
# logger.debug("item:\n" + item.tostring('\n'))

View File

@@ -23,7 +23,7 @@ except:
from core import channeltools, filetools, videolibrarytools
from platformcode import logger
from platformcode import platformtools
from channels import videolibrary
from specials import videolibrary
from lib import generictools