- Fix item.nfo and item.strm_path

- Various fixes
  - N.B.: where possible, use item.clone() instead of Item() (see the sketch below)
This commit is contained in:
Alhaziel01
2020-06-19 15:27:20 +02:00
parent a43d0d7050
commit c06bbb9795
30 changed files with 213 additions and 1119 deletions
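The recurring refactor in the diffs below replaces hand-built support.Item(...) / Item(...) calls with item.clone(...), which copies the source item's fields and overrides only the keyword arguments it receives. A minimal sketch of the pattern follows; the Item class here is illustrative only, not the project's actual core.item implementation.

# --- Illustrative sketch (not part of the commit): Item() vs item.clone() ---
import copy

class Item(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def clone(self, **overrides):
        new_item = copy.deepcopy(self)        # carry over every field of the source item
        new_item.__dict__.update(overrides)   # override only what the caller passes
        return new_item

item = Item(channel='example', contentType='movie', fulltitle='Some title',
            thumbnail='thumb.png', url='https://example.org/page')

# Before: rebuild the item by hand, re-passing fields it already carries
old_style = Item(channel=item.channel, contentType=item.contentType,
                 fulltitle=item.fulltitle, thumbnail=item.thumbnail,
                 action='play', url='https://example.org/video', server='directo')

# After: clone the current item and override only what changes
new_style = item.clone(action='play', url='https://example.org/video', server='directo')

assert new_style.fulltitle == old_style.fulltitle and new_style.server == 'directo'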

View File

@@ -142,10 +142,7 @@ def findvideos(item):
if 'https' not in item.url:
url = support.match(item, url='https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/playlist.m3u')[1]
url = url.split()[-1]
itemlist.append(
support.Item(action= 'play',
url= 'https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/' + url,
server= 'directo'))
itemlist.append(item.clone(action= 'play', url= 'https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/' + url, server= 'directo'))
elif 'adf.ly' in item.url:
from servers.decrypters import adfly
@@ -170,11 +167,6 @@ def findvideos(item):
elif url.startswith('/'): url = 'https:/' + url
itemlist.append(
support.Item(channel=item.channel,
action="play",
title='Diretto',
url=url,
server='directo'))
itemlist.append(item.clone(action="play", title='Diretto', url=url, server='directo'))
return support.server(item, itemlist=itemlist)

View File

@@ -84,15 +84,7 @@ def peliculas(item):
action = 'findvideos'
def itemlistHook(itemlist):
if page:
itemlist.append(
support.Item(channel=item.channel,
action = item.action,
contentType=item.contentType,
title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),
url=item.url,
page= page,
args=item.args,
thumbnail=support.thumb()))
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), page=page, thumbnail=support.thumb()))
return itemlist
else:
pagination = ''
@@ -133,17 +125,7 @@ def findvideos(item):
if url:
links = support.match(url, patron=r'(?:<source type="[^"]+"\s*src=|file:\s*)"([^"]+)"', headers=headers, debug=False).matches
for link in links:
itemlist.append(
support.Item(channel=item.channel,
action="play",
title='Diretto',
quality='',
url=link,
server='directo',
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
itemlist.append(item.clone(action="play", title='Diretto', url=link, server='directo'))
return support.server(item, itemlist=itemlist)

View File

@@ -1,11 +0,0 @@
{
"id": "animesubita",
"name": "AnimeSubIta",
"active": false,
"language": ["sub-ita"],
"thumbnail": "animesubita.png",
"bannermenu": "animesubita.png",
"categories": ["anime", "vos"],
"settings": []
}

View File

@@ -1,123 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per AnimeSubIta
# ------------------------------------------------------------
from core import support
host = support.config.get_channel_url()
headers = {'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
@support.menu
def mainlist(item):
anime = ['/lista-anime/',
('Ultimi Episodi',['/category/ultimi-episodi/', 'peliculas', 'updated']),
('in Corso',['/category/anime-in-corso/', 'peliculas', 'alt']),
('Generi',['/generi/', 'genres', 'alt'])]
return locals()
def newest(categoria):
support.log(categoria)
itemlist = []
item = support.Item()
try:
if categoria == "anime":
item.url = host
item.args = "updated"
itemlist = peliculas(item)
if itemlist[-1].action == "ultimiep":
itemlist.pop()
# Continua l'esecuzione in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
support.log(texto)
item.url = host + "/?s=" + texto
item.args = 'alt'
try:
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []
@support.scrape
def genres(item):
blacklist= ['Anime In Corso','Ultimi Episodi']
patronMenu=r'<li><a title="[^"]+" href="(?P<url>[^"]+)">(?P<title>[^<]+)</a>'
action = 'peliculas'
return locals()
@support.scrape
def peliculas(item):
anime = True
if item.args == 'updated':
patron = r'<div class="post-thumbnail">\s*<a href="(?P<url>[^"]+)" title="(?P<title>.*?)\s*Episodio (?P<episode>\d+) (?P<lang>[a-zA-Z-\s]+)[^"]*"> <img[^src]+src="(?P<thumb>[^"]+)"'
patronNext = r'<link rel="next" href="([^"]+)"\s*/>'
action = 'findvideos'
elif item.args == 'alt':
patron = r'<div class="post-thumbnail">\s*<a href="(?P<url>[^"]+)" title="(?P<title>.*?)(?: [Oo][Aa][Vv])?(?:\s*(?P<lang>[Ss][Uu][Bb].[Ii][Tt][Aa]))[^"]+">\s*<img[^src]+src="(?P<thumb>[^"]+)"'
patronNext = r'<link rel="next" href="([^"]+)"\s*/>'
action = 'episodios'
else:
pagination = ''
patronBlock = r'<ul class="lcp_catlist"[^>]+>(?P<block>.*?)</ul>'
patron = r'<a href="(?P<url>[^"]+)"[^>]+>(?P<title>.*?)(?: [Oo][Aa][Vv])?(?:\s*(?P<lang>[Ss][Uu][Bb].[Ii][Tt][Aa])[^<]+)?</a>'
action = 'episodios'
return locals()
@support.scrape
def episodios(item):
anime = True
patron = r'<td style="[^"]*?">\s*.*?<strong>(?P<episode>[^<]+)</strong>\s*</td>\s*<td[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img src="(?P<thumb>[^"]+?)"[^>]+>'
return locals()
def findvideos(item):
support.log(item)
itemlist = []
if item.args == 'updated':
ep = support.match(item.fulltitle, patron=r'(\d+)').match
item.url = support.re.sub(r'episodio-\d+-|oav-\d+-'+ep, '',item.url)
if 'streaming' not in item.url: item.url = item.url.replace('sub-ita','sub-ita-streaming')
item.url = support.match(item, patron= ep + r'[^>]+>[^>]+>[^>]+><a href="([^"]+)"').match
# post
url = host + '/' + support.match(item.url, patron=r'(episodio\d*.php.*?)"').match.replace('%3F','?').replace('%3D','=')
headers['Referer'] = url
cookies = ""
matches = support.re.compile('(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""), support.re.DOTALL).findall(support.config.get_cookie_data())
for cookie in matches:
cookies += cookie.split('\t')[5] + "=" + cookie.split('\t')[6] + ";"
headers['Cookie'] = cookies[:-1]
url = support.match(url, patron=r'<source src="([^"]+)"[^>]+>').match
itemlist.append(
support.Item(channel=item.channel,
action="play",
title='Diretto',
url=url + '|' + support.urllib.urlencode(headers),
server='directo'))
return support.server(item,itemlist=itemlist)

View File

@@ -1,10 +0,0 @@
{
"id": "animetubeita",
"name": "AnimeTubeITA",
"active": false,
"language": ["sub-ita"],
"thumbnail": "animetubeita.png",
"bannermenu": "animetubeita.png",
"categories": ["anime","vos"],
"settings": []
}

View File

@@ -1,137 +0,0 @@
# -*- coding: utf-8 -*-
# ----------------------------------------------------------
# Canale per animetubeita
# ----------------------------------------------------------
import re
import urllib
from core import support
host = support.config.get_channel_url()
headers = {'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'}
@support.menu
def mainlist(item):
anime = [('Generi',['/generi', 'genres', 'genres']),
('Ordine Alfabetico',['/lista-anime', 'peliculas', 'list']),
('In Corso',['/category/serie-in-corso/', 'peliculas', 'in_progress'])
]
return locals()
@support.scrape
def genres(item):
blacklist = ['Ultimi Episodi', 'Serie in Corso']
patronMenu = r'<li[^>]+><a href="(?P<url>[^"]+)" >(?P<title>[^<]+)</a>'
action = 'peliculas'
return locals()
def search(item, text):
support.log(text)
item.url = host + '/lista-anime'
item.args = 'list'
item.search = text
try:
return peliculas(item)
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []
def newest(categoria):
support.log(categoria)
item = support.Item()
try:
if categoria == "anime":
item.contentType='tvshow'
item.url = host
item.args = "last"
return peliculas(item)
# Continua la ricerca in caso di errore
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
@support.scrape
def peliculas(item):
anime = True
if not item.search: pagination = ''
action = 'episodios'
if item.args == 'list':
search = item.search
patronBlock = r'<ul class="page-list ">(?P<block>.*?)<div class="wprc-container'
patron = r'<li.*?class="page_.*?href="(?P<url>[^"]+)">(?P<title>.*?) Sub Ita'
elif item.args == 'last':
action = 'findvideos'
item.contentType='episode'
patronBlock = r'<div class="blocks">(?P<block>.*?)<div id="sidebar'
patron = r'<h2 class="title"><a href="(?P<url>[^"]+)" [^>]+>.*?<img.*?src="(?P<thumb>[^"]+)".*?<strong>Titolo</strong></td>.*?<td>(?P<title>.*?)\s*Episodio\s*(?P<episode>\d+)[^<]+</td>.*?<td><strong>Trama</strong></td>\s*<td>(?P<plot>[^<]+)<'
elif item.args in ['in_progress','genres']:
patronBlock = r'<div class="blocks">(?P<block>.*?)<div id="sidebar'
patron = r'<h2 class="title"><a href="(?P<url>[^"]+)"[^>]+>(?P<title>.*?)\s* Sub Ita[^<]+</a></h2>.*?<img.*?src="(?P<thumb>.*?)".*?<td><strong>Trama</strong></td>.*?<td>(?P<plot>[^<]+)<'
patronNext = r'href="([^"]+)" >&raquo;'
else:
patronBlock = r'<div class="blocks">(?P<block>.*?)<div id="sidebar'
patron = r'<img.*?src="(?P<thumb>[^"]+)".*?<strong>Titolo</strong></td>.*?<td>\s*(?P<title>.*?)\s*Episodio[^<]+</td>.*?<td><strong>Trama</strong></td>\s*<td>(?P<plot>[^<]+)<.*?<a.*?href="(?P<url>[^"]+)"'
patronNext = r'href="([^"]+)" >&raquo;'
return locals()
@support.scrape
def episodios(item):
patronBlock = r'<h6>Episodio</h6>(?P<block>.*?)(?:<!--|</table>)'
patron = r'<strong>(?P<title>[^<]+)</strong>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+><a href="http://link\.animetubeita\.com/2361078/(?P<url>[^"]+)"'
action = 'findvideos'
return locals()
def findvideos(item):
itemlist=[]
if item.args == 'last':
match = support.match(item, patron=r'href="(?P<url>[^"]+)"[^>]+><strong>DOWNLOAD &amp; STREAMING</strong>').match
if match:
patronBlock = r'<h6>Episodio</h6>(?P<block>.*?)(?:<!--|</table>)'
patron = r'<a href="http://link\.animetubeita\.com/2361078/(?P<url>[^"]+)"'
match = support.match(match, patron=patron, patronBlock=patronBlock, headers=headers).match
else: return itemlist
if match: item.url = match[-1]
else: return itemlist
data = support.httptools.downloadpage(item.url, headers=headers).data
cookies = ""
matches = re.compile('(.animetubeita.com.*?)\n', re.DOTALL).findall(support.config.get_cookie_data())
for cookie in matches:
name = cookie.split('\t')[5]
value = cookie.split('\t')[6]
cookies += name + "=" + value + ";"
headers['Referer'] = item.url
headers['Cookie'] = cookies[:-1]
url = support.scrapertools.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
if not url: url = support.scrapertools.find_single_match(data, 'file: "([^"]+)"')
if url:
url += '|' + urllib.urlencode(headers)
itemlist.append(
support.Item(channel=item.channel,
action="play",
title='diretto',
server='directo',
quality='',
url=url,
thumbnail=item.thumbnail,
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
return support.server(item, itemlist=itemlist)

View File

@@ -61,7 +61,7 @@ def newest(categoria):
except:
import sys
for line in sys.exc_info():
support.log({0}.format(line))
support.log(line)
return []
return itemlist
@@ -108,16 +108,5 @@ def findvideos(item):
itemlist = []
if item.contentType != 'episode': item.contentType = 'movie'
video = support.match(html.data, patron=r'<source src="([^"]+)"').match
itemlist.append(
support.Item(
channel=item.channel,
action="play",
title='Diretto',
quality='',
url=video,
server='directo',
fulltitle=item.fulltitle,
show=item.show,
contentType=item.contentType,
folder=False))
itemlist.append(item.clone(action="play", title='Diretto', url=video, server='directo'))
return support.server(item, itemlist=itemlist)

View File

@@ -156,20 +156,7 @@ def findvideos(item):
if serverid == '18':
url = support.match('%s/ajax/episode/serverPlayer?id=%s' % (host, ID), patron=r'source src="([^"]+)"', debug=False).match
itemlist.append(
support.Item(
channel=item.channel,
action="play",
title='diretto',
quality='',
url=url,
server='directo',
fulltitle=item.fulltitle,
contentSerieName=item.contentSerieName,
contentTitle=item.contentTitle,
show=item.show,
contentType=item.contentType,
folder=False))
itemlist.append(item.clone(action="play", title='diretto', url=url, server='directo'))
elif serverid == '26':
matches = support.match('%s/ajax/episode/serverPlayer?id=%s' % (host, item.url.split('/')[-1]), patron=r'<a href="([^"]+)"', ).matches

View File

@@ -135,7 +135,7 @@ def episodios(item):
data = support.match(item.url, headers=headers).data
support.log(data)
if 'TUTTA LA ' in data:
folderUrl = scrapertools.find_single_match(data, 'TUTTA LA \w+\s+(?:&#8211;|-)\s+<a href="?([^" ]+)')
folderUrl = scrapertools.find_single_match(data, r'TUTTA LA \w+\s+(?:&#8211;|-)\s+<a href="?([^" ]+)')
data = httptools.downloadpage(folderUrl).data
patron = r'<a href="(?P<url>[^"]+)[^>]+>(?P<title>[^<]+)'
sceneTitle = True
@@ -181,18 +181,7 @@ def findvideos(item):
matches = support.match(streaming, patron = r'<td><a.*?href=([^ ]+) [^>]+>([^<]+)<').matches
for scrapedurl, scrapedtitle in matches:
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
itemlist.append(
Item(channel=item.channel,
action="play",
title=scrapedtitle,
url=scrapedurl,
server=scrapedtitle,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=quality,
contentType=item.contentType,
folder=False))
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, server=scrapedtitle, quality=quality))
support.log()
@@ -213,7 +202,7 @@ def findvideos(item):
itemlist = support.server(item, itemlist=itemlist)
# Extract the quality format
patronvideos = '([\w.]+)</strong></div></td>'
patronvideos = r'([\w.]+)</strong></div></td>'
support.addQualityTag(item, itemlist, data, patronvideos)
return itemlist
@@ -231,24 +220,13 @@ def findvid_serie(item):
# Estrae i contenuti
matches = support.match(html, patron=r'<a href=(?:")?([^ "]+)[^>]+>(?!<!--)(.*?)(?:</a>|<img)').matches
for url, server in matches:
item = Item(channel=item.channel,
action="play",
title=server,
url=url,
server=server,
fulltitle=item.fulltitle,
show=item.show,
quality=blktxt,
contentType=item.contentType,
folder=False)
item = item.clone(action="play", title=server, url=url, server=server, quality=blktxt)
if 'swzz' in item.url: item.url = support.swzz_get_url(item)
itemlist.append(item)
support.log()
itemlist = []
lnkblk = []
lnkblkp = []
data = re.sub(r'((?:<p>|<strong>)?[^\d]*\d*(?:&#215;|×)[0-9]+[^<]+)', '' ,item.url)

View File

@@ -68,14 +68,7 @@ def findvideos(item):
matches = support.match(item, patron=r'filename: "(.*?)"').matches
for url in matches:
itemlist.append(
Item(
channel=item.channel,
action="play",
title='Diretto',
server='directo',
url=host + url,
folder=False))
itemlist.append(item.clone(action="play", title='Diretto', server='directo', url=host + url))
return support.server(item, itemlist=itemlist)

View File

@@ -120,14 +120,7 @@ def findvideos(item):
return episodios(item)
if 'vvvvid' in matches.data:
itemlist.append(
support.Item(channel=item.channel,
action="play",
contentType=item.contentType,
title='vvvid',
url=support.match(matches.data, patron=r'(http://www.vvvvid[^"]+)').match,
server='vvvvid',
))
itemlist.append(item.clone(action="play", title='VVVVID', url=support.match(matches.data, patron=r'(http://www.vvvvid[^"]+)').match, server='vvvvid'))
else:
# matches.matches.sort()
support.log('VIDEO')
@@ -139,17 +132,7 @@ def findvideos(item):
language = 'Sub-' + language
quality = url.split('/')[-1].split('?')[0]
itemlist.append(
support.Item(channel=item.channel,
action="play",
contentType=item.contentType,
title=language,
url=url,
contentLanguage = language,
quality = quality,
order = quality.replace('p','').zfill(4),
server='directo',
))
itemlist.append(item.clone(action="play", title=language, url=url, contentLanguage = language, quality = quality, order = quality.replace('p','').zfill(4), server='directo',))
itemlist.sort(key=lambda x: (x.title, x.order), reverse=False)
return support.server(item, itemlist=itemlist)

View File

@@ -100,7 +100,6 @@ def genres(item):
def search(item, text):
support.log('search', item)
itemlist = []
text = text.replace(' ', '+')
item.url = host + '?s=' + text
try:
@@ -133,7 +132,7 @@ def newest(categoria):
except:
import sys
for line in sys.exc_info():
support.log('newest log: ', {0}.format(line))
support.log('newest log: ', line)
return []
return itemlist
@@ -163,16 +162,16 @@ def findvideos(item):
titles = support.typo(series.upper().replace('-', ' '), 'bold color kod')
goseries = support.typo("Vai alla Serie:", ' bold color kod')
itemlist.append(
Item(channel=item.channel,
title=goseries + titles,
fulltitle=titles,
show=series,
contentType='tvshow',
contentSerieName=series,
url=host+"/serietv/"+series,
action='episodios',
contentTitle=titles,
plot = "Vai alla Serie " + titles + " con tutte le puntate",
))
item.clone(channel=item.channel,
title=goseries + titles,
fulltitle=titles,
show=series,
contentType='tvshow',
contentSerieName=series,
url=host+"/serietv/"+series,
action='episodios',
contentTitle=titles,
plot = "Vai alla Serie " + titles + " con tutte le puntate",
))
return itemlist

View File

@@ -1,61 +0,0 @@
{
"id": "filmsenzalimiticc",
"name": "Filmsenzalimiti CC",
"active": false,
"language": ["ita"],
"thumbnail": "filmsenzalimiticc.png",
"banner": "",
"categories": ["movie","tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Includi in Novità - Film",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,313 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Filmsenzalimiti CC
# ------------------------------------------------------------
import re
try:
import urlparse
except:
import urllib.parse as urlparse
from core import scrapertools, servertools, httptools
from core import tmdb
from core.item import Item
from platformcode import config
from platformcode import logger
from specials import autoplay
# Necessario per Autoplay
host = config.get_channel_url()
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
# Necessario per Verifica Link
checklinks = config.get_setting('checklinks', 'filmsenzalimiticc')
checklinks_number = config.get_setting('checklinks_number', 'filmsenzalimiticc')
headers = [['Referer', host]]
def mainlist(item):
logger.info('[filmsenzalimiticc.py] mainlist')
# Menu Principale
itemlist = [Item(channel=item.channel,
action='video',
title='Film',
url=host,
contentType='movie',
thumbnail=''),
Item(channel=item.channel,
action='sottomenu_film',
title='Categorie Film',
url=host,
contentType='movie',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Serie TV',
url=host+'/serie-tv/',
contentType='tvshow',
thumbnail=''),
Item(channel=item.channel,
action='sottomenu_serie',
title='[B]Categorie Serie TV[/B]',
thumbnail=''),
Item(channel=item.channel,
action='search',
extra='tvshow',
title='[B]Cerca... (non funziona)[/B]',
thumbnail='')
]
return itemlist
def search(item, texto):
logger.info('[filmsenzalimiticc.py] search')
item.url = host + '/?s=' + texto
try:
return video(item)
# Continua la ricerca in caso di errore .
except:
import sys
for line in sys.exc_info():
logger.error('%s' % line)
return []
def sottomenu_film(item):
logger.info('[filmsenzalimiticc.py] sottomenu_film')
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data
# Estrae i contenuti
patron = "<li><a href='([^']+)'>(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=__channel__,
action='video',
contentType=item.contentType,
title=scrapedtitle,
url=scrapedurl))
# Elimina le Serie al Sottomenù
itemlist.pop(3)
itemlist.pop(29)
itemlist.pop(29)
itemlist.pop(32)
return itemlist
def sottomenu_serie(item):
logger.info('[seriehd.py] sottomenu_serie')
itemlist = [
Item(channel=item.channel,
action='video',
title='Serie TV HD',
url=host+'/watch-genre/serie-altadefinizione/',
contentType='tvshow',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Miniserie',
url=host+'/watch-genre/miniserie/',
contentType='tvshow',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Programmi TV',
url=host+'/watch-genre/programmi-tv/',
contentType='tvshow',
thumbnail='')
]
return itemlist
def video(item):
logger.info('[filmsenzalimiticc.py] video')
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
# Estrae i contenuti
patron = r'<div class="mediaWrap mediaWrapAlt">.*?<a href="([^"]+)".*?src="([^"]+)".*?<p>([^"]+) (\(.*?)streaming<\/p>.*?<p>\s*(\S+).*?<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedyear = scrapertools.decodeHtmlentities(scrapedyear)
scrapedquality = scrapertools.decodeHtmlentities(scrapedquality)
year = scrapedyear.replace('(','').replace(')','')
infolabels = {}
if year:
infolabels['year'] = year
title = scrapedtitle + ' '+ scrapedyear +' [' + scrapedquality + ']'
# Seleziona fra Serie TV e Film
if item.contentType == 'movie':
azione = 'findvideos'
tipologia = 'movie'
if item.contentType == 'tvshow':
azione='episodios'
tipologia = 'tv'
itemlist.append(
Item(channel=item.channel,
action=azione,
contentType=item.contentType,
title=title,
fulltitle=scrapedtitle,
text_color='azure',
url=scrapedurl,
thumbnail=scrapedthumbnail,
infoLabels=infolabels,
show=scrapedtitle))
# Next page
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">')
if next_page != '':
itemlist.append(
Item(channel=item.channel,
action='film',
title='[COLOR lightgreen]' + config.get_localized_string(30992) + '[/COLOR]',
url=next_page,
contentType=item.contentType,
thumbnail='http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item): # Questa def. deve sempre essere nominata findvideos
logger.info('[filmsenzalimiticc.py] findvideos')
itemlist = []
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# Aggira protectlink
if 'protectlink' in data:
urls = scrapertools.find_multiple_matches(data, r'<iframe src="[^=]+=(.*?)"')
for url in urls:
url= url.decode('base64')
if '\t' in url: #fix alcuni link presentano una tabulazione finale.
url = url[:-1]
data += '\t' + url
if 'nodmca' in data: #fix player Openload sezione Serie TV
page = httptools.downloadpage(url, headers=headers).data
data += '\t' + scrapertools.find_single_match(page,'<meta name="og:url" content="([^=]+)">')
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle + ' - [[COLOR limegreen]'+videoitem.title+'[/COLOR] ]'
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = item.channel
videoitem.contentType = item.contentType
# Link Aggiungi alla Libreria
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findservers':
itemlist.append(
Item(channel=item.channel, title='[COLOR lightblue][B]Aggiungi alla videoteca[/B][/COLOR]', url=item.url,
action='add_pelicula_to_library', extra='findservers', contentTitle=item.contentTitle))
# Necessario per FilterTools
# itemlist = filtertools.get_links(itemlist, item, list_language)
# Necessario per AutoPlay
autoplay.start(itemlist, item)
return itemlist
def episodios(item): # Questa def. deve sempre essere nominata episodios
logger.info('[filmsenzalimiticc.py] episodios')
itemlist = []
# Trova le Stagioni
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
# Estrae i contenuti
patron = r'<iframe src="([^"]+)".*?>'
url = scrapertools.find_single_match(data, patron)
# Carica la pagina
data = httptools.downloadpage(url).data.replace('\t', '').replace('\n', '')
# Estrae i contenuti
section_stagione = scrapertools.find_single_match(data, r'Stagioni<\/a>(.*?)<\/ul>')
patron = r'<a href="([^"]+)" >.*?<\/i>\s(.*?)<\/a>'
seasons = re.compile(patron, re.DOTALL).findall(section_stagione)
for scrapedseason_url, scrapedseason in seasons:
# Trova gli Episodi
season_url = urlparse.urljoin(url, scrapedseason_url)
# Carica la pagina
data = httptools.downloadpage(season_url).data.replace('\t', '').replace('\n', '')
# Estrae i contenuti
section_episodio = scrapertools.find_single_match(data, r'Episodio<\/a>(.*?)<\/ul>')
patron = r'<a href="([^"]+)" >.*?<\/i>\s(.*?)<\/a>'
episodes = re.compile(patron, re.DOTALL).findall(section_episodio)
for scrapedepisode_url, scrapedepisode in episodes:
episode_url = urlparse.urljoin(url, scrapedepisode_url)
title = scrapedseason + 'x' + scrapedepisode.zfill(2)
itemlist.append(
Item(channel=item.channel,
action='findvideos',
contentType='episode',
title=title,
url=episode_url,
fulltitle=title + ' - ' + item.show,
show=item.show,
thumbnail=item.thumbnail))
# Link Aggiungi alla Libreria
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title='[COLOR lightblue][B]Aggiungi Serie alla videoteca[/B][/COLOR]',
url=item.url,
action='add_serie_to_library',
extra='episodios' + '###' + item.extra,
show=item.show))
return itemlist

View File

@@ -51,8 +51,7 @@ def peliculas(item):
if item.args[1] in ['tvshow', 'anime', 'music', 'other']:
patron = r'>[^"<]+'
else:
patron = r'>(?P<quality>[^"<]+)'
patron += '</td> <TD[^>]+><A class="tab" HREF="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<size>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<seed>[^<]+)'
patron = r'>(?P<quality>[^"<]+)</td> <TD[^>]+><A class="tab" HREF="(?P<url>[^"]+)"\s*>(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<size>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<seed>[^<]+)'
def itemHook(item):
item.contentType = item.args[1]
@@ -62,21 +61,13 @@ def peliculas(item):
def itemlistHook(itemlist):
args = item.args
args[0] += 1
itemlist.append(
support.Item(channel=item.channel,
action = item.action,
contentType=item.contentType,
title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),
url=item.url,
args=args,
thumbnail=support.thumb()))
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), args=args, thumbnail=support.thumb()))
return itemlist
return locals()
def search(item, text):
support.log(item, text)
itemlist = []
if 'all' in item.args:
item.url += text
else:

View File

@@ -90,18 +90,11 @@ def get_season(item, seas_url, seasonNumber):
json_object = jsontools.load(data)
for episode in json_object['hydra:member']:
itemlist.append(
Item(channel=item.channel,
action='findvideos',
contentType='episode',
fulltitle=item.fulltitle,
show=item.show,
contentSerieName=item.contentSerieName,
title=str(seasonNumber)+"x"+str("%02d"%episode['episodeNumber']),
url=seas_url,
thumbnail=item.thumbnail,
fanart=item.fanart,
plot=item.plot,
extra=str(len(json_object['hydra:member'])-episode['episodeNumber'])))
item.clone(action='findvideos',
contentType='episode',
title=str(seasonNumber)+"x"+str("%02d"%episode['episodeNumber']),
url=seas_url,
extra=str(len(json_object['hydra:member'])-episode['episodeNumber'])))
return itemlist[::-1]
def search(item, texto):
@@ -135,12 +128,10 @@ def search_movie_by_genre(item):
json_object = jsontools.load(data)
for genre in json_object['hydra:member']:
itemlist.append(
Item(channel=item.channel,
action="peliculas",
title=support.typo(genre['name'],'bold'),
contentType='movie',
url="%s/api/movies?genres.id=%s" %(host,genre['id']),
extra=item.extra))
item.clone(action="peliculas",
title=support.typo(genre['name'],'bold'),
contentType='movie',
url="%s/api/movies?genres.id=%s" %(host,genre['id'])))
return support.thumb(itemlist, True)
def search_movie_by_year(item):
@@ -150,12 +141,12 @@ def search_movie_by_year(item):
itemlist = []
for i in range(100):
year_to_search = year - i
itemlist.append(Item(channel=item.channel,
url="%s/api/movies?releaseDate=%s" %(host,year_to_search),
plot="1",
type="movie",
title=support.typo(year_to_search,'bold'),
action="peliculas"))
itemlist.append(
item.clone(channel=item.channel,url="%s/api/movies?releaseDate=%s" %(host,year_to_search),
plot="1",
type="movie",
title=support.typo(year_to_search,'bold'),
action="peliculas"))
return itemlist
def findvideos(item):
@@ -169,17 +160,12 @@ def findvideos(item):
array_index=int(item.extra)
for video in json_object['hydra:member'][array_index]['playlist']['videos']:
itemlist.append(
Item(
channel=item.channel,
action="play",
title='Direct',
thumbnail=item.thumbnail,
fulltitle = item.fulltitle,
search = item.search,
url=video['src'],
server='directo',
quality=str(video['size'])+ 'p',
folder=False))
item.clone(action="play",
title='Direct',
url=video['src'],
server='directo',
quality=str(video['size'])+ 'p',
folder=False))
except:
pass
return support.server(item, itemlist=itemlist)
@@ -220,19 +206,16 @@ def get_itemlist_element(element,item):
quality=''
url="%s%s"
itemlist.append(
Item(channel=item.channel,
action=next_action,
title=support.typo(scrapedtitle,'bold') + quality,
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=scrapedplot,
fanart=scrapedfanart,
thumbnail=scrapedthumbnail,
contentType=item.contentType,
contentTitle=scrapedtitle,
url=url %(host,element['@id'] ),
infoLabels=infoLabels,
extra=item.extra))
item.clone(action=next_action,
title=support.typo(scrapedtitle,'bold') + quality,
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=scrapedplot,
fanart=scrapedfanart,
thumbnail=scrapedthumbnail,
contentTitle=scrapedtitle,
url=url %(host,element['@id'] ),
infoLabels=infoLabels))
if item.contentType=='movie':
for item in itemlist:

View File

@@ -39,16 +39,10 @@ def mainlist(item):
def menu(item):
support.log()
itemlist = [support.Item(channel= item.channel, title = support.typo('Tutti','bullet bold'),
url = item.url, action = 'peliculas'),
support.Item(channel= item.channel, title = support.typo('Generi','submenu'),
url = item.url, args = 'genre', action = 'submenu'),
support.Item(channel= item.channel, title = support.typo('A-Z','submenu'),
url = item.url, args = 'az', action = 'submenu'),
support.Item(channel= item.channel, title = support.typo('Cerca','submenu'),
url = item.url, action = 'search')]
itemlist = [item.clone(title = support.typo('Tutti','bullet bold'), action = 'peliculas'),
item.clone(title = support.typo('Generi','submenu'), args = 'genre', action = 'submenu'),
item.clone(title = support.typo('A-Z','submenu'), args = 'az', action = 'submenu'),
item.clone(title = support.typo('Cerca','submenu'), action = 'search')]
return support.thumb(itemlist)
@@ -58,8 +52,8 @@ def learning(item):
itemlist =[]
json = current_session.get(item.url).json()['contents']
for key in json:
itemlist.append(support.Item(channel = item.channel, title = support.typo(key['name'],'bold'), fulltitle = key['name'], show = key['name'],
url = key['contents'], thumbnail = item.thumbnail, action = 'peliculas', args = item.args))
itemlist.append(item.clone(title = support.typo(key['name'],'bold'), fulltitle = key['name'],
show = key['name'], url = key['contents'], action = 'peliculas'))
return itemlist
@@ -71,34 +65,35 @@ def submenu(item):
json_url = getUrl(json[-1]['path_id'])
json = current_session.get(json_url).json()['contents']
for key in json:
itemlist.append(support.Item(channel = item.channel, title = support.typo(key,'bold'), fulltitle = key, show = key,
url = json[key], thumbnail = item.thumbnail, action = 'peliculas', args = item.args))
itemlist.append(item.clone(title = support.typo(key,'bold'), fulltitle = key,
show = key, url = json[key], action = 'peliculas'))
else:
for key in json:
itemlist.append(support.Item(channel = item.channel, title = support.typo(key['name'],'bold'), fulltitle = key['name'], show = key['name'],
thumbnail = getUrl(key['image']), url = getUrl(key['path_id']), action = 'peliculas', args = item.args))
itemlist.append(item.clone(title = support.typo(key['name'],'bold'), fulltitle = key['name'], show = key['name'],
thumbnail = getUrl(key['image']), url = getUrl(key['path_id']), action = 'peliculas'))
itemlist.pop(-1)
return support.thumb(itemlist)
def replay_menu(item):
support.log()
import datetime
import datetime, xbmc
# create day and month list
days = []
months = []
days.append(support.config.get_localized_string(17))
for day in range(11, 17): days.append(support.config.get_localized_string(day))
for month in range(21, 33): months.append(support.config.get_localized_string(month))
days.append(xbmc.getLocalizedString(17))
for day in range(11, 17): days.append(xbmc.getLocalizedString(day))
for month in range(21, 33): months.append(xbmc.getLocalizedString(month))
# make menu
itemlist = []
today = datetime.date.today()
for d in range(7):
day = today - datetime.timedelta(days=d)
itemlist.append(support.Item(channel = item.channel, thumbnail = item.thumbnail, action = 'replay_channels', url = item.url, date = day.strftime("%d-%m-%Y"),
title = support.typo(days[int(day.strftime("%w"))] + " " + day.strftime("%d") + " " + months[int(day.strftime("%m"))-1], 'bold')))
support.log(day)
itemlist.append(item.clone(action = 'replay_channels', date = day.strftime("%d-%m-%Y"),
title = support.typo(days[int(day.strftime("%w"))] + " " + day.strftime("%d") + " " + months[int(day.strftime("%m"))-1], 'bold')))
return itemlist
@@ -107,8 +102,8 @@ def replay_channels(item):
itemlist = []
json = current_session.get(item.url).json()['dirette']
for key in json:
itemlist.append(support.Item(channel = item.channel, title = support.typo(key['channel'], 'bold'), fulltitle = key['channel'], show = key['channel'],plot = item.title, action = 'replay',
thumbnail = key['transparent-icon'].replace("[RESOLUTION]", "256x-"), url = '%s/palinsesto/app/old/%s/%s.json' % (host, key['channel'].lower().replace(' ','-'), item.date)))
itemlist.append(item.clone(title = support.typo(key['channel'], 'bold'), fulltitle = key['channel'], show = key['channel'], plot = item.title, action = 'replay',
thumbnail = key['transparent-icon'].replace("[RESOLUTION]", "256x-"), url = '%s/palinsesto/app/old/%s/%s.json' % (host, key['channel'].lower().replace(' ','-'), item.date)))
return itemlist
@@ -118,8 +113,8 @@ def replay(item):
json = current_session.get(item.url).json()[item.fulltitle][0]['palinsesto'][0]['programmi']
for key in json:
support.log('KEY=',key)
if key and key['pathID']: itemlist.append(support.Item(channel = item.channel, thumbnail = getUrl(key['images']['landscape']), fanart = getUrl(key['images']['landscape']), url = getUrl(key['pathID']),
title = support.typo(key['timePublished'], 'color kod bold') + support.typo(' | ' + key['name'], ' bold'), fulltitle = key['name'], show = key['name'], plot = key['testoBreve'], action = 'findvideos'))
if key and key['pathID']: itemlist.append(item.clone(thumbnail = getUrl(key['images']['landscape']), fanart = getUrl(key['images']['landscape']), url = getUrl(key['pathID']), fulltitle = key['name'], show = key['name'],
title = support.typo(key['timePublished'], 'color kod bold') + support.typo(' | ' + key['name'], ' bold'), plot = key['testoBreve'], action = 'findvideos'))
return itemlist
def search(item, text):
@@ -134,9 +129,9 @@ def search(item, text):
for key in json:
for key in json[key]:
if 'PathID' in key and (text.lower() in key['name'].lower()):
itemlist.append(support.Item(channel = item.channel, title = support.typo(key['name'],'bold'), fulltitle = key['name'], show = key['name'], url = key['PathID'].replace('/?json', '.json'), action = 'Type',
thumbnail = getUrl(key['images']['portrait'] if 'portrait' in key['images'] else key['images']['portrait43'] if 'portrait43' in key['images'] else key['images']['landscape']),
fanart = getUrl(key['images']['landscape'] if 'landscape' in key['images'] else key['images']['landscape43'])))
itemlist.append(item.clone(title = support.typo(key['name'],'bold'), fulltitle = key['name'], show = key['name'], url = key['PathID'].replace('/?json', '.json'), action = 'Type',
thumbnail = getUrl(key['images']['portrait'] if 'portrait' in key['images'] else key['images']['portrait43'] if 'portrait43' in key['images'] else key['images']['landscape']),
fanart = getUrl(key['images']['landscape'] if 'landscape' in key['images'] else key['images']['landscape43'])))
except:
import sys
for line in sys.exc_info():
@@ -162,9 +157,9 @@ def dirette(item):
json = current_session.get(item.url).json()['dirette']
onAir = current_session.get(onair).json()['on_air']
for i, key in enumerate(json):
itemlist.append(support.Item(channel = item.channel, title = support.typo(key['channel'], 'bold'), fulltitle = key['channel'], show = key['channel'],
thumbnail = key['transparent-icon'].replace("[RESOLUTION]", "256x-"),forcethumb=True , fanart = getUrl(onAir[i]['currentItem']['image']), url = key['video']['contentUrl'],
plot = support.typo(onAir[i]['currentItem']['name'],'bold')+ '\n\n' + onAir[i]['currentItem']['description'], action = 'play'))
itemlist.append(item.clone(title = support.typo(key['channel'], 'bold'), fulltitle = key['channel'], show = key['channel'], url = key['video']['contentUrl'],
thumbnail = key['transparent-icon'].replace("[RESOLUTION]", "256x-"),forcethumb=True , fanart = getUrl(onAir[i]['currentItem']['image']),
plot = support.typo(onAir[i]['currentItem']['name'],'bold')+ '\n\n' + onAir[i]['currentItem']['description'], action = 'play'))
return itemlist
@@ -212,10 +207,7 @@ def peliculas(item):
itemlist = sorted(itemlist, key=lambda it: it.title)
if len(keys) > pag * pagination and not item.search:
itemlist.append(support.Item(channel=item.channel, action = item.action, contentType=item.contentType,
title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),
fulltitle= item.fulltitle, show= item.show, url=item.url, args=item.args, page=pag + 1,
thumbnail=support.thumb()))
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), page=pag + 1, thumbnail=support.thumb()))
return itemlist
@@ -224,8 +216,7 @@ def select(item):
itemlist = []
json = current_session.get(item.url).json()['blocks']
for key in json:
itemlist.append(support.Item(channel = item.channel, title = support.typo(key['name'],'bold'), fulltitle = item.fulltitle,
show = item.show, thumbnail = item.thumbnail, url = key['sets'], action = 'episodios', args = item.args))
itemlist.append(item.clone(title = support.typo(key['name'],'bold'), url = key['sets'], action = 'episodios'))
if len(itemlist) == 1:
return episodios(itemlist[0])
else:
@@ -237,9 +228,7 @@ def episodios(item):
itemlist = []
if type(item.url) in [list, dict] and len(item.url) > 1 and ('name' in item.url[0] and 'stagione' not in item.url[0]['name'].lower()):
for key in item.url:
itemlist.append(support.Item(channel = item.channel, title = support.typo(key['name'], 'bold'), fulltitle = item.fulltitle, show = item.show, thumbnail = item.thumbnail,
fanart = item.fanart, url = getUrl(key['path_id']), plot = item.plot, contentType = 'tvshow',
action = 'episodios'))
itemlist.append(item.clone(title = support.typo(key['name'], 'bold'), url = getUrl(key['path_id']), contentType = 'tvshow', action = 'episodios'))
elif type(item.url) in [list, dict]:
with futures.ThreadPoolExecutor() as executor:
@@ -269,9 +258,8 @@ def episodios(item):
# title = key['subtitle'].strip()
if not title:
title = key['name']
itemlist.append(support.Item(channel = item.channel, title = support.typo(title, 'bold'), fulltitle = item.fulltitle, show = item.show, thumbnail = item.thumbnail,
fanart = getUrl(key['images']['landscape']), url = key['video_url'], plot = key['description'], contentType = 'episode',
action = 'findvideos', VL=True if ep else False))
itemlist.append(item.clone(title = support.typo(title, 'bold'), action = 'findvideos', VL=True if ep else False, plot = key['description'],
fanart = getUrl(key['images']['landscape']), url = key['video_url'], contentType = 'episode'))
if itemlist and itemlist[0].VL: support.videolibrary(itemlist, item)
return itemlist
@@ -290,8 +278,7 @@ def findvideos(item):
else:
url = item.url
itemlist.append(support.Item(channel = item.channel, server = 'directo', title = 'Diretto', fulltitle = item.fulltitle,
show = item.show, thumbnail = item.thumbnail, fanart = item.json, url = getUrl(url), action = 'play' ))
itemlist.append(item.clone(server = 'directo', title = 'Diretto', fanart = item.json, url = getUrl(url), action = 'play' ))
return support.server(item, itemlist=itemlist, Download=False)
@@ -321,8 +308,9 @@ def addinfo(key, item):
support.log()
info = current_session.get(getUrl(key['info_url'])).json()
if not item.search or item.search.lower() in key['name'].lower():
it = support.Item( channel = item.channel, title = support.typo(key['name'],'bold'), fulltitle = key['name'], show = key['name'],
thumbnail = getUrl(key['images']['portrait_logo'] if key['images']['portrait_logo'] else key['images']['landscape']), fanart = getUrl(key['images']['landscape']), url = getUrl(key['path_id']), plot = info['description'])
it = item.clone(title = support.typo(key['name'],'bold'), fulltitle = key['name'], show = key['name'],
thumbnail = getUrl(key['images']['portrait_logo'] if key['images']['portrait_logo'] else key['images']['landscape']),
fanart = getUrl(key['images']['landscape']), url = getUrl(key['path_id']), plot = info['description'])
if 'layout' not in key or key['layout'] == 'single':
it.action = 'findvideos'
it.contentType = 'movie'
@@ -351,9 +339,9 @@ def load_episodes(key, item):
if not title:
title = key['name']
itemlist.append(support.Item(channel = item.channel, title = support.typo(title, 'bold'), fulltitle = item.fulltitle, show = item.show, thumbnail = item.thumbnail,
fanart = getUrl(key['images']['landscape']), url = key['video_url'], plot = key['description'], contentType = 'episode',
action = 'findvideos', VL=True if ep else False, order=order))
itemlist.append(item.clone(title = support.typo(title, 'bold'), url = key['video_url'], contentType = 'episode',
fanart = getUrl(key['images']['landscape']), plot = key['description'],
action = 'findvideos', VL=True if ep else False, order=order))
return itemlist

View File

@@ -184,17 +184,6 @@ def findvideos(item):
series = support.typo(item.contentSerieName, ' bold color kod')
itemlist = support.server(item, data=url_video)
itemlist.append(
Item(channel=item.channel,
title=goseries + series,
fulltitle=item.fulltitle,
show=item.show,
contentType='tvshow',
contentSerieName=item.contentSerieName,
url=url_serie,
action='episodios',
contentTitle=item.contentSerieName,
plot = goseries + series + "con tutte le puntate",
))
itemlist.append(item.clone(title=goseries + series, contentType='tvshow', url=url_serie, action='episodios', plot = goseries + series + "con tutte le puntate"))
return itemlist

View File

@@ -42,7 +42,7 @@ def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones ','')\
.replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip()
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
@@ -58,7 +58,7 @@ def findvideos(item):
data = re.sub(r'\n|\t|\s+', ' ', data)
# recupero il blocco contenente i link
blocco = scrapertools.find_single_match(data, r'<div class="entry">([\s\S.]*?)<div class="post').replace('..:: Episodio ', 'Episodio ').strip()
matches = scrapertools.find_multiple_matches(blocco, '(S(\d*)E(\d*))\s')
matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
if len(matches) > 0:
for fullseasonepisode, season, episode in matches:
blocco = blocco.replace(fullseasonepisode + ' ', 'Episodio ' + episode + ' ')
@@ -74,7 +74,7 @@ def findvideos(item):
if len(matches):
data = matches[0][0]
patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
patron = r'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
for keeplinks, id in matches:
headers2 = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
@@ -116,18 +116,15 @@ def lista_serie(item):
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl,
folder=True))
item.clone(action="episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -162,7 +159,7 @@ def episodios(item, itemlist=[]):
# recupero la stagione
season = scrapertools.find_single_match(scrapedtitle, 'Stagione ([0-9]*)')
blocco = scrapertools.find_single_match(data, '<div class="entry">[\s\S.]*?<div class="post')
blocco = scrapertools.find_single_match(data, r'<div class="entry">[\s\S.]*?<div class="post')
blocco = blocco.replace('<strong>Episodio ', '<strong>Episodio ').replace(' </strong>', ' </strong>')
blocco = blocco.replace('<strong>Episodio ', '<strong>S' + season.zfill(2) + 'E')
matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
@@ -189,18 +186,15 @@ def episodios(item, itemlist=[]):
infoLabels['episode'] = episode
fullepisode += ' ' + support.typo("Sub-ITA", '_ [] color kod')
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=fullepisode,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
contentSerieName=title,
infoLabels=infoLabels,
folder=True))
item.clone(action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=fullepisode,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
contentSerieName=title,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -224,7 +218,7 @@ def peliculas_tv(item):
log()
itemlist = []
patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
html = support.match(item, patron=patron, headers=headers)
matches = html.matches
@@ -249,18 +243,16 @@ def peliculas_tv(item):
infoLabels['episode'] = episode[2].zfill(2)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
contentLanguage='Sub-ITA',
plot=scrapedplot,
infoLabels=infoLabels,
folder=True))
item.clone(action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
contentLanguage='Sub-ITA',
plot=scrapedplot,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -301,7 +293,7 @@ def search(item, texto):
log(texto)
itemlist = []
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>'
patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>'
matches = support.match(item, patron=patron, headers=headers).matches
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if texto.upper() in scrapedtitle.upper():
@@ -309,18 +301,15 @@ def search(item, texto):
scrapedplot = ""
title = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl,
folder=True))
item.clone(action="episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -336,7 +325,7 @@ def list_az(item):
itemlist = []
alphabet = dict()
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>'
patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>'
matches = support.match(item, patron=patron, headers=headers).matches
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
letter = scrapedtitle[0].upper()
@@ -346,11 +335,10 @@ def list_az(item):
for letter in sorted(alphabet):
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
url='\n\n'.join(alphabet[letter]),
title=letter,
fulltitle=letter))
item.clone(action="lista_serie",
url='\n\n'.join(alphabet[letter]),
title=letter,
fulltitle=letter))
return itemlist

View File

@@ -122,14 +122,10 @@ def findvideos(item):
itemlist = support.server(item, data=urls_video)
itemlist.append(
Item(channel=item.channel,
title=support.typo("Vai alla Serie Completa: " + item.fulltitle, ' bold'),
fulltitle=item.fulltitle,
show=item.show,
contentType='tvshow',
url=url_serie,
action='episodios',
thumbnail = support.thumb(thumb='tvshow.png')
))
item.clone(title=support.typo("Vai alla Serie Completa: " + item.fulltitle, ' bold'),
contentType='tvshow',
url=url_serie,
action='episodios',
thumbnail = support.thumb(thumb='tvshow.png')))
return itemlist

View File

@@ -68,14 +68,8 @@ def findvideos(item):
matches = support.match(item, patron=r'<a href="([^"]+)[^>]+>Download[^>]+>[^>]+>[^>]+><strong class="quality">([^<]+)<').matches
for url, quality in matches:
itemlist.append(
Item(channel=item.channel,
action="play",
url=unquote(support.match(url, patron=[r'dest=([^"]+)"',r'/(http[^"]+)">Click']).match),
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=quality,
contentType=item.contentType,
folder=False))
item.clone(action="play",
url=unquote(support.match(url, patron=[r'dest=([^"]+)"',r'/(http[^"]+)">Click']).match),
quality=quality))
return support.server(item, itemlist=itemlist)

View File

@@ -122,17 +122,12 @@ def episodios(item):
if '-' in season: # vedi https://stpgs.ml/SerieTv/Atypical-S01-8-8.html
season = season.split('-')[0]
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=str(int(season)) + 'x' + str(n) + support.typo(item.quality, '-- [] color kod'),
url=url,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=item.quality,
contentType='episode',
folder=True,
args={'id': id, 'season': season, 'episode': episode}))
item.clone(action="findvideos",
title=str(int(season)) + 'x' + str(n) + support.typo(item.quality, '-- [] color kod'),
url=url,
contentType='episode',
folder=True,
args={'id': id, 'season': season, 'episode': episode}))
support.videolibrary(itemlist, item)
return itemlist
@@ -154,21 +149,7 @@ def findvideos(item):
if res:
itemlist.append(
Item(channel=item.channel,
action="play",
title='contentful',
url=res,
server='directo',
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
quality=item.quality,
contentType=item.contentType))
# download = itemlist[0].clone()
# if item.contentType == 'movie':
# download.url = downPrefix + id
# else:
# download.url = downPrefix + id + '-S' + season + '-' + episode
# itemlist.append(download)
item.clone(action="play", title='contentful', url=res, server='directo'))
else:
# google drive...
pass

View File

@@ -27,12 +27,7 @@ def mainlist(item):
return item
def itemlistHook(itemlist):
itemlist.append(
support.Item(
channel=item.channel,
title=support.typo('Cerca...', 'bold color kod'),
url = item.url,
action='search',
thumbnail=support.thumb(thumb='search.png')))
item.clone(title=support.typo('Cerca...', 'bold color kod'), action='search', thumbnail=support.thumb(thumb='search.png')))
support.channel_config(item, itemlist)
return itemlist
return locals()
@@ -45,16 +40,12 @@ def radio(item):
if data.matches:
for title, location, url, quality, song, type, thumbnail in data.matches:
itemlist.append(
support.Item(
channel = item.channel,
title = support.typo(title, 'bold') + support.typo(quality + ' kbps','_ [] bold color kod'),
thumbnail = thumbnail,
url = url,
contentType = 'music',
plot = support.typo(location, 'bold') + '\n' + song,
action = 'findvideos'
)
)
item.clone(title = support.typo(title, 'bold') + support.typo(quality + ' kbps','_ [] bold color kod'),
thumbnail = thumbnail,
url = url,
contentType = 'music',
plot = support.typo(location, 'bold') + '\n' + song,
action = 'findvideos'))
else:
matches = support.match(data.data, patron= r'text="(?P<title>[^\("]+)(?:\([^\)]+\))?" URL="(?P<url>[^"]+)" (?:guide_id="[^"]+" )?(?:stream_type="[^"]+" )?topic_duration="(?P<duration>[^"]+)" subtext="(?P<plot>[^"]+)" item="[^"]+" image="(?P<thumb>[^"]+)"').matches
if matches:
@@ -62,29 +53,22 @@ def radio(item):
infoLabels={}
infoLabels['duration'] = duration
itemlist.append(
support.Item(
channel = item.channel,
title = support.typo(title, 'bold'),
thumbnail = thumbnail,
infolLbels = infoLabels,
url = url,
contentType = 'music',
plot = plot,
action = 'findvideos'
)
)
item.clone(title = support.typo(title, 'bold'),
thumbnail = thumbnail,
infoLabels = infoLabels,
url = url,
contentType = 'music',
plot = plot,
action = 'findvideos'))
else:
matches = support.match(data.data, patron= r'text="(?P<title>[^"]+)" URL="(?P<url>[^"]+)"').matches
for title, url in matches:
itemlist.append(
support.Item(
channel = item.channel,
title = support.typo(title, 'bold'),
thumbnail = item.thumbnail,
url = url,
action = 'radio'
)
)
item.clone(channel = item.channel,
title = support.typo(title, 'bold'),
thumbnail = item.thumbnail,
url = url,
action = 'radio'))
return itemlist

View File

@@ -52,18 +52,7 @@ def findvideos(item):
title = server + " [COLOR blue][" + quality + "][/COLOR]"
else:
title = server
itemlist.append(
Item(channel=item.channel,
action="play",
title=title,
url=link['url'],
server=server,
fulltitle=item.fulltitle,
thumbnail=item.thumbnail,
show=item.show,
quality=quality,
contentType=item.contentType,
folder=False))
itemlist.append(item.clone(action="play", title=title, url=link['url'], server=server, quality=quality,))
autoplay.start(itemlist, item)

View File

@@ -123,12 +123,10 @@ def peliculas(item):
for key in keys:
if key not in ['1','2']:
itemlist.append(
Item(channel = item.channel,
title = support.typo(key.upper() if Filter == 'filter' else key['name'], 'bold'),
url = item.url + item.args + (key if Filter == 'filter' else str(key['id'])),
action = 'peliculas',
args = 'filters',
contentType = item.contentType))
item.clone(title = support.typo(key.upper() if Filter == 'filter' else key['name'], 'bold'),
url = item.url + item.args + (key if Filter == 'filter' else str(key['id'])),
action = 'peliculas',
args = 'filters'))
else :
json_file=loadjs(item.url)
@@ -167,19 +165,10 @@ def episodios(item):
if make_item == True:
if type(title) == tuple: title = title[0]
itemlist.append(
Item(
channel = item.channel,
title = title,
fulltitle= item.fulltitle,
show= item.show,
url= host + show_id + '/season/' + str(key['season_id']) + '/',
action= 'findvideos',
video_id= key['video_id'],
thumbnail= item.thumbnail,
fanart = item.fanart,
plot=item.plot,
contentType = item.contentType
))
item.clone(title = title,
url= host + show_id + '/season/' + str(key['season_id']) + '/',
action= 'findvideos',
video_id= key['video_id']))
autorenumber.renumber(itemlist, item, 'bold')
if autorenumber.check(item) == True \
or support.match(itemlist[0].title, patron=r"(\d+x\d+)").match:
@@ -204,10 +193,10 @@ def findvideos(item):
url = support.match('https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/playlist.m3u').data
url = url.split()[-1]
itemlist.append(
Item(action= 'play',
title='direct',
url= 'https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/' + url,
server= 'directo')
item.clone(action= 'play',
title='direct',
url= 'https://or01.top-ix.org/videomg/_definst_/mp4:' + item.url + '/' + url,
server= 'directo')
)
return support.server(item, itemlist=itemlist, Download=False)
@@ -220,18 +209,15 @@ def make_itemlist(itemlist, item, data):
infoLabels['title'] = infoLabels['tvshowtitle'] = key['title']
title = encode(key['title'])
itemlist.append(
Item(
channel = item.channel,
title = support.typo(title, 'bold'),
fulltitle= title,
show= title,
url= host + str(key['show_id']) + '/seasons/',
action= 'findvideos' if item.contentType == 'movie' else 'episodios',
contentType = item.contentType,
contentSerieName= key['title'] if item.contentType != 'movie' else '',
contentTitle= title if item.contentType == 'movie' else '',
infoLabels=infoLabels
))
item.clone(title = support.typo(title, 'bold'),
fulltitle= title,
show= title,
url= host + str(key['show_id']) + '/seasons/',
action= 'findvideos' if item.contentType == 'movie' else 'episodios',
contentType = item.contentType,
contentSerieName= key['title'] if item.contentType != 'movie' else '',
contentTitle= title if item.contentType == 'movie' else '',
infoLabels=infoLabels))
return itemlist
def loadjs(url):

View File

@@ -152,7 +152,7 @@ def findvideos(data, skip=False):
servers_list = list(get_servers_list().keys())
is_filter_servers = False
# is_filter_servers = False
# Run findvideos on each active server
for serverid in servers_list:

View File

@@ -114,7 +114,7 @@ def search(channel, item, texto):
def dbg():
if config.dev_mode():
import web_pdb;
import web_pdb
if not web_pdb.WebPdb.active_instance:
import webbrowser
webbrowser.open('http://127.0.0.1:5555')
@@ -1178,6 +1178,8 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru
videoitem.contentType = item.contentType
videoitem.infoLabels = item.infoLabels
videoitem.quality = quality
# videoitem.nfo = item.nfo
# videoitem.strm_path = item.strm_path
return videoitem
with futures.ThreadPoolExecutor() as executor:

View File

@@ -1039,40 +1039,11 @@ def resume_playback(item, return_played_time=False):
from core import videolibrarytools, filetools
# if nfo and strm_path not exist
if not item.nfo:
ID = item.infoLabels['IMDBNumber']
if item.contentType == 'movie':
ID = ID if ID else item.infoLabels['tmdb_id']
vl = xbmc.translatePath(filetools.join(config.get_setting("videolibrarypath"), config.get_setting("folder_movies")))
name = item.contentTitle if not config.get_setting('original_title_folder', 'videolibrary') else item.infoLabels['originaltitle']
name = name if not config.get_setting('lowerize_title', 'videolibrary') else name.lower()
path = filetools.validate_path('%s [%s]' % (name, ID))
item.nfo = filetools.join(vl, path, path + '.nfo')
if not item.strm_path: item.strm_path = filetools.join(path, item.contentTitle + '.strm')
else:
ID = ID if ID else 'tmdb_' + item.infoLabels['tmdb_id'] if config.get_setting('scraper_tvshows', 'videolibrary') == 0 else 'tvdb_' + item.infoLabels['tvdb_id']
vl = xbmc.translatePath(filetools.join(config.get_setting("videolibrarypath"), config.get_setting("folder_tvshows")))
name = item.contentSerieName if not config.get_setting('original_title_folder', 'videolibrary') else item.infoLabels['originaltitle']
name = name if not config.get_setting('lowerize_title', 'videolibrary') else name.lower()
path = filetools.validate_path('%s [%s]' % (name, ID))
item.nfo = filetools.join(vl, path, 'tvshow.nfo')
if item.contentSeason and item.contentEpisodeNumber:
title = str(item.contentSeason) + 'x' + str(item.contentEpisodeNumber).zfill(2)
else:
season, episode = scrapertools.find_single_match(item.title, r'(\d+)x(\d+)')
item.contentSeason = int(season)
item.contentEpisodeNumber = int(episode)
title = season + 'x' + episode.zfill(2)
if not item.strm_path: item.strm_path = filetools.join(path, title + '.strm')
# Read NFO FILE
if item.contentType == 'movie':
nfo_path = item.nfo
elif xbmc.translatePath(filetools.join(config.get_setting("videolibrarypath"), config.get_setting("folder_tvshows"))) in item.strm_path:
nfo_path = item.strm_path.replace('strm','nfo')
else:
nfo_path = xbmc.translatePath(filetools.join(config.get_setting("videolibrarypath"), config.get_setting("folder_tvshows"),item.strm_path.replace('strm','nfo')))
nfo_path = item.strm_path.replace('strm','nfo')
if filetools.isfile(nfo_path):
head_nfo, item_nfo = videolibrarytools.read_nfo(nfo_path)

View File

@@ -57,7 +57,8 @@ def mark_auto_as_watched(item, nfo_path=None, head_nfo=None, item_nfo=None):
break
# check for next Episode
if next_episode and total_time > time_from_end >= difference:
if next_episode and marked and time_from_end >= difference:
# from core.support import dbg;dbg()
nextdialog = NextDialog(ND, config.get_runtime_path())
nextdialog.show()
while platformtools.is_playing() and not nextdialog.is_exit():

View File

@@ -571,6 +571,7 @@ def findvideos(item):
item_json = Item().fromjson(filetools.read(json_path))
list_servers = []
# from core.support import dbg;dbg()
try:
# FILTERTOOLS
@@ -583,6 +584,8 @@ def findvideos(item):
# We run find_videos, from the channel or common
item_json.contentChannel = 'videolibrary'
item_json.play_from = item.play_from
item_json.nfo = item.nfo
item_json.strm_path = item.strm_path
if hasattr(channel, 'findvideos'):
from core import servertools
if item_json.videolibray_emergency_urls: