Fix updateDomains, cleanup

marco
2022-01-23 15:55:29 +01:00
parent 61b50ec301
commit d7451c421d
32 changed files with 10 additions and 2422 deletions

View File

@@ -1,10 +0,0 @@
{
"id": "altadefinizione01_link",
"name": "Altadefinizione01 L",
"active": false,
"language": ["ita","sub-ita"],
"thumbnail": "altadefinizione01_L.png",
"banner": "altadefinizione01_L.png",
"categories": ["movie","vos"],
"settings" :[]
}

View File

@@ -1,105 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel altadefinizione01_link -*-
from core import support
from core.item import Item
from platformcode import config, logger
__channel__ = "altadefinizione01_link"
# ======== utility defs START ============================
host = config.get_channel_url()
headers = [['Referer', host]]
# =========== home menu ===================
@support.menu
def mainlist(item):
support.info('mainlist',item)
film = [
('Al Cinema', ['/film-del-cinema', 'peliculas', '']),
('Generi', ['', 'genres', 'genres']),
('Anni', ['', 'genres', 'years']),
('Qualità', ['/piu-visti.html', 'genres', 'quality']),
('Mi sento fortunato', ['/piu-visti.html', 'genres', 'lucky']),
('Popolari', ['/piu-visti.html', 'peliculas', '']),
('Sub-ITA', ['/film-sub-ita/', 'peliculas', ''])
]
return locals()
# ======== defs in order of action from the menu ===========================
@support.scrape
def peliculas(item):
# debug = True
support.info('peliculas',item)
patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)(?:[^>]+>){5}\s*<div class="[^"]+" style="background-image:url\((?P<thumb>[^\)]+)(?:[^>]+>){6}\s*(?P<year>\d{4})[^>]+>[^>]+>(?:\s*(?P<duration>\d+))?(?:[^>]+>){0,2}\s+(?P<quality>[a-zA-Z]+)\s+(?:[^>]+>){2}\s*(?P<lang>[^>]+)\s+[^>]+>'
patronNext = r'<span>\d</span> <a href="([^"]+)">'
# debug = True
return locals()
# =========== category page def ======================================
@support.scrape
def genres(item):
support.info('genres',item)
action = 'peliculas'
if item.args == 'genres':
patronBlock = r'<ul class="listSubCat" id="Film">(?P<block>.*)<ul class="listSubCat" id="Anno">'
elif item.args == 'years':
patronBlock = r'<ul class="listSubCat" id="Anno">(?P<block>.*)<ul class="listSubCat" id="Qualita">'
elif item.args == 'quality':
patronBlock = r'<ul class="listSubCat" id="Qualita">(?P<block>.*)<blockquote'
elif item.args == 'lucky': # these are the random titles on the page
patronBlock = r'FILM RANDOM.*?class="listSubCat">(?P<block>.*)</ul>'
action = 'findvideos'
patronMenu = r'<li><a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
#debug = True
return locals()
# =========== search def for films/TV series =============
#host+/index.php?do=search&story=avatar&subaction=search
def search(item, text):
support.info('search', item)
itemlist = []
text = text.replace(" ", "+")
item.url = host+"/index.php?do=search&story=%s&subaction=search" % (text)
try:
return peliculas(item)
# Catch the exception so the global search is not interrupted when a channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# =========== def for the news entries in the main menu =============
def newest(categoria):
support.info('newest', categoria)
itemlist = []
item = Item()
try:
if categoria == "peliculas":
item.url = host
item.action = "peliculas"
item.contentType='movie'
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Continue the search on error
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def findvideos(item):
support.info('findvideos', item)
return support.server(item, support.match(item, patron='<ul class="playernav">.*?</ul>', headers=headers).match)
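The channel above follows the return locals() convention used throughout these files: the function only assigns names such as patron, patronBlock, patronNext and action, and the @support.scrape decorator reads them from the returned dict. A minimal sketch of that convention follows; it is not the actual support.scrape implementation, and the downloading step and parameter handling are assumptions.

import re

def scrape(func):
    def wrapper(item):
        params = func(item)                       # the channel returns locals()
        html = params.get("data", "")             # page HTML, assumed already downloaded
        block_re = params.get("patronBlock")
        if block_re:                              # optionally narrow to a block first
            m = re.search(block_re, html, re.S)
            html = m.group("block") if m else html
        results = []
        for m in re.finditer(params["patron"], html, re.S):
            entry = m.groupdict()                 # named groups become item fields
            entry["action"] = params.get("action", "findvideos")
            results.append(entry)
        return results
    return wrapper

@scrape
def peliculas(item):
    data = item["html"]
    patron = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a>'
    action = "findvideos"
    return locals()

print(peliculas({"html": '<a href="/film/1">Example</a>'}))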

View File

@@ -1,11 +0,0 @@
{
"id": "animeleggendari",
"name": "AnimePerTutti",
"active": false,
"language": ["ita", "sub-ita"],
"thumbnail": "animepertutti.png",
"bannermenu": "animepertutti.png",
"categories": ["anime", "vos"],
"not_active":["include_in_newest_peliculas", "include_in_newest_series", "include_in_newest_anime"],
"settings": []
}

View File

@@ -1,141 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for animeleggendari
# ------------------------------------------------------------
from core import support
from lib.js2py.host import jsfunctions
host = support.config.get_channel_url()
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
@support.menu
def mainlist(item):
anime = [
# ('Leggendari', ['/category/anime-leggendari/', 'peliculas']),
('ITA', ['/category/anime-ita/', 'peliculas']),
('SUB-ITA', ['/category/anime-sub-ita/', 'peliculas']),
('Conclusi', ['/category/serie-anime-concluse/', 'peliculas']),
('in Corso', ['/category/serie-anime-in-corso/', 'peliculas']),
('Genere', ['', 'genres'])
]
return locals()
def search(item, texto):
support.info(texto)
item.url = host + "/?s=" + texto
try:
return peliculas(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []
@support.scrape
def genres(item):
blacklist = ['Contattaci','Privacy Policy', 'DMCA']
patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)<'
patronBlock = r'Generi</a>\s*<ul[^>]+>(?P<block>.*?)<\/ul>'
action = 'peliculas'
return locals()
@support.scrape
def peliculas(item):
anime = True
blacklist = ['top 10 anime da vedere']
if item.url != host: patronBlock = r'<div id="main-content(?P<block>.*?)<aside'
patron = r'<figure class="(?:mh-carousel-thumb|mh-posts-grid-thumb)">\s*<a (?:class="[^"]+" )?href="(?P<url>[^"]+)" title="(?P<title>.*?)(?: \((?P<year>\d+)\))? (?:(?P<lang>SUB ITA|ITA))(?: (?P<title2>[Mm][Oo][Vv][Ii][Ee]))?[^"]*"><img (?:class="[^"]+"|width="[^"]+" height="[^"]+") src="(?P<thumb>[^"]+)"[^>]+'
def itemHook(item):
if 'movie' in item.title.lower():
item.title = support.re.sub(' - [Mm][Oo][Vv][Ii][Ee]|[Mm][Oo][Vv][Ii][Ee]','',item.title)
item.title += support.typo('Movie','_ () bold')
item.contentType = 'movie'
item.action = 'findvideos'
return item
def itemlistHook(itemlist):
itlist = []
for item in itemlist:
if 'nuovo episodio:' not in item.title.lower():
itlist += [item]
return itlist
patronNext = r'<a class="next page-numbers" href="([^"]+)">'
action = 'episodios'
return locals()
@support.scrape
def episodios(item):
data = support.match(item, headers=headers, patronBlock=r'entry-content clearfix">(.*?)class="mh-widget mh-posts-2 widget_text').block
if not 'pagination clearfix' in data:
support.info('NOT IN DATA')
patron = r'<iframe.*?src="(?P<url>[^"]+)"'
title = item.title
def fullItemlistHook(itemlist):
if len(itemlist) > 0:
urls = []
for item in itemlist:
urls.append(item.url)
item = itemlist[0]
item.data = urls
item.title = title
item.contentType = 'movie'
itemlist = []
itemlist.append(item)
return itemlist
else:
url = item.url
anime = True
patronBlock = r'(?:<p style="text-align: left;">|<div class="pagination clearfix">\s*)(?P<block>.*?)</span></a></div>'
patron = r'(?:<a href="(?P<url>[^"]+)"[^>]+>)?<span class="pagelink">(?P<episode>\d+)'
def itemHook(item):
if not item.url:
item.url = url
if 'Movie Parte' in data:
item.title = support.typo(item.fulltitle + ' - Part ','bold') + item.title
item.contentType = 'movie'
else:
item.title = support.typo('Episodio ', 'bold') + item.title
return item
return locals()
def check(item):
data = support.match(item, headers=headers).data
if 'Lista Episodi' not in data:
item.data = data
return findvideos(item)
data = ''
return data
def findvideos(item):
support.info()
if item.data:
data = item.data
else:
matches = support.match(item, patron=r'<iframe.*?src="(?P<url>[^"]+)"').matches
data = ''
if matches:
for match in matches:
try: data += str(jsfunctions.unescape(support.re.sub('@|g','%', match)))
except: data += ''
data += str(match)
else:
data = ''
return support.server(item,data)
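findvideos above restores the '%' characters that the page obfuscates as '@'/'g' and then applies a JavaScript-style unescape via lib.js2py. A small sketch of the same step, using urllib's unquote as a stand-in for jsfunctions.unescape:

import re
from urllib.parse import unquote

def deobfuscate(token):
    # swap the obfuscation characters back to '%' and percent-decode
    return unquote(re.sub("@|g", "%", token))

# e.g. deobfuscate("https@3A@2F@2Fhoster.example@2Fembed")
#      -> "https://hoster.example/embed"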

View File

@@ -9,14 +9,14 @@ from core import scrapertools, httptools, servertools, support
from platformcode import logger, config
# def findhost(url):
# host = httptools.downloadpage(url, follow_redirect=True).url
# if host == 'https://cb01.uno/':
# host = support.match(host, patron=r'<a href="([^"]+)').match
# return host
def findhost(url):
host = httptools.downloadpage(url, follow_redirect=True).url
if host == 'https://cb01.uno/':
host = support.match(host, patron=r'<a href="([^"]+)').match
return host
host = config.get_channel_url()
host = config.get_channel_url(findhost)
headers = [['Referer', host]]
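This hunk is the only non-deletion in the commit: the previously commented-out findhost is re-enabled and passed to config.get_channel_url so the channel resolves its current domain at runtime. A hedged sketch of that pattern, using requests in place of the addon's httptools/support helpers and a hypothetical start URL:

import re
import requests

def findhost(url):
    # follow redirects to discover the channel's current domain
    resp = requests.get(url, allow_redirects=True, timeout=10)
    host = resp.url
    # if we landed on the known parking page, pull the real link out of it
    if host == "https://cb01.uno/":
        match = re.search(r'<a href="([^"]+)', resp.text)
        if match:
            host = match.group(1)
    return host

# usage (network access required; the start URL is illustrative):
# host = findhost("https://cb01.example/")
# headers = [["Referer", host]]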

View File

@@ -1,10 +0,0 @@
{
"id": "dsda",
"name": "D.S.D.A",
"language": ["ita"],
"active": false,
"thumbnail": "dsda.png",
"banner": "dsda.png",
"categories": ["documentary"],
"settings": []
}

View File

@@ -1,140 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for documentaristreamingda
# ------------------------------------------------------------
from core import support
from core.item import Item
from platformcode import logger, config
host = config.get_channel_url()
@support.menu
def mainlist(item):
docu = [('Documentari {bullet bold}',('/elenco-documentari','peliculas')),
('Categorie {submenu}',('','menu')),
('Cerca... {bullet bold}',('','search')),]
return locals()
@support.scrape
def menu(item):
action = 'peliculas'
patronMenu = r'<li class="menu-item menu-item-type-taxonomy[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)<'
def fullItemlistHook(itemlist):
item_list = []
title_list = []
for item in itemlist:
if item.title not in title_list:
item_list.append(item)
title_list.append(item.title)
itemlist = item_list
return itemlist
return locals()
def newest(categoria):
support.info()
item = Item()
try:
if categoria == "documentales":
item.url = host + "/elenco-documentari"
item.action = "peliculas"
return peliculas(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
def search(item, texto):
support.info(texto)
item.url = host + "/?s=" + texto
try:
return peliculas(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
@support.scrape
def peliculas(item):
blacklist = ['GUIDA PRINCIPIANTI Vedere film e documentari streaming gratis', 'Guida Dsda']
data = support.match(item).data
# debug =True
if item.args == 'collection':
if 'class="panel"' in data:
item.args = 'raccolta'
patron = r'class="title-episodio">(?P<title>[^<]+)<(?P<url>.*?)<p'
# patron = r'<a (?:style="[^"]+" )?href="(?P<url>[^"]+)"[^>]+>(?:[^>]+><strong>)?(?P<title>[^<]+)(?:</a>)?</strong'
else:
patron = r'<div class="cover-racolta">\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)".*?<p class="title[^>]+>(?P<title>[^<]+)<'
else:
patron = r'<article[^>]+>[^>]+>[^>]+>(?:<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>)?.*?<a href="(?P<url>[^"]+)"[^>]*>\s*(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<p>(?P<plot>[^<]+)<'
patronNext = r'<a class="page-numbers next" href="([^"]+)">'
# select category
def itemHook(item):
title = support.re.sub(r'(?:[Ss]erie\s*|[Ss]treaming(?:\s*[Dd][Aa])?\s*|[Cc]ollezione\s*|[Rr]accolta\s*|[Dd]ocumentari(?:o)?\s*)?','',item.fulltitle).strip()
if 'serie' in item.fulltitle.lower():
item.contentType = 'tvshow'
item.action = 'episodios'
item.contentSerieName = title
item.contentTitle = ''
elif 'collezion' in item.fulltitle.lower() or \
'raccolt' in item.fulltitle.lower() or \
'filmografia' in item.fulltitle.lower():
item.args = 'collection'
item.action = 'peliculas'
item.contentTitle = title
item.contentSerieName = ''
else:
item.contentTitle = title
item.contentSerieName = ''
item.title = support.typo(title,'bold')
item.fulltitle = item.show = title
return item
# remove duplicates
def fullItemlistHook(itemlist):
item_list = []
title_list = []
for item in itemlist:
if item.title not in title_list:
item_list.append(item)
title_list.append(item.title)
itemlist = item_list
return itemlist
return locals()
@support.scrape
def episodios(item):
html = support.match(item, patron=r'class="title-episodio">(\d+x\d+)')
data = html.data
if html.match:
patron = r'class="title-episodio">(?P<episode>[^<]+)<(?P<url>.*?)<p'
else:
patron = r'class="title-episodio">(?P<title>[^<]+)<(?P<url>.*?)<p'
# def itemlistHook(itemlist):
# counter = 0
# for item in itemlist:
# episode = support.match(item.title, patron=r'\d+').match
# if episode == '1':
# counter += 1
# item.title = support.typo(str(counter) + 'x' + episode.zfill(2) + support.re.sub(r'\[[^\]]+\](?:\d+)?','',item.title),'bold')
# return itemlist
return locals()
def findvideos(item):
support.info()
if item.args == 'raccolta' or item.contentType == 'episode':
return support.server(item, item.url)
else:
return support.server(item)

View File

@@ -1,11 +0,0 @@
{
"id": "fastsubita",
"name": "Fastsubita",
"language": ["sub-ita"],
"active": false,
"thumbnail": "fastsubita.png",
"banner": "fastsubita.png",
"categories": ["tvshow", "vos"],
"not_active": ["include_in_newest_peliculas", "include_in_newest_anime"],
"settings": []
}

View File

@@ -1,214 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for fastsubita.py
# ------------------------------------------------------------
"""
On this channel, in the 'Ricerca Globale' category,
the 'Aggiungi alla Videoteca'
and 'Scarica Film'/'Scarica Serie' entries are not shown; therefore
their absence must NOT be reported as an ERROR in the Test.
News. Sections where the channel is listed:
- series
Additional info:
- SUB-ITA ONLY
"""
from core import support, httptools, scrapertools
from core.item import Item
from core.support import info
from platformcode import config
host = config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
Tvshow = [
('Aggiornamenti', ['', 'peliculas', 'update']),
('Cerca... {bold}{TV}', ['', 'search'])
]
# search = ''
return locals()
@support.scrape
def peliculas(item):
support.info(item)
# support.dbg()
deflang = 'Sub-ITA'
# it is a single page with all the episodes
if item.grouped and not support.scrapertools.find_single_match(item.url, '-[0-9]+x[0-9]+-'):
item.grouped = False
return episodios_args(item)
# each episode is a separate post
if item.fulltitle:
item.url = host + '?s=' + item.fulltitle
actLike = 'episodios'
action = 'findvideos'
blacklist = ['']
if item.args == 'genres':
patronBlock = r'<h4 id="mctm1-.">'+item.fulltitle+'</h4>(?P<block>.+?)</div>'
patron = r'[^>]+>[^>]+>.+?href="(?P<url>[^"]+)[^>]>(?P<title>[^<]+)\s<'
action = 'episodios'
elif item.args == 'search':
group = True
patronBlock = r'</header>(?P<block>.*?)</main>'
patron = '(?:<img[^>]+src="(?P<thumb>[^"]+)".*?)?<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+?)(?:(?P<episode>\d+&#215;\d+|\d+×\d+)|\[[sS](?P<season>[0-9]+)[^]]+\])\s?(?:(?P<lang>\([a-zA-Z\s]+\)) (?:[Ss]\d+[Ee]\d+)?\s?(?:[&#\d;|.{3}]+)(?P<title2>[^”[<]+)(?:&#\d)?)?'
else:
# it is a single page with all the episodes
if item.args != 'update' and not support.scrapertools.find_single_match(item.url, '-[0-9]+x[0-9]+-'):
return episodios_args(item)
patron = r'<div class="featured-thumb"> +<a href="(?P<url>[^"]+)" title="(?P<title>[^[]+)\[(?P<episode>\d+&#215;\d+)?'
patronBlock = r'<main id="main"[^>]+>(?P<block>.*?)<div id="secondary'
# def itemlistHook(itemlist):
# from core import scraper
# return scraper.sort_episode_list(itemlist)
patronNext = '<a class="next page-numbers" href="(.*?)">Successivi'
# debug = True
return locals()
def episodios_args(item):
actLike = 'episodios'
# support.dbg()
deflang = 'Sub-ITA'
action = 'findvideos'
patron = '(?P<episode>\d+&#215;\d+|\d+[×.]+\d+)(?:\s?\((?P<lang>[a-zA-Z ]+)\))?(?:\s[Ss]\d+[Ee]+\d+)? +(?:“|&#8220;)(?P<title2>.*?)(?:”|&#8221;).*?(?P<other>.*?)(?:/>|<p)'
patronBlock = r'<main id="main" class="site-main" role="main">(?P<block>.*?)</main>'
patronNext = '<a class="next page-numbers" href="(.*?)">Successivi'
# debug = True
return locals()
@support.scrape
def episodios(item):
support.info(item)
return episodios_args(item)
@support.scrape
def genres(item):
support.info()
#support.dbg()
action = 'peliculas'
patronBlock = r'<div id="mcTagMapNav">(?P<block>.+?)</div>'
patron = r'<a href="(?P<url>[^"]+)">(?P<title>.+?)</a>'
def itemHook(item):
item.url = host+'/elenco-serie-tv/'
item.contentType = 'tvshow'
return item
#debug = True
return locals()
def search(item, text):
support.info('search', item)
text = text.replace(' ', '+')
item.url = host + '?s=' + text
try:
item.args = 'search'
item.contentType = 'tvshow'
return peliculas(item)
# Catch the exception so the global search is not interrupted when a channel fails
except:
import sys
for line in sys.exc_info():
info('search log:', line)
return []
def newest(categoria):
support.info('newest ->', categoria)
itemlist = []
item = Item()
if categoria == 'series':
try:
item.contentType = 'tvshow'
item.args = 'newest'
item.url = host
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.info('newest log: ', line)
return []
return itemlist
def findvideos(item):
support.info('findvideos ->', item)
patron = r'<a href="([^"]+)">'
itemlist = []
if item.other.startswith('http'):
resp = httptools.downloadpage(item.url, follow_redirects=False)
data = resp.headers.get("location", "") + '\n'
elif item.other:
html = support.match(item.other, patron=patron, headers=headers)
matches = html.matches
data = html.data
for scrapedurl in matches:
if 'is.gd' in scrapedurl:
resp = httptools.downloadpage(scrapedurl, follow_redirects=False)
data += resp.headers.get("location", "") + '\n'
elif not support.scrapertools.find_single_match(item.url, '-[0-9]+x[0-9]+-'):
return episodios(item)
else:
patronBlock = '<div class="entry-content">(?P<block>.*)<footer class="entry-footer">'
html = support.match(item, patron=patron, patronBlock=patronBlock, headers=headers)
matches = html.matches
data= html.data
if item.args != 'episodios':
item.infoLabels['mediatype'] = 'episode'
for scrapedurl in matches:
if 'is.gd' in scrapedurl:
resp = httptools.downloadpage(scrapedurl, follow_redirects=False)
data += resp.headers.get("location", "") + '\n'
itemlist += support.server(item, data)
# data = support.match(item.url).data
# patron = r'>Posted in <a href="https?://fastsubita.com/serietv/([^/]+)/(?:[^"]+)?"'
# series = scrapertools.find_single_match(data, patron)
# titles = support.typo(series.upper().replace('-', ' '), 'bold color kod')
# goseries = support.typo("Vai alla Serie:", ' bold color kod')
# itemlist.append(
# item.clone(channel=item.channel,
# # title=goseries + titles,
# title=titles,
# fulltitle=titles,
# show=series,
# contentType='tvshow',
# contentSerieName=series,
# url=host+"/serietv/"+series,
# action='episodios',
# contentTitle=titles,
# plot = "Vai alla Serie " + titles + " con tutte le puntate",
# ))
return itemlist
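findvideos above expands is.gd short links by requesting them without following redirects and reading the Location header. A minimal sketch of that step, with requests standing in for httptools.downloadpage:

import requests

def expand_short_link(url):
    resp = requests.get(url, allow_redirects=False, timeout=10)
    # 30x responses carry the real target in the Location header
    return resp.headers.get("location", url)

# e.g. expand_short_link("https://is.gd/example") -> the underlying hoster URL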

View File

@@ -1,11 +0,0 @@
{
"id": "film4k",
"name": "Film4k",
"language": ["ita"],
"active": false,
"thumbnail": "film4k.png",
"banner": "film4k.png",
"categories": ["tvshow", "movie", "anime"],
"not_active": ["include_in_newest_peliculas", "include_in_newest_anime", "include_in_newest_series"],
"settings": []
}

View File

@@ -1,82 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for film4k
# ------------------------------------------------------------
from core import support
from platformcode import logger, config
def findhost(url):
return support.httptools.downloadpage(url).url
host = config.get_channel_url(findhost)
@support.menu
def mainlist(item):
film = ['movies',
('Qualità', ['', 'menu', 'quality']),
('Generi', ['movies', 'menu', 'genres']),
('Anno', ['movies', 'menu', 'releases']),
('Più popolari', ['trending/?get=movies', 'peliculas']),
('Più votati', ['ratings/?get=movies', 'peliculas'])]
tvshow = ['/tvshows',
('Più popolari', ['trending/?get=tv', 'peliculas']),
('Più votati', ['ratings/?get=tv', 'peliculas'])]
return locals()
def search(item, text):
logger.info('search', text)
item.url = item.url + "/?s=" + text
try:
return support.dooplay_search(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
if 'anime' in item.url:
return support.dooplay_peliculas(item, True)
else:
return support.dooplay_peliculas(item, False)
def episodios(item):
itemlist = support.dooplay_get_episodes(item)
return itemlist
def findvideos(item):
itemlist = []
if item.contentType == 'episode':
linkHead = support.httptools.downloadpage(item.url, only_headers=True).headers['link']
epId = support.scrapertools.find_single_match(linkHead, r'\?p=([0-9]+)>')
for link in support.dooplay_get_links(item, host, paramList=[['tv', epId, 1, 'title', 'server']]):
itemlist.append(
item.clone(action="play", url=link['url']))
else:
for link, quality in support.match(item.url, patron="(" + host + """links/[^"]+).*?class="quality">([^<]+)""").matches:
srv = support.servertools.find_video_items(data=support.httptools.downloadpage(link).data)
for s in srv:
s.quality = quality
itemlist.extend(srv)
return support.server(item, itemlist=itemlist)
@support.scrape
def menu(item):
action = 'peliculas'
if item.args in ['genres','releases']:
patronBlock = r'<nav class="' + item.args + r'">(?P<block>.*?)</nav'
patronMenu= r'<a href="(?P<url>[^"]+)"[^>]*>(?P<title>[^<]+)<'
else:
patronBlock = r'class="main-header">(?P<block>.*?)headitems'
patronMenu = r'(?P<url>' + host + r'quality/[^/]+/\?post_type=movies)">(?P<title>[^<]+)'
return locals()

View File

@@ -1,35 +0,0 @@
{
"id": "filmigratis",
"name": "Filmi Gratis",
"active": false,
"language": ["ita", "sub-ita"],
"thumbnail": "filmigratis.png",
"banner": "filmigratis.png",
"categories": ["movie","tvshow"],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "@70727",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,156 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Filmi Gratis
# ------------------------------------------------------------
"""
The "Al cinema" entry refers to the titles scrolling on the home page
Issues:
- None known
News: the channel is listed in:
- FILM
"""
import re
from core import httptools, support
from core.item import Item
from platformcode import config
host = config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
film = [
('Al Cinema ', ['', 'peliculas', 'cinema']),
('Categorie', ['', 'genres', 'genres']),
]
tvshow = ['/serie/ALL',
('Generi', ['', 'genres', 'genres'])
]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.info()
if item.args == 'search':
action = ''
patron = r'<div class="cnt">.*?src="([^"]+)"[^>]+>[^>]+>[^>]+>\s+(?P<title>.+?)(?:\[(?P<lang>Sub-ITA|SUB-ITA|SUB)\])?\s?(?:\[?(?P<quality>HD).+\]?)?\s?(?:\(?(?P<year>\d+)?\)?)?\s+<[^>]+>[^>]+>[^>]+>\s<a href="(?P<url>[^"]+)"[^<]+<'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
elif item.contentType == 'movie':
if not item.args:
# menu entry: Film
patronBlock = r'<h1>Film streaming ita in alta definizione</h1>(?P<block>.*?)<div class="content-sidebar">'
patron = r'<div class="timeline-right">[^>]+>\s<a href="(?P<url>.*?)".*?src="(?P<thumb>.*?)".*?<h3 class="timeline-post-title">(?:(?P<title>.+?)\s\[?(?P<lang>Sub-ITA)?\]?\s?\[?(?P<quality>HD)?\]?\s?\(?(?P<year>\d+)?\)?)<'
patronNext = r'<a class="page-link" href="([^"]+)">>'
elif item.args == 'cinema':
patronBlock = r'<div class="owl-carousel" id="postCarousel">(?P<block>.*?)<section class="main-content">'
patron = r'background-image: url\((?P<thumb>.*?)\).*?<h3.*?>(?:(?P<title>.+?)\s\[?(?P<lang>Sub-ITA)?\]?\s?\[?(?P<quality>HD)?\]?\s?\(?(?P<year>\d+)?\)?)<.+?<a.+?<a href="(?P<url>[^"]+)"[^>]+>'
elif item.args == 'genres':
# there are titles where ' gets replaced with " by support
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub('\n|\t', ' ', data)
patron = r'<div class="cnt">\s.*?src="([^"]+)".+?title="((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)"\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+<a href="(?P<url>[^"]+)"'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
pagination = ''
patronNext = '<a class="page-link" href="([^"]+)">>>'
else:
action = 'episodios'
patron = r'<div class="cnt">\s.*?src="([^"]+)".+?title="((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)"\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s+<a href="(?P<url>[^"]+)"'
## if item.args == 'search':
## patron = r'<div class="cnt">.*?src="([^"]+)".+?[^>]+>[^>]+>[^>]+>\s+((?P<title>.+?)(?:[ ]\[(?P<lang>Sub-ITA|SUB-ITA)\])?(?:[ ]\[(?P<quality>.*?)\])?(?:[ ]\((?P<year>\d+)\))?)\s+<[^>]+>[^>]+>[^>]+>[ ]<a href="(?P<url>[^"]+)"'
patronBlock = r'<div class="container">(?P<block>.*?)</main>'
def itemHook(item):
if item.args == 'search':
if 'series' in item.url:
item.action = 'episodios'
item.contentType = 'tvshow'
else:
item.action = 'findvideos'
item.contentType = 'movie'
return item
#debug = True
return locals()
@support.scrape
def episodios(item):
support.info()
action = 'findvideos'
patronBlock = r'<div class="row">(?P<block>.*?)<section class="main-content">'
patron = r'href="(?P<url>.*?)">(?:.+?)?\s+S(?P<season>\d+)\s\-\sEP\s(?P<episode>\d+)[^<]+<'
return locals()
@support.scrape
def genres(item):
support.info()
if item.contentType == 'movie':
action = 'peliculas'
patron = r'<a href="(?P<url>.*?)">(?P<title>.*?)<'
patronBlock = r'CATEGORIES.*?<ul>(?P<block>.*?)</ul>'
else:
item.contentType = 'tvshow'
action = 'peliculas'
blacklist = ['Al-Cinema']
patron = r'<a href="(?P<url>.*?)">(?P<title>.*?)<'
patronBlock = r'class="material-button submenu-toggle"> SERIE TV.*?<ul>.*?</li>(?P<block>.*?)</ul>'
return locals()
def search(item, text):
support.info('search', item)
text = text.replace(' ', '+')
item.url = host + '/search/?s=' + text
try:
item.args = 'search'
return peliculas(item)
# Catch the exception so the global search is not interrupted when a channel fails
except:
import sys
for line in sys.exc_info():
support.info('search log:', line)
return []
def newest(categoria):
support.info('newest ->', categoria)
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
item.contentType = 'movie'
item.action = 'peliculas'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.info("{0}".format(line))
return []
return itemlist
def findvideos(item):
support.info()
return support.server(item)

View File

@@ -1,11 +0,0 @@
{
"id": "netfreex",
"name": "Netfreex",
"language": ["ita"],
"active": false,
"thumbnail": "netfreex.png",
"banner": "netfreex.png",
"categories": ["tvshow", "movie", "anime"],
"not_active": ["include_in_newest_peliculas", "include_in_newest_anime", "include_in_newest_series"],
"settings": []
}

View File

@@ -1,71 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for netfreex
# ------------------------------------------------------------
from core import support
from core.item import Item
from platformcode import logger, config
# def findhost(url):
# return 'https://' + support.match('https://netfreex.uno/', patron='value="site:([^"]+)"').match
host = config.get_channel_url()
headers = ""
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
@support.menu
def mainlist(item):
film = ['/film',
('Generi', ['', 'menu', 'genres'])
]
tvshow = ['/serietv']
anime = ['/genere/anime']
return locals()
def search(item, text):
logger.info('search', text)
item.url = item.url + "/?s=" + text
try:
return support.dooplay_search(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
if 'anime' in item.url:
return support.dooplay_peliculas(item, True)
else:
return support.dooplay_peliculas(item, False)
def episodios(item):
return support.dooplay_get_episodes(item)
def findvideos(item):
from core import jsontools
itemlist = []
matches = support.match(item, patron=r'<li id="player-option-[0-9]".*?data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)".*?<span class="title".*?>([^<>]+)</span>(?:<span class="server">([^<>]+))?').matches
for Type, Post, Nume, Quality, Server in matches:
dataAdmin = support.match(host + '/wp-json/dooplayer/v1/post/%s?type=%s&source=%s' %(Post, Type, Nume)).data
js = jsontools.load(dataAdmin)
link = js['embed_url'] if 'embed_url' in js else ''
itemlist.append( item.clone(server=Server, quality=Quality, url=link, action='play'))
return support.server(item, itemlist=itemlist)
@support.scrape
def menu(item):
action = 'peliculas'
data = support.match(item, patron=r'<a href="#">Genere<(.*?)</ul').match
patronMenu= r'<a href="(?P<url>[^"]+)"[^>]*>(?P<title>[^<]+)<'
return locals()
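findvideos above collects the data-post/data-type/data-nume attributes of each player option and asks the Dooplay wp-json endpoint for the matching embed URL. A hedged sketch of that lookup, with requests replacing support.match and the response shape assumed from the js['embed_url'] access in the original:

import requests

def dooplay_embed_url(host, post_id, source_type, nume):
    api = "%s/wp-json/dooplayer/v1/post/%s?type=%s&source=%s" % (host, post_id, source_type, nume)
    data = requests.get(api, timeout=10).json()
    return data.get("embed_url", "")

# e.g. dooplay_embed_url(host, "1234", "movie", "1") -> hoster embed link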

View File

@@ -1,12 +0,0 @@
{
"id": "polpotv",
"name": "PolpoTV",
"language": ["ita"],
"active": false,
"thumbnail": "polpotv.png",
"banner": "polpotv.png",
"categories": ["movie","tvshow"],
"not_active":[],
"default_off":["include_in_newest"],
"settings": []
}

View File

@@ -1,227 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# KoD - XBMC Plugin
# polpotv channel
# ------------------------------------------------------------
from core import support, jsontools
from core.item import Item
from platformcode import config
import datetime
host = config.get_channel_url()
headers = [['Accept', 'application/ld+json']]
@support.menu
def mainlist(item):
# menu = [
# ('Ultimi Film aggiunti', ['/api/movies', 'peliculas', '']),
# ('Ultime Serie TV aggiunte', ['/api/shows', 'peliculas', '']),
# ('Generi', ['/api/genres', 'search_movie_by_genre', '']),
# ('Anni {film}', ['', 'search_movie_by_year', '']),
# ('Cerca... bold', ['', 'search', ''])
# ]
film = ['/api/movies',
('Generi', ['/api/genres', 'search_movie_by_genre', '']),
('Anni', ['', 'search_movie_by_year', '']),]
tvshow=['/api/shows']
search=''
return locals()
def newest(categoria):
support.info()
item = Item()
if categoria == 'peliculas':
item.contentType = 'movie'
item.url = host + '/api/movies'
elif categoria == 'series':
item.contentType = 'tvshow'
item.url = host+'/api/shows'
return peliculas(item)
def peliculas(item):
support.info()
itemlist = []
data = support.match(item.url, headers=headers).data
json_object = jsontools.load(data)
for element in json_object['hydra:member']:
if 'shows' not in item.url:
item.contentType='movie'
else:
item.contentType='tvshow'
itemlist.append(get_itemlist_element(element, item))
support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
try:
if support.inspect.stack()[1][3] not in ['newest']:
support.nextPage(itemlist, item, next_page=json_object['hydra:view']['hydra:next'])
except:
pass
return itemlist
def episodios(item):
support.info()
itemlist = []
data = support.match(item.url, headers=headers).data
json_object = jsontools.load(data)
for season in json_object['seasons']:
seas_url=host+season['@id']+'/releases'
itemlist_season=get_season(item, seas_url, season['seasonNumber'])
if(len(itemlist_season)>0):
itemlist.extend(itemlist_season)
support.videolibrary(itemlist, item, 'color kod bold')
support.download(itemlist, item)
return itemlist
def get_season(item, seas_url, seasonNumber):
support.info()
itemlist = []
data = support.match(seas_url, headers=headers).data
json_object = jsontools.load(data)
for episode in json_object['hydra:member']:
itemlist.append(
item.clone(action='findvideos',
contentType='episode',
title=support.typo(str(seasonNumber)+"x"+str("%02d"%episode['episodeNumber']), 'bold'),
url=seas_url,
extra=str(len(json_object['hydra:member'])-episode['episodeNumber'])))
return itemlist[::-1]
def search(item, texto):
support.info(item.url, "search", texto)
itemlist=[]
try:
item.url = host + "/api/movies?originalTitle="+texto+"&translations.name=" +texto
data = support.match(item.url, headers=headers).data
json_object = jsontools.load(data)
for movie in json_object['hydra:member']:
item.contentType='movie'
itemlist.append(get_itemlist_element(movie,item))
item.url = host + "/api/shows?originalTitle="+texto+"&translations.name=" +texto
data = support.match(item.url, headers=headers).data
json_object = jsontools.load(data)
for tvshow in json_object['hydra:member']:
item.contentType='tvshow'
itemlist.append(get_itemlist_element(tvshow,item))
support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []
def search_movie_by_genre(item):
support.info()
itemlist = []
data = support.match(item.url, headers=headers).data
json_object = jsontools.load(data)
for genre in json_object['hydra:member']:
itemlist.append(
item.clone(action="peliculas",
title=support.typo(genre['name'],'bold'),
contentType='movie',
url="%s/api/movies?genres.id=%s" %(host,genre['id'])))
return support.thumb(itemlist, True)
def search_movie_by_year(item):
support.info()
now = datetime.datetime.now()
year = int(now.year)
itemlist = []
for i in range(100):
year_to_search = year - i
itemlist.append(
item.clone(channel=item.channel,url="%s/api/movies?releaseDate=%s" %(host,year_to_search),
plot="1",
type="movie",
title=support.typo(year_to_search,'bold'),
action="peliculas"))
return itemlist
def findvideos(item):
support.info()
itemlist = []
try:
data = support.match(item.url, headers=headers).data
json_object = jsontools.load(data)
array_index=0
if item.contentType!='movie':
array_index=int(item.extra)
for video in json_object['hydra:member'][array_index]['playlist']['videos']:
itemlist.append(
item.clone(action="play",
title='Direct',
url=video['src'],
server='directo',
quality=str(video['size'])+ 'p',
folder=False))
except:
pass
return support.server(item, itemlist=itemlist)
def get_itemlist_element(element,item):
support.info()
contentSerieName = ''
contentTitle =''
try:
if element['originalLanguage']['id']=='it':
scrapedtitle=element['originalTitle']
else:
scrapedtitle=element['translations'][1]['name']
if scrapedtitle=='':
scrapedtitle=element['originalTitle']
except:
scrapedtitle=element['originalTitle']
try:
scrapedplot=element['translations'][1]['overview']
except:
scrapedplot = ""
try:
scrapedthumbnail="https:"+element['bestPosters'].values()[0]
except:
scrapedthumbnail=""
# try:
# scrapedfanart="http:"+element['backdropPath']
# except:
# scrapedfanart=""
infoLabels = {}
if item.contentType=='movie':
contentTitle = scrapedtitle
next_action='findvideos'
quality=support.typo(element['lastQuality'].upper(), '_ [] color kod bold')
url="%s%s/releases"
infoLabels['tmdb_id']=element['tmdbId']
else:
contentSerieName = scrapedtitle
next_action='episodios'
quality=''
url="%s%s"
return item.clone(action=next_action,
title=support.typo(scrapedtitle, 'bold') + quality,
fulltitle=scrapedtitle,
show=scrapedtitle,
plot=scrapedplot,
# fanart=scrapedfanart,
thumbnail=scrapedthumbnail,
contentTitle=contentTitle,
contentSerieName=contentSerieName,
contentType=item.contentType,
url=url % (host, element['@id']),
infoLabels=infoLabels)
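The polpotv channel consumes a Hydra-style JSON API: results live in hydra:member and the next page, when present, in hydra:view / hydra:next. A small sketch of walking that pagination; the endpoint path and the use of requests with a plain header dict are assumptions:

import requests

def iter_hydra_members(base, path, headers=None):
    url = base + path
    while url:
        page = requests.get(url, headers=headers, timeout=10).json()
        for member in page.get("hydra:member", []):
            yield member
        next_path = page.get("hydra:view", {}).get("hydra:next")
        url = base + next_path if next_path else None

# e.g. for movie in iter_hydra_members(host, "/api/movies"): ...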

View File

@@ -1,10 +0,0 @@
{
"id": "pufimovies",
"name": "PufiMovies",
"active": false,
"language": ["ita", "sub-ita"],
"thumbnail": "pufimovies.png",
"banner": "pufimovies.png",
"categories": ["movie","tvshow"],
"settings": []
}

View File

@@ -1,114 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for PufiMovies
# ------------------------------------------------------------
from core import support
host = support.config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
film = [
('Generi', ['', 'menu', 'Film']),
('Più Visti', ['','peliculas', 'most'])
]
tvshow = ['',
('Generi', ['', 'menu', 'Serie Tv']),
('Ultimi Episodi', ['','peliculas', 'last'])
]
search = ''
return locals()
@support.scrape
def menu(item):
action = 'peliculas'
patronBlock = item.args + r' Categorie</a>\s*<ul(?P<block>.*?)</ul>'
patronMenu = r'<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^>]+)<'
return locals()
def search(item, text):
support.info('search', item)
itemlist = []
text = text.replace(' ', '+')
item.url = host + '/search/keyword/' + text
try:
item.args = 'search'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
return itemlist
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.info('search log:', line)
return []
def newest(categoria):
support.info(categoria)
itemlist = []
item = support.Item()
item.url = host
item.action = 'peliculas'
try:
if categoria == 'peliculas':
item.contentType = 'movie'
itemlist = peliculas(item)
else:
item.args = 'last'
item.contentType = 'tvshow'
itemlist = peliculas(item)
if itemlist[-1].action == 'peliculas':
itemlist.pop()
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []
return itemlist
@support.scrape
def peliculas(item):
if item.contentType == 'tvshow' and not item.args:
action = 'episodios'
patron = r'<div class="movie-box">\s*<a href="(?P<url>[^"]+)">[^>]+>[^>]+>\D+Streaming\s(?P<lang>[^"]+)[^>]+>[^>]+>[^>]+>(?P<quality>[^<]+)[^>]+>[^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)[^>]+>[^>]+>[^>]+>\s*(?P<year>\d+)'
elif item.contentType == 'movie' and not item.args:
patron = r'<div class="existing_item col-6 col-lg-3 col-sm-4 col-xl-4">\s*<div class="movie-box">\s*<a href="(?P<url>(?:http(?:s)://[^/]+)?/(?P<type>[^/]+)/[^"]+)">[^>]+>[^>]+>\D+Streaming\s*(?P<lang>[^"]+)">[^>]+>[^>]+>(?P<quality>[^<]+)<[^>]+>[^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)[^>]+>[^>]+>[^>]+>\s*(?:(?P<year>\d+))?[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]*)<'
elif item.args == 'last':
patron = r'<div class="episode-box">[^>]+>[^>]+>[^>]+>\D+Streaming\s(?P<lang>[^"]+)">[^>]+>[^>]+>(?P<quality>[^<]+)<[^>]+>[^>]+>[^>]+>[^^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>\D*(?P<season>\d+)[^>]+>\D*(?P<episode>\d+)'
elif item.args == 'most':
patron =r'div class="sm-113 item">\s*<a href="(?P<url>[^"]+)">[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*(?P<title>[^<]+)'
else:
patron = r'<div class="movie-box">\s*<a href="(?P<url>(?:http(?:s)://[^/]+)?/(?P<type>[^/]+)/[^"]+)">[^>]+>[^>]+>\D+Streaming\s*(?P<lang>[^"]+)">[^>]+>[^>]+>(?P<quality>[^<]+)<[^>]+>[^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^>]+>[^>]+>[^>]+>[^>]+>(?P<rating>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>(?P<title>[^<]+)[^>]+>[^>]+>[^>]+>\s*(?:(?P<year>\d+))?[^>]+>[^>]+>[^>]+>[^>]+>(?P<plot>[^<]*)<'
typeActionDict = {'findvideos':['movie'], 'episodios':['tvshow']}
typeContentDict = {'movie':['movie'], 'tvshow':['tvshow']}
patronNext = r'<a href="([^"]+)"[^>]+>&raquo;'
return locals()
@support.scrape
def episodios(item):
patron = r'<div class="episode-box">[^>]+>[^>]+>[^>]+>\D+Streaming\s(?P<lang>[^"]+)">[^>]+>[^>]+>(?P<quality>[^<]+)<[^>]+>[^>]+>[^>]+>\s*<img src="(?P<thumb>[^"]+)"[^[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>[^>]+>(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>\D*(?P<season>\d+)[^>]+>\D*(?P<episode>\d+)'
return locals()
def findvideos(item):
support.info()
# match = support.match(item, patron='wstream', debug=True)
return support.server(item)

View File

@@ -1,11 +0,0 @@
{
"id": "seriehd",
"name": "SerieHD",
"active": false,
"language": ["ita"],
"thumbnail": "seriehd.png",
"banner": "seriehd.png",
"categories": ["tvshow"],
"settings": [],
"cloudflare": true
}

View File

@@ -1,156 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for SerieHD
# ------------------------------------------------------------
from core import support
def findhost(url):
return support.match(url, patron=r'<h2[^>]+><a href="([^"]+)"').match
host = support.config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
tvshow = [('Genere', ['', 'menu', 'genre']),
('A-Z', ['', 'menu', 'a-z']),
('In Corso', ['/category/serie-tv-streaming/serie-in-corso', 'peliculas']),
('Complete', ['/category/serie-tv-streaming/serie-complete', 'peliculas']),
('Americane', ['/category/serie-tv-streaming/serie-tv-americane', 'peliculas']),
('Italiane', ['/category/serie-tv-streaming/serie-tv-italiane', 'peliculas']),
('Ultimi Episodi', ['/aggiornamenti', 'peliculas', 'last']),
('Evidenza', ['', 'peliculas', 'best'])]
return locals()
def search(item, texto):
support.info(texto)
item.contentType = 'tvshow'
item.url = host + "/?s=" + texto
try:
return peliculas(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []
def newest(categoria):
support.info(categoria)
itemlist = []
item = support.Item()
item.url = host + '/aggiornamenti'
item.args = 'last'
try:
if categoria == "series":
item.contentType = 'tvshow'
return peliculas(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.logger.error("{0}".format(line))
return []
return itemlist
@support.scrape
def peliculas(item):
# debug = True
if item.args == 'last':
action = 'findvideos'
patron = r'singleUpdate">(?:[^>]+>){2}\s*<img src="(?P<thumb>[^"]+)"(?:[^>]+>){3}\s*<h2>(?P<title>[^<]+)<(?:[^>]+>){14,16}\s*<a href="(?P<url>[^"]+)">(?:[^>]+>){3}\s*(?P<season>\d+)\D+(?P<episode>\d+)(?:[^\(]*\()?(?P<lang>[^\)]+)?(?:\))?'
elif item.args == 'best':
action='episodios'
patron = r'col-md-3">\s*<a href="(?P<url>[^"]+)">[^>]+>\s*<div class="infoVetrina">[^>]+>(?P<year>\d{4})(?:[^>]+>){2}(?P<title>[^<]+)<(?:[^>]+>){4}(?P<rating>[^<]+)(?:[^>]+>){4}\s*<img src="(?P<thumb>[^"]+)"'
else:
action='episodios'
# patron = r'<a href="(?P<url>[^"]+)">[^>]+>\s*<div class="infoSeries">\s*<h2>(?P<title>[^<]+)<(?:[^>]+>){5}(?P<rating>[^<]+)?(?:[^>]+>){3}\s*<img src="(?P<thumb>[^"]+)"(?:[^>]+>){3}(?P<quality>[^<]+)<(?:[^>]+>){2}(?P<year>\d{4})'
patron = r'<a href="(?P<url>[^"]+)">[^>]+>\s*<div class="infoSeries">\s*<h2>(?P<title>[^<]+?)(?:\[(?P<lang>[^\]]+)\])?<(?:[^>]+>){5}(?P<rating>[^<]+)?(?:[^>]+>){3}\s*(?:<img src="(?P<thumb>[^"]+)"[^>]+>)?(?:[^>]+>){0,2}(?P<quality>[^<]+)<(?:[^>]+>){2}(?P<year>\d{4})'
patronNext=r'next page-numbers" href="([^"]+)"'
return locals()
@support.scrape
def episodios(item):
def get_season(pageData, seas_url, season):
data = ''
episodes = support.match(pageData if pageData else seas_url, patronBlock=patron_episode, patron=patron_option).matches
for episode_url, episode in episodes:
# episode_url = support.urlparse.urljoin(item.url, episode_url)
# if '-' in episode: episode = episode.split('-')[0].zfill(2) + 'x' + episode.split('-')[1].zfill(2)
title = season + "x" + episode.zfill(2) + ' - ' + item.fulltitle
data += title + '|' + episode_url + '\n'
return data
patron_season = '<div class="[^"]+" id="seasonsModal"[^>]+>(.*?)</ul>'
patron_episode = '<div class="[^"]+" id="episodesModal"[^>]+>(.*?)</ul>'
patron_option = r'<a href="([^"]+?)".*?>(?:Stagione |Episodio )([^<]+?)</a>'
url = support.match(item, patron=r'<iframe id="iframeVid" width="[^"]+" height="[^"]+" src="([^"]+)" allowfullscreen').match
seasons = support.match(url, patronBlock=patron_season, patron=patron_option)
data = ''
# debugging
# support.dbg()
# for i, season in enumerate(seasons.matches):
# data += get_season(seasons.data if i == 0 else '', season[0], season[1])
import sys
if sys.version_info[0] >= 3: from concurrent import futures
else: from concurrent_py2 import futures
with futures.ThreadPoolExecutor() as executor:
thL = []
for i, season in enumerate(seasons.matches):
thL.append(executor.submit(get_season, seasons.data if i == 0 else '', season[0], season[1]))
for res in futures.as_completed(thL):
if res.result():
data += res.result()
# debug = True
patron = r'(?P<season>\d+)x(?P<episode>\d+)\s*-\s*(?P<title>[^\|]+)\|(?P<url>[^ ]+)'
action = 'findvideos'
def itemlistHook(itemlist):
itemlist.sort(key=lambda item: (item.infoLabels['season'], item.infoLabels['episode']))
return itemlist
return locals()
@support.scrape
def menu(item):
if item.args == 'genre':
patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)</a>'
else:
patronMenu = r'<a href="(?P<url>[^"]+)" class="">(?P<title>[^<]+)'
blacklist = ['Serie TV Streaming','Serie TV Americane','Serie TV Italiane','Serie Complete','Serie in Corso','altadefinizione']
action = 'peliculas'
return locals()
def findvideos(item):
item.url = item.url.replace('&amp;', '&')
support.info(item)
if item.args == 'last':
url = support.match(item, patron = r'<iframe id="iframeVid" width="[^"]+" height="[^"]+" src="([^"]+)" allowfullscreen').match
matches = support.match(url,patron=r'<a href="([^"]+)">(\d+)<', patronBlock=r'<h3>EPISODIO</h3><ul>(.*?)</ul>').matches
if matches: item.url = support.urlparse.urljoin(url, matches[-1][0])
return support.hdpass_get_servers(item)
def play(item):
if 'hdpass' in item.url:
return support.hdpass_get_url(item)
return [item]
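episodios above fetches every season page concurrently through a thread pool and merges the per-season results as the futures complete. A reduced sketch of that pattern; fetch_season is a hypothetical stand-in for the support.match call in the original:

from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch_season(url):
    # placeholder: in the channel this downloads the season page and
    # extracts "SxEE - title|url" lines for that season
    return []

def fetch_all_seasons(season_urls):
    episodes = []
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(fetch_season, url) for url in season_urls]
        for future in as_completed(futures):
            episodes.extend(future.result())
    return episodes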

View File

@@ -1,11 +0,0 @@
{
"id": "serietvonline",
"name": "SerieTvOnline",
"active": false,
"language": ["ita"],
"thumbnail": "serietvonline.png",
"bannermenu": "serietvonline.png",
"categories": ["anime","tvshow","movie","documentary"],
"not_active": ["include_in_newest_anime"],
"settings": []
}

View File

@@ -1,184 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for serietvonline.py
# ----------------------------------------------------------
"""
News. Sections where the channel is listed:
- film, series
Notes:
- At most 25 titles for the sections: Film
- At most 35 titles for the sections: all the others
To add Anime to the library:
If they use the 1x01 form:
- they can be added directly from the series page, via the "aggiungi in videoteca" entry at the bottom.
Otherwise:
- first run 'Rinumerazione' (renumbering) from the context menu of the series title
"""
import re
from core import support, httptools, scrapertools
from platformcode import config
from core.item import Item
# def findhost(url):
# host = support.match(url, patron=r'href="([^"]+)">\s*cliccando qui').matches[-1]
# return host
host = config.get_channel_url()
headers = [['Referer', host]]
@support.menu
def mainlist(item):
support.info()
film = ['/ultimi-film-aggiunti/',
('A-Z', ['/lista-film/', 'peliculas', 'lista'])
]
tvshow = ['',
('Aggiornamenti', ['/ultimi-episodi-aggiunti/', 'peliculas', 'update']),
('Tutte', ['/lista-serie-tv/', 'peliculas', 'qualcosa']),
('Italiane', ['/lista-serie-tv-italiane/', 'peliculas', 'qualcosa']),
('Anni 50-60-70-80', ['/lista-serie-tv-anni-60-70-80/', 'peliculas', 'qualcosa']),
('HD', ['/lista-serie-tv-in-altadefinizione/', 'peliculas', 'qualcosa'])
]
anime = ['/lista-cartoni-animati-e-anime/']
documentari = [('Documentari {bullet bold}', ['/lista-documentari/' , 'peliculas' , 'doc', 'tvshow'])]
search = ''
return locals()
@support.scrape
def peliculas(item):
support.info()
anime = True
blacklist = ['DMCA', 'Contatti', 'Attenzione NON FARTI OSCURARE', 'Lista Cartoni Animati e Anime']
patronBlock = r'<h1>.+?</h1>(?P<block>.*?)<div class="footer_c">'
patronNext = r'<div class="siguiente"><a href="([^"]+)" >'
# debug = True
if item.args == 'search':
patronBlock = r'>Lista Serie Tv</a></li></ul></div><div id="box_movies">(?P<block>.*?)<div id="paginador">'
patron = r'<div class="movie">[^>]+[^>]+>\s*<img src="(?P<thumb>[^"]+)" alt="(?P<title>.+?)(?:(?P<year>\d{4})|")[^>]*>\s*<a href="(?P<url>[^"]+)'
elif item.contentType == 'episode':
pagination = 35
action = 'findvideos'
patron = r'<td><a href="(?P<url>[^"]+)"(?:[^>]+)?>\s?(?P<title>.*?)(?P<episode>\d+x\d+)[ ]?(?P<title2>[^<]+)?<'
elif item.contentType == 'tvshow':
# SECTION: TV series - Anime - Documentaries
pagination = 35
if not item.args and 'anime' not in item.url:
patron = r'<div class="movie">[^>]+>.+?src="(?P<thumb>[^"]+)" alt="[^"]+".+? href="(?P<url>[^"]+)">.*?<h2>(?P<title>[^"]+)</h2>\s?(?:<span class="year">(?P<year>\d+|\-\d+))?<'
else:
anime = True
patron = r'(?:<td>)?<a href="(?P<url>[^"]+)"(?:[^>]+)?>\s?(?P<title>[^<]+)(?P<episode>[\d\-x]+)?(?P<title2>[^<]+)?<'
else:
# FILM SECTION
pagination = 25
if item.args == 'lista':
patron = r'href="(?P<url>[^"]+)"[^>]+>(?P<title>.+?)(?:\s(?P<year>\d{4})|<)'
patronBlock = r'Lista dei film disponibili in streaming e anche in download\.</p>(?P<block>.*?)<div class="footer_c">'
else:
patron = r'<tr><td><a href="(?P<url>[^"]+)"(?:|.+?)?>(?:&nbsp;&nbsp;)?[ ]?(?P<title>.*?)[ ]?(?P<quality>HD)?[ ]?(?P<year>\d+)?(?: | HD[^<]*| Streaming[^<]*| MD(?: iSTANCE)? [^<]*)?</a>'
def itemHook(item):
if 'film' in item.url:
item.action = 'findvideos'
item.contentType = 'movie'
elif item.args == 'update':
pass
else:
item.contentType = 'tvshow'
item.action = 'episodios'
return item
return locals()
@support.scrape
def episodios(item):
support.info()
anime = True
action = 'findvideos'
patronBlock = r'<table>(?P<block>.*)<\/table>'
patron = r'<tr><td>(?P<title>.*?)?[ ](?:Parte)?(?P<episode>\d+x\d+|\d+)(?:|[ ]?(?P<title2>.+?)?(?:avi)?)<(?P<data>.*?)<\/td><tr>'
def itemlistHook(itemlist):
for i, item in enumerate(itemlist):
ep = support.match(item.title, patron=r'\d+x(\d+)').match
if ep == '00':
item.title = item.title.replace('x00', 'x' + str(i+1).zfill(2)).replace('- ..','')
return itemlist
return locals()
def search(item, text):
support.info("CERCA :" ,text, item)
item.url = "%s/?s=%s" % (host, text)
try:
item.args = 'search'
return peliculas(item)
# Continue the search on error
except:
import sys
for line in sys.exc_info():
support.info("%s" % line)
return []
def newest(categoria):
support.info(categoria)
itemlist = []
item = Item()
if categoria == 'peliculas':
item.contentType = 'movie'
item.url = host + '/ultimi-film-aggiunti/'
elif categoria == 'series':
item.args = 'update'
item.contentType = 'episode'
item.url = host +'/ultimi-episodi-aggiunti/'
try:
item.action = 'peliculas'
itemlist = peliculas(item)
except:
import sys
for line in sys.exc_info():
support.info("{0}".format(line))
return []
return itemlist
def findvideos(item):
support.info()
if item.contentType == 'movie':
return support.server(item, headers=headers)
else:
if item.args != 'update':
return support.server(item, item.data)
else:
itemlist = []
item.infoLabels['mediatype'] = 'episode'
data = support.match(item.url, headers=headers).data
url_video = scrapertools.find_single_match(data, r'<tr><td>(.+?)</td><tr>', -1)
url_serie = scrapertools.find_single_match(data, r'<link rel="canonical" href="([^"]+)"\s?/>')
goseries = support.typo("Vai alla Serie:", ' bold')
series = support.typo(item.contentSerieName, ' bold color kod')
itemlist = support.server(item, data=url_video)
itemlist.append(item.clone(title=goseries + series, contentType='tvshow', url=url_serie, action='episodios', plot = goseries + series + "con tutte le puntate", args=''))
return itemlist

View File

@@ -1,53 +0,0 @@
{
"id": "serietvsubita",
"name": "Serie TV Sub ITA",
"active": false,
"language": ["ita"],
"thumbnail": "serietvsubita.png",
"banner": "serietvsubita.png",
"categories": ["tvshow"],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "checklinks",
"type": "bool",
"label": "Verifica se i link esistono",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "checklinks_number",
"type": "list",
"label": "Numero de link da verificare",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "1", "3", "5", "10" ]
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostra link in lingua...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": ["Non filtrare","IT"]
}
]
}

View File

@@ -1,351 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Serietvsubita
# Thanks to Icarus crew & Alfa addon & 4l3x87
# ----------------------------------------------------------
import re
import time
from core import httptools, tmdb, scrapertools, support
from core.item import Item
from core.support import info
from platformcode import logger, config
host = config.get_channel_url()
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
@support.menu
def mainlist(item):
info()
itemlist = []
tvshowSub = [
('Novità {bold}',[ '', 'peliculas_tv', '', 'tvshow']),
('Serie TV {bold}',[ '', 'lista_serie', '', 'tvshow']),
('Per Lettera', ['', 'list_az', 'serie', 'tvshow'])
]
cerca = [(support.typo('Cerca...', 'bold'),[ '', 'search', '', 'tvshow'])]
## support.aplay(item, itemlist, list_servers, list_quality)
## support.channel_config(item, itemlist)
return locals()
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones ','')\
.replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip()
year = scrapertools.find_single_match(scrapedtitle, r'\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
info()
data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
data = re.sub(r'\n|\t|\s+', ' ', data)
# retrieve the block containing the links
blocco = scrapertools.find_single_match(data, r'<div class="entry">([\s\S.]*?)<div class="post').replace('..:: Episodio ', 'Episodio ').strip()
matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
if len(matches) > 0:
for fullseasonepisode, season, episode in matches:
blocco = blocco.replace(fullseasonepisode + ' ', 'Episodio ' + episode + ' ')
blocco = blocco.replace('Episodio ', '..:: Episodio ')
episodio = item.infoLabels['episode']
patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio
info(patron)
info(blocco)
matches = scrapertools.find_multiple_matches(blocco, patron)
if len(matches):
data = matches[0][0]
patron = r'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
for keeplinks, id in matches:
headers2 = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
['Referer', keeplinks]]
html = httptools.downloadpage(keeplinks, headers=headers2).data
data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
return support.server(item, data=data)
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
info()
itemlist = []
PERPAGE = 15
p = 1 if not item.args else int(item.args)
if '||' in item.data:
series = item.data.split('\n\n')
matches = []
for i, serie in enumerate(series):
matches.append(serie.split('||'))
else:
# Extract the entries
patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>'
matches = support.match(item, patron=patron, headers=headers).matches
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
scrapedplot = ""
scrapedthumbnail = ""
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle)
itemlist.append(
item.clone(action="episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if len(matches) >= p * PERPAGE:
item.args = p + 1
support.nextPage(itemlist, item, next_page=item.url)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
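# episodios: expand a series post into episode items. "(Completa)" posts are re-downloaded
# and split per episode via SxxEyy markers rebuilt from the season number; other posts use
# the NxM pattern in the title. Recurses over the pagination until there is no next page.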
def episodios(item, itemlist=[]):
info()
patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
patron += r'<p><a href="([^"]+)">'
html = support.match(item, patron=patron, headers=headers)
matches = html.matches
data = html.data
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
if "(Completa)" in scrapedtitle:
data = httptools.downloadpage(scrapedurl, headers=headers).data
scrapedtitle = scrapedtitle.replace(" Miniserie", " Stagione 1")
title = scrapedtitle.split(" Stagione")[0].strip()
# retrieve the season number
season = scrapertools.find_single_match(scrapedtitle, 'Stagione ([0-9]*)')
blocco = scrapertools.find_single_match(data, r'<div class="entry">[\s\S.]*?<div class="post')
# normalize non-breaking spaces so the episode markers can be matched
blocco = blocco.replace('<strong>Episodio\xa0', '<strong>Episodio ').replace('\xa0</strong>', ' </strong>')
blocco = blocco.replace('<strong>Episodio ', '<strong>S' + season.zfill(2) + 'E')
matches = scrapertools.find_multiple_matches(blocco, r'(S(\d*)E(\d*))\s')
episodes = []
if len(matches) > 0:
for fullepisode_s, season, episode in matches:
season = season.lstrip("0")
episodes.append([
"".join([season, "x", episode]),
season,
episode
])
else:
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
episodes = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
for fullepisode, season, episode in episodes:
infoLabels = {}
infoLabels['season'] = season
infoLabels['episode'] = episode
fullepisode += ' ' + support.typo("Sub-ITA", '_ [] color kod')
itemlist.append(
item.clone(action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=fullepisode,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
contentSerieName=title,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
patron = r'<strong class="on">\d+</strong>\s*<a href="([^<]+)">\d+</a>'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
item.url = next_page
itemlist = episodios(item, itemlist)
else:
item.url = item.originalUrl
support.videolibrary(itemlist, item, 'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
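# peliculas_tv: latest episodes from the homepage posts, skipping service entries and
# deriving season/episode numbers from the NxM pattern in the title.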
def peliculas_tv(item):
info()
itemlist = []
patron = r'<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
html = support.match(item, patron=patron, headers=headers)
matches = html.matches
data = html.data
for scrapedurl, scrapedtitle in matches:
if scrapedtitle in ["FACEBOOK", "RAPIDGATOR", "WELCOME!"]:
continue
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
infoLabels = {}
episode = scrapertools.find_multiple_matches(scrapedtitle, r'((\d*)x(\d*))')
if episode:  # workaround for when they post whole series or other stuff; those cases should be intercepted TODO
episode = episode[0]
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
infoLabels['season'] = episode[1]
infoLabels['episode'] = episode[2].zfill(2)
itemlist.append(
item.clone(action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=title + " - " + episode[0] + " " + support.typo("Sub-ITA", '_ [] color kod'),
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
contentLanguage='Sub-ITA',
plot=scrapedplot,
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
patron = r'<strong class="on">\d+</strong>\s?<a href="([^<]+)">\d+</a>'
support.nextPage(itemlist, item, data, patron)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
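# newest: entry point for the global news menu; returns the latest episodes and swallows
# any error so a failing channel does not break the aggregated listing.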
def newest(categoria):
info(categoria)
itemlist = []
item = Item()
item.url = host
item.extra = 'serie'
try:
if categoria == "series":
itemlist = peliculas_tv(item)
if itemlist[-1].action == 'peliculas_tv':
itemlist.pop(-1)
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
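# search: fetch the full series index and keep only the titles containing the searched
# text (case-insensitive).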
def search(item, texto):
info(texto)
itemlist = []
try:
patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>'
matches = support.match(item, patron=patron, headers=headers).matches
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
if texto.upper() in scrapedtitle.upper():
scrapedthumbnail = ""
scrapedplot = ""
title = cleantitle(scrapedtitle)
itemlist.append(
item.clone(action="episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
contentType='episode',
originalUrl=scrapedurl))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
except:
import sys
for line in sys.exc_info():
support.info('search log:', line)
return []
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
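# list_az: group the series index by initial letter; each letter item carries its series
# as blank-line separated "url||title" pairs that lista_serie unpacks.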
def list_az(item):
info()
itemlist = []
alphabet = dict()
patron = r'<li class="cat-item cat-item-\d+"><a href="([^"]+)"\s?>([^<]+)</a>'
matches = support.match(item, patron=patron, headers=headers).matches
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
letter = scrapedtitle[0].upper()
if letter not in alphabet:
alphabet[letter] = []
alphabet[letter].append(scrapedurl + '||' + scrapedtitle)
for letter in sorted(alphabet):
itemlist.append(
item.clone(action="lista_serie",
data='\n\n'.join(alphabet[letter]),
title=letter,
fulltitle=letter,
args=''))
return itemlist
# ================================================================================================================

View File

@@ -1,11 +0,0 @@
{
"id": "streamingaltadefinizione",
"name": "Popcorn Stream",
"language": ["ita"],
"active": false,
"thumbnail": "popcornstream.png",
"banner": "popcornstream.png",
"categories": ["movie","tvshow","anime"],
"not_active":["include_in_newest_peliculas", "include_in_newest_series", "include_in_newest_anime"],
"settings": []
}

View File

@@ -1,72 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Popcorn Stream
# ------------------------------------------------------------
from core import support, httptools
from core.item import Item
from platformcode import config
import sys
if sys.version_info[0] >= 3:
from urllib.parse import unquote
else:
from urllib import unquote
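# findhost follows the first link on the landing page to discover the current domain.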
def findhost(url):
data = httptools.downloadpage(url).data
return support.scrapertools.find_single_match(data, '<a href="([^"]+)')
host = config.get_channel_url(findhost)
headers = [['Referer', host]]
@support.menu
def mainlist(item):
film = ["/film/"]
anime = ["/genere/anime/"]
tvshow = ["/serietv/"]
top = [('Generi',['', 'genre'])]
return locals()
def search(item, text):
support.info("[streamingaltadefinizione.py] " + item.url + " search " + text)
item.url = item.url + "/?s=" + text
try:
return support.dooplay_search(item)
except:
import sys
for line in sys.exc_info():
support.logger.error("%s" % line)
return []
@support.scrape
def genre(item):
patronMenu = '<a href="(?P<url>[^"#]+)">(?P<title>[a-zA-Z]+)'
patronBlock='<a href="#">Genere</a><ul class="sub-menu">(?P<block>.*?)</ul>'
action='peliculas'
return locals()
def peliculas(item):
return support.dooplay_peliculas(item, True if "/genere/" in item.url else False)
def episodios(item):
return support.dooplay_get_episodes(item)
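# findvideos: collect the "Download" links with their quality label, then unquote the real
# target URL hidden behind the dest= redirect (or the intermediate "Click" page).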
def findvideos(item):
itemlist = []
matches = support.match(item, patron=r'<a href="([^"]+)[^>]+>Download[^>]+>[^>]+>[^>]+><strong class="quality">([^<]+)<').matches
for url, quality in matches:
itemlist.append(
item.clone(action="play",
url=unquote(support.match(url, patron=[r'dest=([^"]+)"',r'/(http[^"]+)">Click']).match),
quality=quality))
return support.server(item, itemlist=itemlist)

View File

@@ -1,11 +0,0 @@
{
"id": "tapmovie",
"name": "Tap Movie",
"language": ["ita", "sub-ita"],
"active": false,
"thumbnail": "tapmovie.png",
"banner": "tapmovie.png",
"categories": ["movie", "tvshow", "anime"],
"not_active": ["include_in_newest"],
"settings": []
}

View File

@@ -1,102 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for 'dvdita'
from core import support, httptools
from core.item import Item
import sys
if sys.version_info[0] >= 3: from concurrent import futures
else: from concurrent_py2 import futures
host = support.config.get_channel_url()
api_url = '/api/v2/'
per_page = 24
@support.menu
def mainlist(item):
film = ['/browse/movie']
tvshow = ['/browse/tvshow']
search = ''
# [Menu entry, ['url', 'action', 'args', contentType]]
top = [('Generi', ['', 'genres', '', 'undefined'])]
return locals()
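# episodios: ask the JSON API for the season list, then fetch every season's episodes in
# parallel with a thread pool and build one item per episode.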
def episodios(item):
support.info(item)
itemlist = []
with futures.ThreadPoolExecutor() as executor:
thL = []
for season in httptools.downloadpage(host + api_url + 'tvshow', post={'tvshow_id': item.id}).json.get('season', []):
season_id = season['season_number']
thL.append(executor.submit(httptools.downloadpage, host + api_url + 'episodes', post={'tvshow_id': item.id, 'season_id': season_id}))
for th in futures.as_completed(thL):
for episode in th.result().json.get('episodes', []):
itemlist.append(item.clone(action="findvideos", contentSeason=episode['season_id'], contentEpisodeNumber=episode['episode_number'], id=item.id,
title=episode['season_id']+'x'+episode['episode_number'], contentType='episode'))
support.scraper.sort_episode_list(itemlist)
support.videolibrary(itemlist, item)
support.download(itemlist, item)
return itemlist
def genres(item):
itemlist = []
for n, genre in enumerate(httptools.downloadpage(host + api_url + 'categories', post={}).json.get('categories', [])):
itemlist.append(item.clone(action="peliculas", genre=genre.get('name'), title=genre.get('value'), n=n))
return support.thumb(itemlist, genre=True)
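# peliculas: query the API search endpoint ("search/category" when a genre is set, plain
# "search" otherwise), map each result to a movie/tvshow item and page through the results
# with the ?p= parameter.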
def peliculas(item, text=''):
support.info('search', item)
itemlist = []
filter_type = False
if item.genre:
text = item.genre
cmd = 'search/category'
else:
cmd = 'search'
if not text:
filter_type = True
try:
page = int(item.url.split('?p=')[1])
except:
page = 1
results = httptools.downloadpage(host + api_url + cmd, post={'search': text, 'page': page}).json.get('results', [])
for result in results:
contentType = 'movie' if result['type'] == 'FILM' else 'tvshow'
if not filter_type or (filter_type and contentType == item.contentType):
itemlist.append(item.clone(id=result.get('id'), title=result.get('title'), contentTitle=result.get('title'),
contentSerieName='' if contentType == 'movie' else result.get('title'),
contentPlot=result.get('description'), thumbnail=result.get('poster'),
fanart=result.get('backdrop'), year=result.get('year'), action='episodios' if contentType == 'tvshow' else 'findvideos',
url='{}/{}/{}-{}'.format('https://filmigratis.org', contentType, result.get('id'), support.scrapertools.slugify(result.get('title'))),
contentType=contentType))
support.tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if len(results) >= per_page:
page += 1
support.nextPage(itemlist, item, next_page='https://filmigratis.org/category/' + str(item.n) + '/' + item.genre + '?p=' + str(page))
return itemlist
def search(item, text):
return peliculas(item, text)
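# findvideos: movies are resolved through the "movie" endpoint, episodes through
# "episode/links"; every returned link (including the "special" ones) is passed to
# support.server.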
def findvideos(item):
itemlist = []
if not item.contentSeason: # film
json = httptools.downloadpage(host + api_url + 'movie', post={'movie_id': item.id}).json
else:
json = httptools.downloadpage(host + api_url + 'episode/links', post={'tvshow_id': item.id, 'season_id': item.contentSeason, 'episode_id': item.contentEpisodeNumber}).json
for i in json.get('links', []) + json.get('special', []):
itemlist.append(Item(url=i.get('link')))
return support.server(item, itemlist=itemlist)

View File

@@ -1,10 +0,0 @@
{
"id": "vedohd",
"name": "VedoHD",
"language": ["ita"],
"active": false,
"thumbnail": "vedohd.png",
"banner": "vedohd.png",
"categories": ["movie"],
"settings": []
}

View File

@@ -1,69 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for vedohd
# ------------------------------------------------------------
from core import scrapertools, support, autoplay
from platformcode import logger, config
host = config.get_channel_url()
headers = ""
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
# exclude the site's 'service' posts (announcements and guides) from listings
blacklist = ['CB01.UNO &#x25b6; TROVA L&#8217;INDIRIZZO UFFICIALE ', 'AVVISO IMPORTANTE CB01.UNO', 'GUIDA VEDOHD']
@support.menu
def mainlist(item):
film = [
('I più votati', ["ratings/?get=movies", 'peliculas']),
('I più popolari', ["trending/?get=movies", 'peliculas']),
('Generi', ['ratings/?get=movies', 'menu', 'genres']),
('Anno', ["", 'menu', 'releases']),
]
return locals()
def search(item, text):
logger.info("search",text)
item.url = item.url + "/?s=" + text
return support.dooplay_search(item, blacklist)
def peliculas(item):
return support.dooplay_peliculas(item, False, blacklist)
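# findvideos: walk the dooplay link list, skip trailers, split each label into server name
# and optional HD/3D quality tag and feed the result to autoplay.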
def findvideos(item):
itemlist = []
for link in support.dooplay_get_links(item, host):
if link['title'] != 'Trailer':
server, quality = scrapertools.find_single_match(link['title'], '([^ ]+) ?(HD|3D)?')
if quality:
title = server + " [COLOR blue][" + quality + "][/COLOR]"
else:
title = server
itemlist.append(item.clone(action="play", title=title, url=link['url'], server=server, quality=quality,))
autoplay.start(itemlist, item)
return itemlist
@support.scrape
def menu(item):
return support.dooplay_menu(item, item.args)
def play(item):
logger.debug()
data = support.swzz_get_url(item)
return support.server(item, data, headers=headers)

View File

@@ -16,12 +16,11 @@ def http_Resp(lst_urls):
s = httplib2.Http()
code, resp = s.request(sito, body=None)
if code.previous:
print("r1 http_Resp: %s %s %s %s" %
(code.status, code.reason, code.previous['status'],
code.previous['-x-permanent-redirect-url']))
rslt['code'] = code.previous['status']
rslt['redirect'] = code.previous.get('-x-permanent-redirect-url', code.previous.get('content-location', sito))
rslt['redirect'] = code.get('content-location', sito)
rslt['status'] = code.status
print("r1 http_Resp: %s %s %s %s" %
(code.status, code.reason, rslt['code'], rslt['redirect']))
else:
rslt['code'] = code.status
except httplib2.ServerNotFoundError as msg:
@@ -32,6 +31,7 @@ def http_Resp(lst_urls):
# [Errno 111] Connection refused
rslt['code'] = 111
except:
print()
rslt['code'] = 'Connection error'
return rslt