Added D.S.D.A. (formerly DocumentariStreamingDa)
@@ -13,8 +13,8 @@
     "casacinemaInfo": "https://casacinema.kim",
     "cb01anime": "https://www.cineblog01.ink",
     "cinetecadibologna": "http://cinestore.cinetecadibologna.it",
-    "documentaristreamingda": "https://documentari-streaming-da.com",
     "dreamsub": "https://dreamsub.stream",
+    "dsda": "https://www.dsda.press/",
     "fastsubita": "https://fastsubita.com",
     "filmgratis": "https://www.filmaltadefinizione.org",
     "filmigratis": "https://filmigratis.org",
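Both channel modules in this diff resolve their base URL at runtime with host = config.get_channel_url(), which presumably looks the channel id up in this host map: the removed documentaristreamingda entry gives way to a dsda key pointing at https://www.dsda.press/.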
channels/documentaristreamingda.json (deleted file, 36 lines)
@@ -1,36 +0,0 @@
{
    "id": "documentaristreamingda",
    "name": "DocumentariStreamingDa",
    "language": ["ita"],
    "active": true,
    "adult": false,
    "thumbnail": "documentaristreamingda.png",
    "banner": "documentaristreamingda.png",
    "categories": ["documentary"],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Includi ricerca globale",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "include_in_newest_documentales",
            "type": "bool",
            "label": "Includi in Novità - Documentari",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_italiano",
            "type": "bool",
            "label": "Includi in Novità - Italiano",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
channels/documentaristreamingda.py (deleted file, 264 lines)
@@ -1,264 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for documentaristreamingda
# ------------------------------------------------------------
import re
import urlparse

from core import httptools, scrapertools, servertools, support
from core.item import Item
from platformcode import logger, config

host = config.get_channel_url()

list_servers = ['']
list_quality = ['']


def mainlist(item):
    logger.info("kod.documentaristreamingda mainlist")
    itemlist = [Item(channel=item.channel,
                     title="[COLOR azure]Aggiornamenti[/COLOR]",
                     action="peliculas",
                     url=host + "/?searchtype=movie&post_type=movie&sl=lasts&s=",
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR azure]Categorie[/COLOR]",
                     action="categorias",
                     url=host + "/documentari-streaming-dataarchive/",
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
                Item(channel=item.channel,
                     title="[COLOR yellow]Cerca...[/COLOR]",
                     action="search",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    return itemlist


def newest(categoria):
    logger.info("kod.documentaristreamingda newest " + categoria)
    itemlist = []
    item = Item()
    try:
        if categoria == "documentales":
            item.url = host + "/?searchtype=movie&post_type=movie&sl=lasts&s="
            item.action = "peliculas"
            itemlist = peliculas(item)

            if itemlist[-1].action == "peliculas":
                itemlist.pop()

    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def categorias(item):
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, 'Categorie</a></li>(.*?)</ul>')

    # Extract the entries
    patron = '<a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)

    for scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Documentari ", ""))

        html = httptools.downloadpage(scrapedurl).data

        patron = '>Ultime uscite[^<]+<\/h3><a href="([^"]+)"'
        matches = re.compile(patron, re.DOTALL).findall(html)
        for url in matches:
            url = url.replace("&amp;", "&")
            itemlist.append(
                Item(channel=item.channel,
                     action="peliculas",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=url,
                     thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png",
                     folder=True))

    return itemlist


def search(item, texto):
    logger.info("kod.documentaristreamingda " + item.url + " search " + texto)
    item.url = host + "/?searchtype=movie&post_type=movie&s=" + texto
    try:
        return peliculas(item)
    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def peliculas(item):
    logger.info("kod.documentaristreamingda peliculas")
    itemlist = []

    # Load the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries
    patron = '<div class="movie-poster">\s*<img[^s]+src="([^"]+)"[^=]+=[^=]+="([^"]+)"[^>]+>[^<]+<a[^h]+href="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        # html = httptools.downloadpage(scrapedurl)
        # start = html.find("</div><h2>")
        # end = html.find("<p><strong>", start)
        # scrapedplot = html[start:end]
        # scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        # scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedplot = ""
        scrapedtitle = scrapedtitle.replace("streaming", "")
        scrapedtitle = scrapedtitle.replace("_", " ")
        scrapedtitle = scrapedtitle.replace("-", " ")
        scrapedtitle = scrapedtitle.title()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Pagination
    patronvideos = '<a class="next page-numbers" href="(.*?)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        scrapedurl = scrapedurl.replace("&amp;", "&")
        itemlist.append(
            Item(channel=item.channel,
                 action="peliculas",
                 title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist


def findvideos(item):
    logger.info("kod.documentaristreamingda findvideos")
    return support.server(item)  # , data= item.url)

##
## data = httptools.downloadpage(item.url).data
##
## links = []
## begin = data.find('<div class="moview-details-text">')
## if begin != -1:
##     end = data.find('<!-- //movie-details -->', begin)
##     mdiv = data[begin:end]
##
##     items = [[m.end(), m.group(1)] for m in re.finditer('<b style="color:#333333;">(.*?)<\/b>', mdiv)]
##     if items:
##         for idx, val in enumerate(items):
##             if idx == len(items) - 1:
##                 _data = mdiv[val[0]:-1]
##             else:
##                 _data = mdiv[val[0]:items[idx + 1][0]]
##
##             for link in re.findall('<a.*?href="([^"]+)"[^>]+>.*?<b>(.*?)<\/b><\/a>+', _data):
##                 if not link[0].strip() in [l[1] for l in links]: links.append(
##                     [val[1], link[0].strip(), link[1].strip()])
##
##     items = [[m.end(), m.group(1)] for m in re.finditer('<p><strong>(.*?)<\/strong><\/p>', mdiv)]
##     if items:
##         _title = ''
##         for idx, val in enumerate(items):
##             if idx == len(items) - 1:
##                 _data = mdiv[val[0]:-1]
##             else:
##                 _data = mdiv[val[0]:items[idx + 1][0]]
##
##             for link in re.findall('<a\s.*?href="([^"]+)".*?>(?:<span[^>]+>)*(?:<strong>)*([^<]+)', _data):
##                 if not link[0].strip() in [l[1] for l in links]:
##                     if not link[1].strip() in link[0]: _title = link[1].strip()
##                     links.append([_title, link[0].strip(), 'unknown'])
##
##     items = [[m.start(), m.group(1)] for m in re.finditer('<li><strong>([^<]+)<', mdiv)]
##     if items:
##         for idx, val in enumerate(items):
##             if idx == len(items) - 1:
##                 _data = mdiv[val[0]:-1]
##             else:
##                 _data = mdiv[val[0]:items[idx + 1][0]]
##
##             for link in re.findall('<a\s.*?href="([^"]+)".*?>(?:<span[^>]+>)*(?:<strong>)*([^<]+)', _data):
##                 if not link[0].strip() in [l[1] for l in links]: links.append(
##                     [val[1], link[0].strip(), link[1].strip()])
##
## itemlist = []
## if links:
##     for l in links:
##         title = unicode(l[0], 'utf8', 'ignore')
##         title = title.replace(u'\xa0', ' ').replace('Documentario ', '').replace(' doc ', ' ').replace(' streaming', '').replace(' Streaming', '')
##         url = l[1]
##         action = "play"
##         server = "unknown"
##         folder = False
##
##         if url == '#' or not title: continue
##
##         logger.info('server: %s' % l[2])
##         if l[2] != 'unknown':
##             server = unicode(l[2], 'utf8', 'ignore')
##         else:
##             logger.info(url)
##             match = re.search('https?:\/\/(?:www\.)*([^\.]+)\.', url)
##             if match:
##                 server = match.group(1)
##
##             if server == "documentari-streaming-db":
##                 action = "findvideos"
##                 folder = True
##             logger.info('server: %s, action: %s' % (server, action))
##
##         logger.info(title + ' - [COLOR blue]' + server + '[/COLOR]')
##
##         itemlist.append(Item(
##             channel=item.channel,
##             title=title + ' - [COLOR blue]' + server + '[/COLOR]',
##             action=action,
##             server=server,  # servertools.get_server_from_url(url),
##             url=url,
##             thumbnail=item.thumbnail,
##             fulltitle=title,
##             show=item.show,
##             plot=item.plot,
##             parentContent=item,
##             folder=folder)
##         )
## else:
##     itemlist = servertools.find_video_items(data=data)
##
##     for videoitem in itemlist:
##         videoitem.title = "".join([item.title, '[COLOR green][B]' + videoitem.title + '[/B][/COLOR]'])
##         videoitem.fulltitle = item.fulltitle
##         videoitem.show = item.show
##         videoitem.thumbnail = item.thumbnail
##         videoitem.channel = item.channel
##
## return itemlist
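Note the contrast with the replacement below: where this deleted channel downloads pages, compiles regexes, and assembles each Item by hand (including its own pagination block), the new dsda.py delegates that plumbing to the addon's support helpers (@support.menu, @support.scrape, support.server) and reduces to regex patterns plus a couple of hooks.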
channels/dsda.json (new file, 11 lines)
@@ -0,0 +1,11 @@
{
    "id": "dsda",
    "name": "D.S.D.A",
    "language": ["ita"],
    "active": true,
    "adult": false,
    "thumbnail": "dsda.png",
    "banner": "dsda.png",
    "categories": ["documentary"],
    "settings": []
}
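This manifest appears to be what registers the channel with the addon: id matches the module name under channels/, categories controls which menus list it, and thumbnail/banner name the artwork. Unlike the deleted manifest, settings is empty, so this channel exposes no per-channel toggles.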
channels/dsda.py (new file, 126 lines)
@@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Thanks to the Icarus crew
# Channel for documentaristreamingda
# ------------------------------------------------------------
import re
import urlparse

from core import httptools, scrapertools, servertools, support
from core.item import Item
from platformcode import logger, config

host = config.get_channel_url()


@support.menu
def mainlist(item):
    docu = [('Documentari {bullet bold}', ('/elenco-documentari', 'peliculas')),
            ('Categorie {submenu}', ('', 'menu')),
            ('Cerca... {bullet bold}', ('', 'search')), ]
    return locals()


@support.scrape
def menu(item):
    action = 'peliculas'
    patronMenu = r'<li class="menu-item menu-item-type-taxonomy[^>]+>\s*<a href="(?P<url>[^"]+)"[^>]+>(?P<title>[^<]+)<'

    def fullItemlistHook(itemlist):
        item_list = []
        title_list = []
        for item in itemlist:
            if item.title not in title_list:
                item_list.append(item)
                title_list.append(item.title)
        itemlist = item_list
        return itemlist
    return locals()


def newest(categoria):
    support.log()
    item = Item()
    try:
        if categoria == "documentales":
            item.url = host + "/elenco-documentari"
            item.action = "peliculas"
            return peliculas(item)

    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            support.logger.error("{0}".format(line))
        return []


def search(item, texto):
    support.log(texto)
    item.url = host + "/?s=" + texto
    try:
        return peliculas(item)
    # Continue the search in case of error
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


@support.scrape
def peliculas(item):
    if item.args == 'collection':
        patron = r'<div class="cover-racolta">\s*<a href="(?P<url>[^"]+)"[^>]+>\s*<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>[^>]+>(?P<title>[^<]+)<'
    elif item.args == 'raccolta':
        patron = r'<a (?:style="[^"]+" )?href="(?P<url>[^"]+)"[^>]+>(?:[^>]+><strong>)?(?P<title>[^<]+)(?:</a>)?</strong'
    else:
        patron = r'<article[^>]+>[^>]+>[^>]+>(?:<img width="[^"]+" height="[^"]+" src="(?P<thumb>[^"]+)"[^>]+>)?.*?<a href="(?P<url>[^"]+)">\s*(?P<title>[^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*<p>(?P<plot>[^<]+)<'
    patronNext = r'<a class="page-numbers next" href="([^"]+)">'

    # select category
    def itemHook(item):
        title = support.re.sub(r'(?:[Ss]erie\s*|[Ss]treaming(?:\s*[Dd][Aa])?\s*|[Cc]ollezione\s*|[Rr]accolta\s*|[Dd]ocumentari(?:o)?\s*)?', '', item.fulltitle).strip()
        if 'serie' in item.fulltitle.lower():
            item.contentType = 'tvshow'
            item.action = 'episodios'
            item.contentSerieName = title
            item.contentTitle = ''
        elif 'collezione' in item.fulltitle.lower():
            item.args = 'collection'
            item.action = 'peliculas'
            item.contentTitle = title
            item.contentSerieName = ''
        elif 'raccolta' in item.fulltitle.lower():
            item.args = 'raccolta'
            item.action = 'peliculas'
            item.contentTitle = title
            item.contentSerieName = ''
        else:
            item.contentTitle = title
            item.contentSerieName = ''

        item.title = support.typo(title, 'bold')
        item.fulltitle = item.show = title
        return item

    # remove duplicates
    def fullItemlistHook(itemlist):
        item_list = []
        title_list = []
        for item in itemlist:
            if item.title not in title_list:
                item_list.append(item)
                title_list.append(item.title)
        itemlist = item_list
        return itemlist
    return locals()


@support.scrape
def episodios(item):
    patron = r'class="title-episodio">(?P<episode>[^<]+)<(?P<url>.*?)<p'
    return locals()


def findvideos(item):
    support.log()
    if item.args == 'raccolta' or item.contentType == 'episode':
        return support.server(item, item.url)
    else:
        return support.server(item)
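menu() and peliculas() above share one contract: decorated with @support.scrape, they merely bind names such as patron, patronNext, itemHook, and fullItemlistHook and then return locals(); the decorator does the downloading, matching, and Item construction (and presumably the pagination via patronNext). A minimal sketch of that mechanism, with a hypothetical fetch() helper and plain dicts standing in for KoD's Item objects:

# Minimal sketch of a locals()-driven scrape decorator; fetch() and the
# dict-based items are hypothetical stand-ins, not KoD's real support.scrape.
import re
import urllib2  # Python 2, matching the channel code


def fetch(url):
    # hypothetical stand-in for httptools.downloadpage(url).data
    return urllib2.urlopen(url).read()


def scrape(func):
    def wrapper(item):
        ctx = func(item)  # the channel function returns its locals()
        data = fetch(item.url)
        itemlist = []
        for match in re.finditer(ctx['patron'], data, re.DOTALL):
            # each named group (url, title, thumb, plot...) becomes a field
            new_item = dict(match.groupdict(), channel=item.channel)
            if 'itemHook' in ctx:  # optional per-item post-processing
                new_item = ctx['itemHook'](new_item)
            itemlist.append(new_item)
        if 'fullItemlistHook' in ctx:  # optional whole-list post-processing
            itemlist = ctx['fullItemlistHook'](itemlist)
        return itemlist
    return wrapper

The practical effect is visible in peliculas(): supporting another page layout only means selecting a different patron regex, while hooks like the duplicate filter stay untouched.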
@@ -295,7 +295,7 @@ def scrapeBlock(item, args, block, patron, headers, action, pagination, debug, t
                          quality=quality,
                          url=scraped["url"],
                          infoLabels=infolabels,
-                         thumbnail=item.thumbnail if function == 'episodios' and not scraped["thumb"] else scraped["thumb"] ,
+                         thumbnail=item.thumbnail if function == 'episodios' and not scraped["thumb"] else scraped["thumb"] if scraped["thumb"] else '',
                          args=item.args,
                          contentSerieName= scraped['title'] if item.contentType or CT != 'movie' and function != 'episodios' else item.fulltitle if function == 'episodios' else '',
                          contentTitle= scraped['title'] if item.contentType or CT == 'movie' else '',
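The one-line change normalizes the thumbnail fallback: previously, a non-episode item whose page yielded no thumb group passed the empty match straight through; the appended conditional (if scraped["thumb"] else '') now coerces that case to an empty string.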