Rewritten Dreamsub with autorenumber support

This commit is contained in:
Alhaziel
2019-06-08 11:58:00 +02:00
parent eecf79c6a2
commit fd02f208c0
4 changed files with 61 additions and 204 deletions

View File

@@ -286,7 +286,6 @@ def findvideos(item):
log()
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
matches, data = support.match(item, r'class="tab.*?data-name="([0-9]+)">([^<]+)</span', headers=headers)
videoData = ''

View File

@@ -1,50 +1,57 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Ringraziamo Icarus crew
# Canale per dreamsub
# ------------------------------------------------------------
import re
import urlparse
from core import scrapertools, httptools, servertools, tmdb
from core import scrapertoolsV2, httptools, servertools, tmdb, support
from specials.autorenumber import renumber
from core.support import menu, log, scrape
from core.item import Item
from platformcode import logger, config
__channel__ = "dreamsub"
host = config.get_channel_url(__channel__)
list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo']
list_quality = ['default', '480p', '720p', '1080p']
# NOTE(review): this span is a rendered git diff with the +/- markers lost.
# Two versions of mainlist() are merged: the removed hand-built menu
# (logger.info ... the "Cerca" Item) and, from log() onward, the new
# support.menu()-based menu that replaces it.  As shown it is not valid
# Python; only the second half survives in the committed file.
def mainlist(item):
# --- old (removed) implementation ---
logger.info("kod.dreamsub mainlist")
itemlist = [Item(channel=item.channel,
title="[COLOR azure]Anime / Cartoni[/COLOR]",
action="serietv",
url="%s/anime" % host,
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=item.channel,
title="[COLOR azure]Categorie[/COLOR]",
action="categorie",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR azure]Ultimi episodi Anime[/COLOR]",
action="ultimiep",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=item.channel,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
# --- new (added) implementation: builds the same four entries through the
# shared support.menu() helper, then applies channel autoplay/config ---
log()
itemlist = []
menu(itemlist, 'Anime / Cartoni', 'peliculas', host + '/anime', 'tvshow')
menu(itemlist, 'Categorie', 'categorie', host + '/filter?genere=', 'tvshow')
menu(itemlist, 'Ultimi Episodi', 'last', host, 'episode')
menu(itemlist, 'Cerca', 'search')
support.aplay(item, itemlist, list_servers, list_quality)
support.channel_config(item, itemlist)
return itemlist
def search(item, texto):
    """Channel search entry point.

    Overwrites ``item.url`` with the site search URL for *texto* and
    delegates the scraping to ``peliculas()``.

    Args:
        item: channel Item; its ``url`` is replaced with the search URL.
        texto: user-supplied search string.

    Returns:
        The Item list produced by ``peliculas()``, or ``[]`` on error.
    """
    log(texto)
    item.url = host + '/search/' + texto
    try:
        return peliculas(item)
    except Exception:
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt.  Log the failure and return an empty list so
        # a multi-channel global search keeps running when this channel
        # breaks.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
# NOTE(review): diff residue — old and new lines of newest() are merged and
# the middle of the function is elided by the `@@` hunk marker below (the
# `except` matching the `try` is not visible here).  Old/new pairs:
# logger.info vs log(categoria), and the hard-coded URL vs `host`.
def newest(categoria):
logger.info("kod.altadefinizione01 newest" + categoria)
log(categoria)
itemlist = []
item = Item()
try:
if categoria == "anime":
# old hard-coded site URL, replaced by the configurable `host`
item.url = "https://www.dreamsub.tv"
item.url = host
item.action = "ultimiep"
itemlist = ultimiep(item)
@@ -60,196 +67,44 @@ def newest(categoria):
return itemlist
# NOTE(review): remnant of the removed serietv() — only its header survives
# in this rendering; the commit replaces it with peliculas() just below.
def serietv(item):
logger.info("kod.dreamsub peliculas")
itemlist = []
def peliculas(item):
    """List the channel's shows via the generic support scraper.

    Scrapes url/title/year triples from the listing page, wires each
    entry to the 'episodios' action, follows pagination, and runs the
    result through the autorenumber helper before returning it.
    """
    entry_pattern = r'Lingua[^<]+<br>\s*<a href="(?:Lista episodi )?([^"]+)" title="(?:Lista episodi )?(.*?)(?: \(([0-9]+)\))?(?: Streaming)?">'
    listing_block = '<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">'
    next_page = '<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">'
    shows = scrape(item, entry_pattern, ['url', 'title', 'year'],
                   action='episodios',
                   patron_block=listing_block,
                   patronNext=next_page)
    return renumber(shows)
# NOTE(review): orphaned body of the removed serietv() (diff residue).  In
# the committed file this whole span is deleted and replaced by the
# scrape()/renumber() call in peliculas().  Kept here byte-for-byte.
# Load the page
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data,
'<input type="submit" value="Vai!" class="blueButton">(.*?)<div class="footer">')
# Extract the contents
patron = 'Lingua[^<]+<br>\s*<a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
scrapedurl = host + scrapedurl
scrapedplot = ""
scrapedthumbnail = ""
# strip boilerplate words so only the show name remains
scrapedtitle = scrapedtitle.replace("Streaming", "")
scrapedtitle = scrapedtitle.replace("Lista episodi ", "")
itemlist.append(
Item(channel=item.channel,
action="episodios",
contentType="tvshow",
title="[COLOR azure]%s[/COLOR]" % scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
show=scrapedtitle,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination: append a "next page" entry when the pager has a further link
patronvideos = '<li class="currentPage">[^>]+><li[^<]+<a href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=item.channel,
action="serietv",
title="[COLOR lightgreen]" + config.get_localized_string(30992) + "[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def ultimiep(item):
    """Build the list of recently added episodes.

    Downloads item.url, isolates the "recentAddedEpisodesAnimeDDM" list,
    and returns one findvideos Item per episode, with TMDB info applied
    and contentType forced to "episode".
    """
    logger.info("kod.dreamsub ultimiep")
    page = httptools.downloadpage(item.url).data
    block = scrapertools.find_single_match(
        page, '<ul class="last" id="recentAddedEpisodesAnimeDDM">(.*?)</ul>')
    episode_re = re.compile('<li><a href="([^"]+)"[^>]+>([^<]+)<br>', re.DOTALL)
    itemlist = []
    for url_path, raw_title in episode_re.findall(block):
        # Zero-pad the trailing episode number ("Show 7" -> "Show 07")
        padded = scrapertools.find_single_match(raw_title, r'\d+$').zfill(2)
        title = re.sub(r'\d+$', padded, raw_title)
        # Show name = title without the trailing episode number(s)
        show_name = re.sub(r'\d*-?\d+$', '', title).strip()
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 contentType="tvshow",
                 title=title,
                 fulltitle=show_name,
                 text_color="azure",
                 url=host + url_path,
                 thumbnail="",
                 show=show_name,
                 plot="",
                 folder=True))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # "tvshow" was needed for the TMDB lookup above; flip to episode now
    for entry in itemlist:
        entry.contentType = "episode"
    return itemlist
def last(item):
    """Recently added episodes via the generic support scraper.

    Extracts url/title/episode triples from the
    "recentAddedEpisodesAnimeDDM" list and returns the scraped Items.
    """
    container = '<ul class="last" id="recentAddedEpisodesAnimeDDM">(.*?)</ul>'
    entry = r'<li><a href="([^"]+)"[^>]+>([^<]+)(\d+)<br>'
    return scrape(item, entry, ['url', 'title', 'episode'],
                  patron_block=container)
# NOTE(review): diff residue — the removed and the new categorie() are
# merged.  Old: manual downloadpage + findall and host-based URL, building
# "serietv" Items.  New: support.match() and item.url-based URL (the new
# Item kwargs and return appear further below as an orphaned tail).
def categorie(item):
logger.info("[dreamsub.py] categorie")
log()
itemlist = []
# --- old (removed) genre extraction ---
data = httptools.downloadpage(item.url).data
blocco = scrapertools.find_single_match(data,
r'<select name="genere" id="genere" class="selectInput">(.*?)</select>')
patron = r'<option value="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(blocco)
# --- new (added) one-liner replacement ---
matches = support.match(item, r'<option value="([^"]+)">', r'<select name="genere" id="genere" class="selectInput">(.*?)</select>')[0]
for value in matches:
# old URL built from host; new URL appended to item.url
url = host + '/filter?genere=' + value
url = item.url + value
itemlist.append(
Item(channel=item.channel,
action="serietv",
title="[COLOR azure]%s[/COLOR]" % value.capitalize(),
url=url,
extra="tv",
thumbnail=item.thumbnail,
folder=True))
return itemlist
# NOTE(review): removed pre-rewrite search() — a duplicate of the def
# search() earlier in this rendering; in a flat file the later definition
# would shadow the earlier one at import time.
def search(item, texto):
logger.info("[dreamsub.py] " + item.url + " search " + texto)
item.url = "%s/search/%s" % (host, texto)
try:
return serietv(item)
# Keep the (global) search going in case of error
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# NOTE(review): orphaned tail of the NEW categorie() (diff residue): the
# keyword arguments of its Item(...) call plus its return statement,
# separated from their function by the rendering.
contentType=item.contentType,
action="peliculas",
title=support.typo(value, 'bold'),
url=url))
return support.thumb(itemlist)
# NOTE(review): diff residue — the removed manual episodios() body is shown
# in full, followed by the two-line scrape()/renumber() replacement that
# the commit keeps.
def episodios(item):
# --- old (removed) implementation ---
logger.info("kod.channels.dreamsub episodios")
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<div class="seasonEp">(.*?)<div class="footer">')
patron = '<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, title1, title2, title3 in matches:
scrapedurl = host + scrapedurl
scrapedtitle = title1 + " " + title2 + title3
# strip site boilerplate and collapse runs of whitespace
scrapedtitle = scrapedtitle.replace("Download", "")
scrapedtitle = scrapedtitle.replace("Streaming", "")
scrapedtitle = scrapedtitle.replace("& ", "")
scrapedtitle = re.sub(r'\s+', ' ', scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType="episode",
fulltitle=scrapedtitle,
show=item.show,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=item.thumbnail,
plot=item.plot,
folder=True))
# offer "add to video library" when the feature is enabled
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title="[COLOR lightblue]%s[/COLOR]" % config.get_localized_string(30161),
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
return itemlist
# --- new (added) implementation: generic scraper + autorenumber ---
itemlist = scrape(item, r'<li><a href="([^"]+)"[^<]+<b>(.*?)<\/b>[^>]+>([^<]+)<\/i>', ['url','title','title2'], patron_block='<div class="seasonEp">(.*?)<div class="footer">')
return renumber(itemlist, item, 'bold')
# NOTE(review): diff residue — old and new findvideos() merged.  Old path:
# manual server-title decoration ending at the first `return itemlist`.
# New path: log()/scrapertoolsV2 and the final support.server() call.
def findvideos(item):
logger.info()
# Python 2 print statement — looks like a leftover debug line; TODO confirm
# it is removed in the committed file
print item.url
log()
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
# keepem.online links redirect; resolve each and re-scan for servers
if 'keepem.online' in data:
urls = scrapertools.find_multiple_matches(data, r'(https://keepem\.online/f/[^"]+)"')
urls = scrapertoolsV2.find_multiple_matches(data, r'(https://keepem\.online/f/[^"]+)"')
for url in urls:
url = httptools.downloadpage(url).url
itemlist += servertools.find_video_items(data=url)
# --- old (removed) per-server title decoration ---
for videoitem in itemlist:
server = re.sub(r'[-\[\]\s]+', '', videoitem.title)
videoitem.title = "".join(
["[[COLOR orange]%s[/COLOR]] " % server.capitalize(), "[COLOR azure]%s[/COLOR]" % item.title])
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
videoitem.contentType = item.contentType
return itemlist
# --- new (added) return via the shared support helper ---
return support.server(item, data, itemlist)

View File

@@ -321,25 +321,27 @@ def thumb(itemlist=[]):
icon_dict = {'channels_movie':['film'],
'channels_tvshow':['serie','tv','episodi','episodio'],
'channels_documentary':['documentari','documentario'],
'channels_all':['tutti'],
'news':['novità', "novita'", 'aggiornamenti'],
'now_playing':['cinema', 'in sala'],
'channels_anime':['anime'],
'genres':['genere', 'generi', 'categorie', 'categoria'],
'channels_animation': ['animazione', 'cartoni'],
'channels_adventure': ['avventura'],
'channels_action':['azione'],
'channels_action':['azione', 'arti marziali'],
'channels_biographical':['biografico'],
'channels_comedy':['comico','commedia'],
'channels_adult':['erotico'],
'channels_comedy':['comico','commedia', 'demenziale'],
'channels_adult':['erotico', 'hentai'],
'channels_drama':['drammatico'],
'channels_syfy':['fantascienza'],
'channels_fantasy':['fantasy'],
'channels_crime':['gangster','poliziesco'],
'channels_grotesque':['grottesco'],
'channels_war':['guerra'],
'channels_children':['bambini'],
'horror':['horror'],
'lucky': ['fortunato'],
'channels_musical':['musical'],
'channels_musical':['musical', 'musica'],
'channels_mistery':['mistero', 'giallo'],
'channels_noir':['noir'],
'popular' : ['popolari','popolare', 'più visti'],

View File

@@ -217,8 +217,8 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
if scraped['type'] in variants:
action = name
if inspect.stack()[1][3] == 'episodios': item.contentType = 'episode'
if inspect.stack()[1][3] == 'episodios': item.contentType = 'episode'
if scraped["title"] not in blacklist:
it = Item(
channel=item.channel,
@@ -240,7 +240,8 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
itemlist.append(it)
checkHost(item, itemlist)
if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \
if (item.contentType == "tvshow" and (action != "findvideos" and action != "play")) \
or (item.contentType == "episode" and action != "play") \
or (item.contentType == "movie" and action != "play"):
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
else: