From 2e33775aa95e764c11c0119f7b4e2cf266c45a37 Mon Sep 17 00:00:00 2001
From: Alhaziel
Date: Fri, 31 May 2019 10:40:39 +0200
Subject: [PATCH 01/10] SetResolvedUrl as default value

---
 resources/settings.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/resources/settings.xml b/resources/settings.xml
index 79ac5830..b58c1f94 100644
--- a/resources/settings.xml
+++ b/resources/settings.xml
@@ -1,7 +1,7 @@
-
+

From c95617f707fb33183cda4f80fd6445972dd7b82c Mon Sep 17 00:00:00 2001
From: Alhaziel
Date: Fri, 31 May 2019 18:16:36 +0200
Subject: [PATCH 02/10] Little Fix to Wstream

---
 servers/wstream.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/servers/wstream.py b/servers/wstream.py
index 15114a7f..5b0d4cd3 100644
--- a/servers/wstream.py
+++ b/servers/wstream.py
@@ -28,17 +28,19 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     vid = scrapertools.find_multiple_matches(data, 'download_video.*?>.*?<.*?([^\,,\s]+)')
     headers.append(['Referer', page_url])
 
-    post_data = scrapertools.find_single_match(data,
-                                               "\s*")
+    post_data = scrapertools.find_single_match(data,"\s*")
 
     if post_data != "":
         from lib import jsunpack
         data = jsunpack.unpack(post_data)
 
-    media_url = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
+    block = scrapertools.find_single_match(data, 'sources:\s*\[[^\]]+\]')
+    if block: data = block
+
+    media_urls = scrapertools.find_multiple_matches(data, '(http.*?\.mp4)')
 
     _headers = urllib.urlencode(dict(headers))
 
     i = 0
-    for media_url in media_url:
+    for media_url in media_urls:
         video_urls.append([vid[i] + " mp4 [wstream] ", media_url + '|' + _headers])
         i = i + 1
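Patch 02 above narrows the mp4 extraction to the player's "sources: [...]" block of the unpacked script, so stray .mp4 URLs elsewhere on the page no longer leak into the result list. A minimal sketch of that logic, using an invented unpacked script and a local stand-in for KOD's scrapertools helper (this is not the real servers/wstream.py):

import re

def find_single_match(data, patron):
    # stand-in for scrapertools.find_single_match
    m = re.search(patron, data, re.DOTALL)
    return m.group(0) if m else ''

# hypothetical output of jsunpack.unpack(): the player setup plus an unrelated mp4 link
data = 'jwplayer().setup({sources: [{file:"http://cdn.example/video_480.mp4"}]});var ad="http://ads.example/spot.mp4";'

# narrow to the sources block when present, as the patch does
block = find_single_match(data, r'sources:\s*\[[^\]]+\]')
if block: data = block

# only the real stream now survives the generic mp4 regex
media_urls = re.findall(r'(http.*?\.mp4)', data)
print(media_urls)  # ['http://cdn.example/video_480.mp4']

Without the narrowing step the ad URL would come back as a second playable link, which is exactly the misbehaviour the two added lines guard against.
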
From a7effd0d7d8568a1125c2dd7ccf02b89814d14b5 Mon Sep 17 00:00:00 2001
From: 4l3x87 <50104109+4l3x87@users.noreply.github.com>
Date: Fri, 31 May 2019 20:56:36 +0200
Subject: [PATCH 03/10] Improvements to channels Guardaserie.click, Fastsubita
 and support (#41)

---
 channels/fastsubita.py       | 195 ++++++++++-------------------------
 channels/guardaserieclick.py | 164 +++++++++++------------------
 core/support.py              |   8 +-
 3 files changed, 117 insertions(+), 250 deletions(-)

diff --git a/channels/fastsubita.py b/channels/fastsubita.py
index c8f575f7..000c3dab 100644
--- a/channels/fastsubita.py
+++ b/channels/fastsubita.py
@@ -4,13 +4,10 @@
 # Canale per fastsubita
 # ------------------------------------------------------------
 
-import re
-
-import channelselector
 from core import scrapertools, httptools, tmdb, support
 from core.item import Item
+from core.support import log
 from platformcode import config, logger
-from specials import autoplay
 
 __channel__ = 'fastsubita'
 host = config.get_setting("channel_host", __channel__)
@@ -19,9 +16,6 @@
 list_language = IDIOMAS.values()
 list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vidoza', 'vidtome']
 list_quality = ['default']
-# checklinks = config.get_setting('checklinks', 'fastsubita')
-# checklinks_number = config.get_setting('checklinks_number', 'fastsubita')
-
 headers = [
     ['Host', 'fastsubita.com'],
     ['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'],
     ['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'],
     ['Accept-Language', 'en-US,en;q=0.5'],
@@ -39,32 +33,23 @@ PERPAGE = 15
 
 
 def mainlist(item):
-    logger.info(item.channel+" mainlist")
+    log()
     itemlist = []
-    support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
-    support.menu(itemlist, 'Novità submenu', 'pelicuals_tv', host,'tvshow')
-    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
-    support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
-
-    autoplay.init(item.channel, list_servers, list_quality)
-    autoplay.show_option(item.channel, itemlist)
-
-    itemlist.append(
-        Item(channel='setting',
-             action="channel_config",
-             title=support.typo("Configurazione Canale color lime"),
-             config=item.channel,
-             folder=False,
-             thumbnail=channelselector.get_thumb('setting_0.png'))
-    )
+    support.menu(itemlist, 'Novità bold', 'pelicuals_tv', host, 'tvshow')
+    support.menu(itemlist, 'Serie TV bold', 'lista_serie', host, 'tvshow')
+    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host, 'tvshow', args=['serie'])
+    support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
+    support.aplay(item, itemlist, list_servers, list_quality)
+    support.channel_config(item, itemlist)
 
     return itemlist
 
 
 # ----------------------------------------------------------------------------------------------------------------
 def cleantitle(scrapedtitle):
     scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
-    scrapedtitle = scrapedtitle.replace('’', '\'').replace('×','x').replace('×','x')
+    scrapedtitle = scrapedtitle.replace('’', '\'').replace('×', 'x').replace('×', 'x').replace('"', "'")
 
     return scrapedtitle.strip()
 
@@ -73,7 +58,7 @@ def cleantitle(scrapedtitle):
 
 
 def newest(categoria):
-    logger.info(__channel__+" newest" + categoria)
+    log()
     itemlist = []
     item = Item()
     try:
@@ -96,15 +81,11 @@
 
 
 def pelicuals_tv(item):
-    logger.info(item.channel+" pelicuals_tv")
+    log()
     itemlist = []
 
-    # Load the page
-    data = httptools.downloadpage(item.url, headers=headers).data
-
-    # Extract the contents
-    patron = r'

(.*?)<' - matches = re.compile(patron, re.DOTALL).findall(data) + matches, data = support.match(item, r'

(.*?)<', + headers=headers) for scrapedurl, scrapedtitle in matches: scrapedplot = "" @@ -123,7 +104,7 @@ def pelicuals_tv(item): else: scrapedurl = "http:" + scrapedurl - title = scraped_1+" - "+infoLabels['season']+"x"+infoLabels['episode']+" Sub-ITA" + title = scraped_1 + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " Sub-ITA" itemlist.append( Item(channel=item.channel, @@ -144,35 +125,20 @@ def pelicuals_tv(item): tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) # Paginazione - support.nextPage(itemlist,item,data,'([^<]+)<\/a>' - # patron = r'([^<]+)<\/a>' - # matches = re.compile(patron, re.DOTALL).findall(block) - matches = re.compile(r'', re.DOTALL).findall(block) + matches = support.match(Item(), r'', + r'(.*?)', headers, + url="%s/" % host)[0] index = 0 - # for scrapedurl, scrapedtitle in matches: - # scrapedtitle = cleantitle(scrapedtitle) - # if "http:" not in scrapedurl: - # scrapedurl = "http:" + scrapedurl - # - # if ('S' in scrapedtitle.strip().upper()[0] and len(scrapedtitle.strip()) == 3) or '02' == scrapedtitle: - # # itemlist[index -1][0]+='{|}'+scrapedurl - # continue - # - # itemlist.append([scrapedurl,scrapedtitle]) - # index += 1 + for level, cat, title in matches: title = cleantitle(title) url = '%s?cat=%s' % (host, cat) @@ -183,12 +149,11 @@ def serietv(): itemlist.append([url, title]) index += 1 - - logger.debug(itemlist) return itemlist + def lista_serie(item): - logger.info(item.channel+" lista_serie") + log() itemlist = [] p = 1 @@ -196,16 +161,6 @@ def lista_serie(item): item.url, p = item.url.split('{}') p = int(p) - # logger.debug(p) - # Carica la pagina - # data = httptools.downloadpage(item.url, headers=headers).data - # - # block = scrapertools.find_single_match(data,r'
(.*?)
') - # - # # Estrae i contenuti - # # patron = r'
([^<]+)<\/a>' - # patron = r'([^<]+)<\/a>' - # matches = re.compile(patron, re.DOTALL).findall(block) if '||' in item.url: series = item.url.split('\n\n') matches = [] @@ -235,76 +190,41 @@ def lista_serie(item): contentType='episode', originalUrl=scrapedurl, folder=True)) - # ii += 1 tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) if len(series) >= p * PERPAGE: - scrapedurl = item.url + '{}' + str(p + 1) - itemlist.append( - Item(channel=item.channel, - action='lista_serie', - contentType=item.contentType, - title=support.typo(config.get_localized_string(30992), 'color kod bold'), - url=scrapedurl, - args=item.args, - extra=item.extra, - thumbnail=support.thumb())) + next_page = item.url + '{}' + str(p + 1) + support.nextPage(itemlist, item, next_page=next_page) return itemlist + def findvideos(item): - logger.info(item.channel+" findvideos") + log() itemlist = [] - data = httptools.downloadpage(item.url, headers=headers).data - bloque = scrapertools.find_single_match(data, '
(.*?)
  • ', r'(.*?)', headers)[0] - - for scrapedurl, scrapedtitle in matches: - itemlist.append( - Item(channel=item.channel, - action="lista_serie", - title=scrapedtitle, - contentType="tvshow", - url="".join([host, scrapedurl]), - thumbnail=item.thumbnail, - extra="tv", - folder=True)) - - return itemlist + log() + return support.scrape(item, r'
  • \s]+>([^<]+)
  • ', ['url', 'title'], patron_block=r'(.*?)', headers=headers, action="lista_serie") # ================================================================================================================ # ---------------------------------------------------------------------------------------------------------------- def lista_serie(item): - support.log(item.channel+" lista_serie") + log() itemlist = [] - # data = httptools.downloadpage(item.url, headers=headers).data - # - # patron = r'\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)

    ' - # blocco = scrapertools.find_single_match(data, - # r'(.*?)') - # matches = re.compile(patron, re.DOTALL).findall(blocco) - - patron_block = r'(.*?)' - patron = r'\s*[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)

    ' + patron_block = r'(.*?)' + patron = r'\s[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)

    ' matches, data = support.match(item, patron, patron_block, headers) - for scrapedurl, scrapedimg, scrapedtitle in matches: scrapedtitle = cleantitle(scrapedtitle) - if scrapedtitle not in ['DMCA','Contatti','Lista di tutte le serie tv']: + if scrapedtitle not in ['DMCA', 'Contatti', 'Lista di tutte le serie tv']: itemlist.append( Item(channel=item.channel, action="episodios", @@ -254,7 +222,7 @@ def lista_serie(item): tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - support.nextPage(itemlist,item,data,r"\s*([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>]+>([^<]+)<[^>]+>[^>]+>[^>]+>' - patron += r'[^<]+[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s*' - patron += r'.*?embed="([^"]+)"\s*.*?embed2="([^"]+)?"\s*.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s*' - patron += r'(?:]+>|]+>)?' - # matches = re.compile(patron, re.DOTALL).findall(data) - - # logger.debug(matches) + patron = r'\s([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>]+>([^<]+)<[^>]+>[^>]+>[^>]+>' + patron += r'[^<]+[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s' + patron += r'.*?embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s?' + patron += r'(?:]+>|]+>)?' matches = support.match(item, patron, headers=headers)[0] - - for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2,scrapedurl3,scrapedthumbnail,scrapedthumbnail2 in matches: + for scrapedtitle, scrapedepisodetitle, scrapedplot, scrapedserie, scrapedseason, scrapedepisode, scrapedurl, scrapedurl2, scrapedurl3, scrapedthumbnail, scrapedthumbnail2 in matches: scrapedtitle = cleantitle(scrapedtitle) scrapedepisode = scrapedepisode.zfill(2) scrapedepisodetitle = cleantitle(scrapedepisodetitle) title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip() if 'SUB-ITA' in scrapedtitle: - title +=" Sub-ITA" + title += " Sub-ITA" infoLabels = {} infoLabels['season'] = scrapedseason infoLabels['episode'] = scrapedepisode itemlist.append( - Item(channel=item.channel, - action="findvideos", - title=title, - fulltitle=scrapedtitle, - url=scrapedurl+"\r\n"+scrapedurl2+"\r\n"+scrapedurl3, - contentType="episode", - plot=scrapedplot, - contentSerieName=scrapedserie, - contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '', - infoLabels=infoLabels, - thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail, - folder=True)) + Item(channel=item.channel, + action="findvideos", + title=title, + fulltitle=scrapedtitle, + url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3, + contentType="episode", + plot=scrapedplot, + contentSerieName=scrapedserie, + contentLanguage='Sub-ITA' if 'Sub-ITA' in title else '', + infoLabels=infoLabels, + thumbnail=scrapedthumbnail2 if scrapedthumbnail2 != '' else scrapedthumbnail, + folder=True)) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) @@ -315,22 +277,18 @@ def episodios(item): # ---------------------------------------------------------------------------------------------------------------- def findepvideos(item): - support.log(item.channel+" findepvideos") + log() data = httptools.downloadpage(item.url, headers=headers).data matches = scrapertools.find_multiple_matches(data, item.extra) data = "\r\n".join(matches[0]) item.contentType = 'movie' - itemlist = support.server(item, data=data) - - return itemlist + return support.server(item, data=data) # 
================================================================================================================
 
 
 # ----------------------------------------------------------------------------------------------------------------
 def findvideos(item):
-    support.log(item.channel+" findvideos")
+    log()
     logger.debug(item.url)
-    itemlist = support.server(item, data=item.url)
-
-    return itemlist
+    return support.server(item, data=item.url)
diff --git a/core/support.py b/core/support.py
index 94c93a52..c92b0c8b 100644
--- a/core/support.py
+++ b/core/support.py
@@ -452,7 +452,7 @@ def match(item, patron='', patron_block='', headers='', url=''):
     matches = []
     url = url if url else item.url
     data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data.replace("'", '"')
-    data = re.sub(r'\n|\t|\s\s', '', data)
+    data = re.sub(r'\n|\t|\s\s', ' ', data)
     log('DATA= ', data)
 
     if patron_block:
@@ -500,11 +500,11 @@ def videolibrary(itemlist, item, typography='', function_level=1):
     return itemlist
 
 
-def nextPage(itemlist, item, data, patron, function_level=1):
+def nextPage(itemlist, item, data='', patron='', function_level=1, next_page=''):
     # Function_level is useful if the function is called by another function.
     # If the call is direct, leave it blank
-
-    next_page = scrapertoolsV2.find_single_match(data, patron)
+    if next_page == '':
+        next_page = scrapertoolsV2.find_single_match(data, patron)
 
     if next_page != "":
         if 'http' not in next_page:
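The support.py hunks close out patch 03: match() now collapses newlines, tabs and double spaces to a single space instead of deleting them outright (deleting them could glue adjacent tokens together and break the channel regexes), and nextPage() gains a next_page parameter so a channel that builds its own pagination URL, like fastsubita's '{}' page counter, can skip the regex scan. A rough sketch of the two calling styles the new signature allows, with a local stand-in for scrapertoolsV2 and list entries reduced to plain tuples (the real helper appends Item objects):

import re

def find_single_match(data, patron):
    # stand-in for scrapertoolsV2.find_single_match
    m = re.search(patron, data, re.DOTALL)
    return m.group(1) if m else ''

def nextPage(itemlist, item, data='', patron='', next_page=''):
    # if the caller did not precompute the URL, scrape it from the page
    if next_page == '':
        next_page = find_single_match(data, patron)
    if next_page != '':
        itemlist.append(('Next >>', next_page))

items = []
# old style: extract the link from the page markup
nextPage(items, None, data='<a class="next" href="http://example.com/page/2">2</a>',
         patron=r'class="next" href="([^"]+)"')
# new style: hand over a URL built by the channel itself
nextPage(items, None, next_page='http://example.com/animelist?load_all=1{}2')
print(items)
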
From 48f2d335e3df7fc0c82297d963dec33c4bebd768 Mon Sep 17 00:00:00 2001
From: mac12m99
Date: Fri, 31 May 2019 21:35:33 +0200
Subject: [PATCH 04/10] checkHost -> part1

---
 channels/fastsubita.py |  3 ++-
 core/support.py        | 19 +++++++++++++++++--
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/channels/fastsubita.py b/channels/fastsubita.py
index c8f575f7..242c6bad 100644
--- a/channels/fastsubita.py
+++ b/channels/fastsubita.py
@@ -140,7 +140,7 @@ def pelicuals_tv(item):
                  contentLanguage='Sub-ITA',
                  infoLabels=infoLabels,
                  folder=True))
-
+    support.checkHost(item, itemlist)
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
 
     # Paginazione
@@ -237,6 +237,7 @@ def lista_serie(item):
                  folder=True))
         # ii += 1
 
+    support.checkHost(item, itemlist)
     tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
 
     if len(series) >= p * PERPAGE:
diff --git a/core/support.py b/core/support.py
index 94c93a52..ad9d2f75 100644
--- a/core/support.py
+++ b/core/support.py
@@ -8,7 +8,7 @@
 import urlparse
 
 import xbmcaddon
 from channelselector import thumb
-from core import httptools, scrapertoolsV2, servertools, tmdb
+from core import httptools, scrapertoolsV2, servertools, tmdb, channeltools
 from core.item import Item
 from lib import unshortenit
 from platformcode import logger, config
@@ -225,7 +225,7 @@ def scrape(item, patron = '', listGroups = [], headers="", blacklist="", data=""
             it.__setattr__(lg, match[listGroups.index(lg)])
 
         itemlist.append(it)
-
+    checkHost(item, itemlist)
     if (item.contentType == "episode" and (action != "findvideos" and action != "play")) \
             or (item.contentType == "movie" and action != "play"):
         tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
@@ -243,6 +243,21 @@
     return itemlist
 
 
+def checkHost(item, itemlist):
+    # if there are no results, the user may have changed the host manually, so bring it back
+    # to the default value (this also fixes any host change made on our side)
+    if len(itemlist) == 0:
+        # find the default value
+        defHost = None
+        for s in channeltools.get_channel_json(item.channel)['settings']:
+            if s['id'] == 'channel_host':
+                defHost = s['default']
+                break
+        # compare it with the current one
+        if config.get_setting('channel_host', item.channel) != defHost:
+            config.set_setting('channel_host', defHost, item.channel)
+
+
 def dooplay_get_links(item, host):
     # get links from websites using dooplay theme and dooplay_player
     # return a list of dict containing these values: url, title and server

From e85ef540ffdc60aaf7244c75ab7f4d02d231194d Mon Sep 17 00:00:00 2001
From: 4l3x87 <50104109+4l3x87@users.noreply.github.com>
Date: Sat, 1 Jun 2019 10:00:22 +0200
Subject: [PATCH 05/10] Various fixes (#42)

* Improvements to channels Guardaserie.click, Fastsubita and support

* Refactor series channels with support & fix

* Refactor series / anime channels with support & fix
  New server animeworld.biz

* Fix videolibrary update
---
 channels/animesaturn.py      | 196 +++++++++----------------
 channels/animeworld.py       |  41 +-----
 channels/fastsubita.py       |  14 +-
 channels/guardaserieclick.py |  14 +-
 channels/serietvsubita.py    | 271 +++++++++++------------------------
 channels/serietvu.py         | 176 ++++++++---------------
 core/support.py              |   2 +-
 servers/animeworld.json      |  42 ++++++
 servers/animeworld.py        |  34 +++++
 videolibrary_service.py      |   2 +-
 10 files changed, 308 insertions(+), 484 deletions(-)
 create mode 100644 servers/animeworld.json
 create mode 100644 servers/animeworld.py

diff --git a/channels/animesaturn.py b/channels/animesaturn.py
index 560ecedc..90edb519 100644
--- a/channels/animesaturn.py
+++ b/channels/animesaturn.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # ------------------------------------------------------------
 # Canale per AnimeSaturn
-# Thanks to me
+# Thanks to 4l3x87
 # ----------------------------------------------------------
 
 import re
@@ -10,6 +10,7 @@
 import urlparse
 
 import channelselector
 from core import httptools, tmdb, support, scrapertools, jsontools
 from core.item import Item
+from core.support import log
 from platformcode import logger, config
 from specials import autoplay, autorenumber
@@ -19,30 +20,19 @@
 headers = [['Referer', host]]
 
 IDIOMAS = {'Italiano': 'IT'}
 list_language = IDIOMAS.values()
-list_servers = ['openload','fembed']
-list_quality = ['default']
+list_servers = ['openload', 'fembed', 'animeworld']
+list_quality = ['default', '480p', '720p', '1080p']
 
 
 def mainlist(item):
-    support.log(item.channel + 'mainlist')
+    log()
     itemlist = []
-    support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
-    support.menu(itemlist, 'Novità submenu', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host,'tvshow')
-    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host,args=['tvshow','alfabetico'])
-    support.menu(itemlist, 'Cerca', 'search', host)
-
-
-    autoplay.init(item.channel, list_servers, list_quality)
-    autoplay.show_option(item.channel, itemlist)
-
-    itemlist.append(
-        Item(channel='setting',
-             action="channel_config",
-             title=support.typo("Configurazione Canale color lime"),
-             config=item.channel,
-             folder=False,
-             thumbnail=channelselector.get_thumb('setting_0.png'))
-    )
+    support.menu(itemlist, 'Novità bold', 'ultimiep', "%s/fetch_pages.php?request=episodes" % host, 'tvshow')
+    support.menu(itemlist, 'Anime bold', 'lista_anime', "%s/animelist?load_all=1" % host)
+    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', '%s/animelist?load_all=1' % host, args=['tvshow', 'alfabetico'])
+    support.menu(itemlist, 'Cerca', 'search', host)
+    support.aplay(item, itemlist, list_servers, 
list_quality) + support.channel_config(item, itemlist) return itemlist @@ -50,7 +40,7 @@ def mainlist(item): # ---------------------------------------------------------------------------------------------------------------- def cleantitle(scrapedtitle): scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip()) - scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x') + scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('"', "'") year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)') if year: scrapedtitle = scrapedtitle.replace('(' + year + ')', '') @@ -62,7 +52,7 @@ def cleantitle(scrapedtitle): # ---------------------------------------------------------------------------------------------------------------- def lista_anime(item): - support.log(item.channel + " lista_anime") + log() itemlist = [] PERPAGE = 15 @@ -78,37 +68,33 @@ def lista_anime(item): for i, serie in enumerate(series): matches.append(serie.split('||')) else: - # Carica la pagina - data = httptools.downloadpage(item.url).data - # Estrae i contenuti patron = r']*?>[^>]*?>(.+?)<' - matches = re.compile(patron, re.DOTALL).findall(data) - + matches = support.match(item, patron, headers=headers)[0] scrapedplot = "" scrapedthumbnail = "" for i, (scrapedurl, scrapedtitle) in enumerate(matches): if (p - 1) * PERPAGE > i: continue if i >= p * PERPAGE: break - title = cleantitle(scrapedtitle).replace('(ita)','(ITA)') + title = cleantitle(scrapedtitle).replace('(ita)', '(ITA)') movie = False showtitle = title if '(ITA)' in title: - title = title.replace('(ITA)','').strip() + title = title.replace('(ITA)', '').strip() showtitle = title - title += ' '+support.typo(' (ITA)') + else: + title += ' ' + support.typo('Sub-ITA', '_ [] color kod') infoLabels = {} if 'Akira' in title: movie = True - infoLabels['year']= 1988 + infoLabels['year'] = 1988 if 'Dragon Ball Super Movie' in title: movie = True infoLabels['year'] = 2019 - itemlist.append( Item(channel=item.channel, extra=item.extra, @@ -130,15 +116,7 @@ def lista_anime(item): # Paginazione if len(matches) >= p * PERPAGE: - scrapedurl = item.url + '{}' + str(p + 1) - itemlist.append( - Item(channel=item.channel, - action='lista_anime', - contentType=item.contentType, - title=support.typo(config.get_localized_string(30992), 'color kod bold'), - url=scrapedurl, - args=item.args, - thumbnail=support.thumb())) + support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1))) return itemlist @@ -148,17 +126,14 @@ def lista_anime(item): # ---------------------------------------------------------------------------------------------------------------- def episodios(item): - support.log(item.channel + " episodios") + log() itemlist = [] - data = httptools.downloadpage(item.url).data - + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data anime_id = scrapertools.find_single_match(data, r'\?anime_id=(\d+)') - - #movie or series + # movie or series movie = scrapertools.find_single_match(data, r'\Episodi:\s(\d*)\sMovie') - data = httptools.downloadpage( host + "/loading_anime?anime_id=" + anime_id, headers={ @@ -167,7 +142,7 @@ def episodios(item): patron = r'(.+?)\s*' patron += r'0): + if len(episodes) > 0: item.url = episodes[0].url - itemlist = [] - data = httptools.downloadpage(item.url).data + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data + data = re.sub(r'\n|\t|\s+', ' ', data) patron = r'
    ' url = scrapertools.find_single_match(data, patron) - - data = httptools.downloadpage(url).data - + data = httptools.downloadpage(url, headers=headers, ignore_response_code=True).data + data = re.sub(r'\n|\t|\s+', ' ', data) itemlist = support.server(item, data=data) - if item.contentType == 'movie': - support.videolibrary(itemlist, item, 'color kod') - # Controlla se i link sono validi - # if checklinks: - # itemlist = servertools.check_list_links(itemlist, checklinks_number) - # - # autoplay.start(itemlist, item) - return itemlist + # ================================================================================================================ # ---------------------------------------------------------------------------------------------------------------- def ultimiep(item): - logger.info(item.channel + "ultimiep") + log() itemlist = [] - post = "page=%s" % item.args['page'] if item.args and item.args['page'] else None + p = 1 + if '{}' in item.url: + item.url, p = item.url.split('{}') + p = int(p) + + post = "page=%s" % p if p > 1 else None data = httptools.downloadpage( item.url, post=post, headers={ @@ -259,14 +229,23 @@ def ultimiep(item): scrapedtitle2 = cleantitle(scrapedtitle2) scrapedtitle = scrapedtitle1 + ' - ' + scrapedtitle2 + '' + title = scrapedtitle + showtitle = scrapedtitle + if '(ITA)' in title: + title = title.replace('(ITA)', '').strip() + showtitle = title + else: + title += ' ' + support.typo('Sub-ITA', '_ [] color kod') + + itemlist.append( Item(channel=item.channel, - contentType="tvshow", + contentType="episode", action="findvideos", - title=scrapedtitle, + title=title, url=scrapedurl, fulltitle=scrapedtitle1, - show=scrapedtitle1, + show=showtitle, thumbnail=scrapedthumbnail)) tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) @@ -275,27 +254,17 @@ def ultimiep(item): patronvideos = r'data-page="(\d+)" title="Next">Pagina Successiva' next_page = scrapertools.find_single_match(data, patronvideos) if next_page: - itemlist.append( - Item( - channel=item.channel, - action="ultimiep", - title=support.typo(config.get_localized_string(30992), 'color kod bold'), - url=item.url, - thumbnail= support.thumb(), - args={'page':next_page}, - folder=True)) + support.nextPage(itemlist, item, next_page=(item.url + '{}' + next_page)) return itemlist - # ================================================================================================================ - # ---------------------------------------------------------------------------------------------------------------- def newest(categoria): - logger.info(__channel__ + " newest" + categoria) + log(categoria) itemlist = [] item = Item() item.url = host @@ -323,42 +292,9 @@ def newest(categoria): # ---------------------------------------------------------------------------------------------------------------- def search_anime(item, texto): - logger.info(item.channel + " search_anime: "+texto) + log(texto) itemlist = [] - # data = httptools.downloadpage(host + "/animelist?load_all=1").data - # data = scrapertools.decodeHtmlentities(data) - # - # texto = texto.lower().split('+') - # - # patron = r']*?>[^>]*?>(.+?)<' - # matches = re.compile(patron, re.DOTALL).findall(data) - # - # for scrapedurl, scrapedtitle in [(scrapedurl, scrapedtitle) - # for scrapedurl, scrapedtitle in matches - # if all(t in scrapedtitle.lower() - # for t in texto)]: - # - # title = cleantitle(scrapedtitle).replace('(ita)','(ITA)') - # showtitle = title - # if '(ITA)' in title: - # title = title.replace('(ITA)','').strip() - # showtitle = 
title - # title += ' '+support.typo(' [ITA] color kod') - # - # itemlist.append( - # Item( - # channel=item.channel, - # contentType="episode", - # action="episodios", - # title=title, - # url=scrapedurl, - # fulltitle=title, - # show=showtitle, - # thumbnail="")) - # - # tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - data = httptools.downloadpage(host + "/index.php?search=1&key=%s" % texto).data jsondata = jsontools.load(data) @@ -368,16 +304,15 @@ def search_anime(item, texto): if 'Anime non esistente' in data: continue else: - title = title.replace('(ita)','(ITA)') + title = title.replace('(ita)', '(ITA)') showtitle = title if '(ITA)' in title: title = title.replace('(ITA)', '').strip() showtitle = title - title += ' ' + support.typo(' (ITA)') + else: + title += ' ' + support.typo('Sub-ITA', '_ [] color kod') url = "%s/anime/%s" % (host, data) - logger.debug(title) - logger.debug(url) itemlist.append( Item( @@ -397,7 +332,7 @@ def search_anime(item, texto): # ---------------------------------------------------------------------------------------------------------------- def search(item, texto): - logger.info(item.channel + " search") + log(texto) itemlist = [] try: @@ -416,23 +351,20 @@ def search(item, texto): def list_az(item): - support.log(item.channel+" list_az") + log() itemlist = [] alphabet = dict() - # Scarico la pagina - data = httptools.downloadpage(item.url).data - # Articoli patron = r']*?>[^>]*?>(.+?)<' - matches = re.compile(patron, re.DOTALL).findall(data) + matches = support.match(item, patron, headers=headers)[0] for i, (scrapedurl, scrapedtitle) in enumerate(matches): letter = scrapedtitle[0].upper() if letter not in alphabet: alphabet[letter] = [] - alphabet[letter].append(scrapedurl+'||'+scrapedtitle) + alphabet[letter].append(scrapedurl + '||' + scrapedtitle) for letter in sorted(alphabet): itemlist.append( @@ -444,4 +376,4 @@ def list_az(item): return itemlist -# ================================================================================================================ \ No newline at end of file +# ================================================================================================================ diff --git a/channels/animeworld.py b/channels/animeworld.py index d5714602..fec6c29f 100644 --- a/channels/animeworld.py +++ b/channels/animeworld.py @@ -18,11 +18,9 @@ headers = [['Referer', host]] IDIOMAS = {'Italiano': 'Italiano'} list_language = IDIOMAS.values() -list_servers = ['diretto'] -list_quality = [] +list_servers = ['animeworld', 'verystream', 'streamango', 'openload', 'directo'] +list_quality = ['default', '480p', '720p', '1080p'] -checklinks = config.get_setting('checklinks', 'animeworld') -checklinks_number = config.get_setting('checklinks_number', 'animeworld') def mainlist(item): @@ -47,19 +45,10 @@ def mainlist(item): def generi(item): log() - itemlist = [] patron_block = r'\sGeneri\s*
      (.*?)
    ' patron = r'' - matches = support.match(item,patron, patron_block, headers)[0] - for scrapedurl, scrapedtitle in matches: - itemlist.append(Item( - channel=item.channel, - action="video", - title=scrapedtitle, - url="%s%s" % (host,scrapedurl))) - - return itemlist + return support.scrape(item, patron, ['url','title'], patron_block=patron_block, action='video') # Crea Menu Filtro ====================================================== @@ -183,7 +172,7 @@ def video(item): log() itemlist = [] - matches, data = support.match(item, r'(.*?)<\/a>') + matches, data = support.match(item, r'(.*?)<\/a>', headers=headers) for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches: # Cerca Info come anno o lingua nel Titolo @@ -231,6 +220,9 @@ def video(item): # Concatena le informazioni + + lang = support.typo('Sub-ITA', '_ [] color kod') if '(ita)' not in lang.lower() else '' + info = ep + lang + year + ova + ona + movie + special # Crea il title da visualizzare @@ -329,25 +321,6 @@ def findvideos(item): videoData +='\n'+json['grabber'] - if serverid == '33': - post = urllib.urlencode({'r': '', 'd': 'www.animeworld.biz'}) - dataJson = httptools.downloadpage(json['grabber'].replace('/v/','/api/source/'),headers=[['x-requested-with', 'XMLHttpRequest']],post=post).data - json = jsontools.load(dataJson) - log(json['data']) - if json['data']: - for file in json['data']: - itemlist.append( - Item( - channel=item.channel, - action="play", - title='diretto', - url=file['file'], - quality=file['label'], - server='directo', - show=item.show, - contentType=item.contentType, - folder=False)) - if serverid == '28': itemlist.append( Item( diff --git a/channels/fastsubita.py b/channels/fastsubita.py index 7c04e499..0dae9dbe 100644 --- a/channels/fastsubita.py +++ b/channels/fastsubita.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Thanks Icarus crew & Alfa addon # Canale per fastsubita +# Thanks Icarus crew & Alfa addon & 4l3x87 # ------------------------------------------------------------ from core import scrapertools, httptools, tmdb, support @@ -17,7 +17,7 @@ list_servers = ['verystream', 'openload', 'speedvideo', 'wstream', 'flashx', 'vi list_quality = ['default'] headers = [ - ['Host', 'fastsubita.com'], + ['Host', host.split("//")[-1].split("/")[0]], ['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'], ['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'], ['Accept-Language', 'en-US,en;q=0.5'], @@ -104,7 +104,9 @@ def pelicuals_tv(item): else: scrapedurl = "http:" + scrapedurl - title = scraped_1 + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " Sub-ITA" + + serie = cleantitle(scraped_1) + title = serie + " - " + infoLabels['season'] + "x" + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod') itemlist.append( Item(channel=item.channel, @@ -115,9 +117,9 @@ def pelicuals_tv(item): url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, - show=scraped_1, + show=serie, extra=item.extra, - contentSerieName=scraped_1, + contentSerieName=serie, contentLanguage='Sub-ITA', infoLabels=infoLabels, folder=True)) @@ -301,7 +303,7 @@ def episodios(item, itemlist=[]): infoLabels = {} infoLabels['season'] = season infoLabels['episode'] = episode[2] - title = infoLabels['season'] + 'x' + infoLabels['episode'] + " Sub-ITA" + title = infoLabels['season'] + 'x' + infoLabels['episode'] + " "+support.typo('Sub-ITA', '_ [] color kod') if 
"http:" not in scrapedurl: scrapedurl = "http:" + scrapedurl diff --git a/channels/guardaserieclick.py b/channels/guardaserieclick.py index eafae4cb..51ac9fba 100644 --- a/channels/guardaserieclick.py +++ b/channels/guardaserieclick.py @@ -1,18 +1,16 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Canale per guardaserie.click -# Thanks to Icarus crew & Alfa addon +# Canale per Guardaserie.click +# Thanks to Icarus crew & Alfa addon & 4l3x87 # ------------------------------------------------------------ import re -import channelselector -from core import httptools, scrapertools, servertools, support +from core import httptools, scrapertools, support from core import tmdb from core.item import Item from core.support import log from platformcode import logger, config -from specials import autoplay __channel__ = 'guardaserieclick' host = config.get_setting("channel_host", __channel__) @@ -163,7 +161,7 @@ def serietvaggiornate(item): infoLabels['season'] = episode[0][0] title = str( - "%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], contentlanguage)).strip() + "%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], support.typo(contentlanguage, '_ [] color kod') if contentlanguage else '')).strip() itemlist.append( Item(channel=item.channel, @@ -247,7 +245,7 @@ def episodios(item): scrapedepisodetitle = cleantitle(scrapedepisodetitle) title = str("%sx%s %s" % (scrapedseason, scrapedepisode, scrapedepisodetitle)).strip() if 'SUB-ITA' in scrapedtitle: - title += " Sub-ITA" + title += " "+support.typo("Sub-ITA", '_ [] color kod') infoLabels = {} infoLabels['season'] = scrapedseason @@ -278,7 +276,7 @@ def episodios(item): # ---------------------------------------------------------------------------------------------------------------- def findepvideos(item): log() - data = httptools.downloadpage(item.url, headers=headers).data + data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data matches = scrapertools.find_multiple_matches(data, item.extra) data = "\r\n".join(matches[0]) item.contentType = 'movie' diff --git a/channels/serietvsubita.py b/channels/serietvsubita.py index 3ce72d6b..693e7487 100644 --- a/channels/serietvsubita.py +++ b/channels/serietvsubita.py @@ -1,16 +1,16 @@ # -*- coding: utf-8 -*- # ------------------------------------------------------------ -# Canale per Serie Tv Sub ITA -# Thanks to Icarus crew & Alfa addon +# Canale per Serietvsubita +# Thanks to Icarus crew & Alfa addon & 4l3x87 # ---------------------------------------------------------- + import re import time -import channelselector from core import httptools, tmdb, scrapertools, support from core.item import Item +from core.support import log from platformcode import logger, config -from specials import autoplay __channel__ = "serietvsubita" host = config.get_setting("channel_host", __channel__) @@ -18,33 +18,19 @@ headers = [['Referer', host]] IDIOMAS = {'Italiano': 'IT'} list_language = IDIOMAS.values() -list_servers = ['gounlimited','verystream','streamango','openload'] +list_servers = ['gounlimited', 'verystream', 'streamango', 'openload'] list_quality = ['default'] -# checklinks = config.get_setting('checklinks', __channel__) -# checklinks_number = config.get_setting('checklinks_number', __channel__) - def mainlist(item): - support.log(item.channel + 'mainlist') + log() itemlist = [] - support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow') - support.menu(itemlist, 
'Novità submenu', 'peliculas_tv', host,'tvshow')
-    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow',args=['serie'])
-    support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
-
-
-    autoplay.init(item.channel, list_servers, list_quality)
-    autoplay.show_option(item.channel, itemlist)
-
-    itemlist.append(
-        Item(channel='setting',
-             action="channel_config",
-             title=support.typo("Configurazione Canale color lime"),
-             config=item.channel,
-             folder=False,
-             thumbnail=channelselector.get_thumb('setting_0.png'))
-    )
+    support.menu(itemlist, 'Novità bold', 'peliculas_tv', host, 'tvshow')
+    support.menu(itemlist, 'Serie TV bold', 'lista_serie', host, 'tvshow')
+    support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host, 'tvshow', args=['serie'])
+    support.menu(itemlist, 'Cerca', 'search', host, 'tvshow')
+    support.aplay(item, itemlist, list_servers, list_quality)
+    support.channel_config(item, itemlist)
 
     return itemlist
 
@@ -52,20 +38,57 @@ def cleantitle(scrapedtitle):
 def cleantitle(scrapedtitle):
     scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
-    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×','x').replace('Game of Thrones –','').replace('In The Dark 2019','In The Dark (2019)').strip()
+    scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('×', 'x').replace('Game of Thrones –','')\
+        .replace('In The Dark 2019', 'In The Dark (2019)').replace('"', "'").strip()
     year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
     if year:
         scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
-
     return scrapedtitle.strip()
 
 
 # ================================================================================================================
 
 
+# ----------------------------------------------------------------------------------------------------------------
+def findvideos(item):
+    log()
+    data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
+    data = re.sub(r'\n|\t|\s+', ' ', data)
+    # retrieve the block containing the links
+    blocco = scrapertools.find_single_match(data, r'
    ([\s\S.]*?)
    ([^<]+)' + matches = support.match(item, patron, headers=headers)[0] for i, (scrapedurl, scrapedtitle) in enumerate(matches): scrapedplot = "" @@ -112,90 +132,58 @@ def lista_serie(item): # Paginazione if len(matches) >= p * PERPAGE: - scrapedurl = item.url + '{}' + str(p + 1) - itemlist.append( - Item(channel=item.channel, - action='lista_serie', - contentType=item.contentType, - title=support.typo(config.get_localized_string(30992), 'color kod bold'), - url=scrapedurl, - args=item.args, - thumbnail=support.thumb())) + support.nextPage(itemlist, item, next_page=(item.url + '{}' + str(p + 1))) return itemlist + # ================================================================================================================ # ---------------------------------------------------------------------------------------------------------------- def episodios(item, itemlist=[]): - support.log(item.channel + " episodios") - # itemlist = [] + log() + patron = r'(.*?)
    (.*?)
    (.*?)(.*?)(.*?)(.*?)\s[^>]+>[^>]+>[^>]+>[^>]+>' - patron += r'[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)

    ' - - matches = support.match(item, patron, patron_block, headers)[0] - - for scrapedurl, scrapedthumbnail, scrapedtitle in matches: - scrapedtitle = cleantitle(scrapedtitle) - - itemlist.append( - Item(channel=item.channel, - action="episodios", - contentType="episode", - title=scrapedtitle, - fulltitle=scrapedtitle, - url=scrapedurl, - show=scrapedtitle, - thumbnail=scrapedthumbnail, - folder=True)) - - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - return itemlist + patron = r'
    \s[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)

    ' + return support.scrape(item, patron, ['url', 'thumb', 'title'], patron_block=patron_block, action='episodios') # ================================================================================================================ @@ -141,47 +122,10 @@ def serietvaggiornate(item): log() itemlist = [] - patron_block = r'(.*?)]+> ]+>[^>]+>' - patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>' + patron_block = r'
    (.*?)<\/div><\/div>]+> ]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^(?:<|\()]+)(?:\(([^\)]+)\))?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>' - matches = support.match(item, patron, patron_block, headers)[0] - - for scrapedurl, scrapedthumbnail, scrapedep, scrapedtitle in matches: - episode = re.compile(r'^(\d+)x(\d+)', re.DOTALL).findall(scrapedep) # Prendo stagione ed episodioso - scrapedtitle = cleantitle(scrapedtitle) - - contentlanguage = "" - if 'sub-ita' in scrapedep.strip().lower(): - contentlanguage = 'Sub-ITA' - - extra = r']*>' % ( - episode[0][0], episode[0][1].lstrip("0")) - - infoLabels = {} - infoLabels['episode'] = episode[0][1].zfill(2) - infoLabels['season'] = episode[0][0] - - title = str( - "%s - %sx%s %s" % (scrapedtitle, infoLabels['season'], infoLabels['episode'], contentlanguage)).strip() - - itemlist.append( - Item(channel=item.channel, - action="findepvideos", - contentType="episode", - title=title, - show=scrapedtitle, - fulltitle=scrapedtitle, - url=scrapedurl, - extra=extra, - thumbnail=scrapedthumbnail, - contentLanguage=contentlanguage, - infoLabels=infoLabels, - folder=True)) - - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - - return itemlist + return support.scrape(item, patron, ['url', 'thumb', 'episode', 'lang', 'title'], patron_block=patron_block, action='findvideos') # ================================================================================================================ @@ -202,29 +146,7 @@ def lista_serie(item): patron_block = r'(.*?)' patron = r'\s[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)

    ' - matches, data = support.match(item, patron, patron_block, headers) - - for scrapedurl, scrapedimg, scrapedtitle in matches: - scrapedtitle = cleantitle(scrapedtitle) - - if scrapedtitle not in ['DMCA', 'Contatti', 'Lista di tutte le serie tv']: - itemlist.append( - Item(channel=item.channel, - action="episodios", - contentType="episode", - title=scrapedtitle, - fulltitle=scrapedtitle, - url=scrapedurl, - thumbnail=scrapedimg, - extra=item.extra, - show=scrapedtitle, - folder=True)) - - tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - - support.nextPage(itemlist, item, data, r"\s([^<]+)<\/div>[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>]+>([^<]+)<[^>]+>[^>]+>[^>]+>' - patron += r'[^<]+[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s' + patron += r'[^"]+".*?serie="([^"]+)".*?stag="([0-9]*)".*?ep="([0-9]*)"\s' patron += r'.*?embed="([^"]+)"\s.*?embed2="([^"]+)?"\s.*?embed3="([^"]+)?"?[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>[^>]+>\s?' patron += r'(?:]+>|]+>)?' @@ -255,7 +177,7 @@ def episodios(item): itemlist.append( Item(channel=item.channel, action="findvideos", - title=title, + title=support.typo(title, 'bold'), fulltitle=scrapedtitle, url=scrapedurl + "\r\n" + scrapedurl2 + "\r\n" + scrapedurl3, contentType="episode", @@ -290,5 +212,11 @@ def findepvideos(item): # ---------------------------------------------------------------------------------------------------------------- def findvideos(item): log() - logger.debug(item.url) - return support.server(item, data=item.url) + if item.contentType == 'tvshow': + data = httptools.downloadpage(item.url, headers=headers).data + matches = scrapertools.find_multiple_matches(data, item.extra) + data = "\r\n".join(matches[0]) + else: + log(item.url) + data = item.url + return support.server(item, data) diff --git a/channels/tantifilm.py b/channels/tantifilm.py index aa88d6b6..0b33787a 100644 --- a/channels/tantifilm.py +++ b/channels/tantifilm.py @@ -130,7 +130,7 @@ def peliculas(item): action = 'findvideos' if item.extra == 'movie' else 'episodios' if item.args == 'movie': patron= r'
    [^<]+]+>[^<]+]+>[^<]+<\/a>.*?

    \s*([a-zA-Z-0-9]+)\s*<\/p>' - itemlist = support.scrape(item, patron, ['url', 'title', 'year', 'thumb', 'quality'], headers, action=action, patronNext='

    ]+><\/a><[^>]+>

    ([^<]+) \(([^\)]+)[^<]+<\/p>.*?

    \s*([a-zA-Z-0-9]+)\s*<\/p>' itemlist = support.scrape(item, patron, ['url', 'thumb', 'title', 'year', 'quality'], headers, action=action, patronNext='(.*?)<\/a>', headers=headers) + matches, data = support.match(item, r']+>(.*?)<\/a>', headers=headers) for scrapedurl, scrapedthumb ,scrapedinfo, scrapedoriginal, scrapedtitle in matches: # Cerca Info come anno o lingua nel Titolo @@ -260,7 +259,6 @@ def episodios(item): itemlist = [] data = httptools.downloadpage(item.url).data.replace('\n', '') - data = re.sub(r'>\s*<', '><', data) block1 = scrapertoolsV2.find_single_match(data, r'

    (.*?)
    ') @@ -297,7 +295,7 @@ def findvideos(item): log() itemlist = [] - episode = '1' + episode = '' if item.extra and item.extra['episode']: data = item.extra['data'] diff --git a/resources/language/English/strings.po b/resources/language/English/strings.po index 7c5d7869..36e7d9a0 100644 --- a/resources/language/English/strings.po +++ b/resources/language/English/strings.po @@ -393,6 +393,10 @@ msgctxt "#50004" msgid "Path: " msgstr "" +msgctxt "#50005" +msgid "Delete This Channel?" +msgstr "" + msgctxt "#59970" msgid "Synchronization with Trakt started" msgstr "" diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po index ab71e87f..49255e94 100644 --- a/resources/language/Italian/strings.po +++ b/resources/language/Italian/strings.po @@ -393,6 +393,10 @@ msgctxt "#50004" msgid "Path: " msgstr "Percorso: " +msgctxt "#50005" +msgid "Delete This Channel?" +msgstr "Eliminare Questo Caale?" + msgctxt "#59970" msgid "Synchronization with Trakt started" msgstr "Sincronizzazione con Trakt iniziata" diff --git a/specials/community.py b/specials/community.py index 864eec29..f8a7ebbb 100644 --- a/specials/community.py +++ b/specials/community.py @@ -37,7 +37,7 @@ def show_channels(item): logger.info() itemlist = [] - context = [{"title": "Eliminar este canal", + context = [{"title": config.get_localized_string(50005), "action": "remove_channel", "channel": "community"}] From bcb67cb03618ce2d8c18fa86b4c3b01f94fc031c Mon Sep 17 00:00:00 2001 From: Alhaziel <46535975+lozioangie@users.noreply.github.com> Date: Sat, 1 Jun 2019 15:11:09 +0200 Subject: [PATCH 09/10] Damned "n" :p --- resources/language/Italian/strings.po | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resources/language/Italian/strings.po b/resources/language/Italian/strings.po index 49255e94..cc4c7b59 100644 --- a/resources/language/Italian/strings.po +++ b/resources/language/Italian/strings.po @@ -395,7 +395,7 @@ msgstr "Percorso: " msgctxt "#50005" msgid "Delete This Channel?" -msgstr "Eliminare Questo Caale?" +msgstr "Eliminare Questo Canale?" msgctxt "#59970" msgid "Synchronization with Trakt started" From ac6053de8d58e758d637984abf0000b9cf5bde37 Mon Sep 17 00:00:00 2001 From: Alhaziel <46535975+lozioangie@users.noreply.github.com> Date: Sat, 1 Jun 2019 15:14:30 +0200 Subject: [PATCH 10/10] Fix title show and fulltitle --- channels/animeworld.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/channels/animeworld.py b/channels/animeworld.py index fdb8e78a..f2ef7dd6 100644 --- a/channels/animeworld.py +++ b/channels/animeworld.py @@ -142,14 +142,14 @@ def lista_anime(item): title = scrapedtitle.replace(year,'').replace(lang,'').strip() original = scrapedoriginal.replace(year,'').replace(lang,'').strip() if lang: lang = support.typo(lang,'_ color kod') - title = '[B]' + title + '[/B]' + lang + original + longtitle = '[B]' + title + '[/B]' + lang + original itemlist.append( Item(channel=item.channel, extra=item.extra, contentType="episode", action="episodios", - title=title, + title=longtitle, url=scrapedurl, thumbnail=scrapedthumb, fulltitle=title,
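Taken together, the recurring move in this series is the migration of per-channel boilerplate into core/support.py: menu(), aplay() and channel_config() replace the hand-rolled mainlist blocks, while match() and scrape() replace the downloadpage/re.compile pairs scattered through the channels. A condensed sketch of the match()/scrape() flow the channels converge on; the sample page and regexes here are invented, and the real signatures live in core/support.py:

import re

def match(data, patron, patron_block=''):
    # optionally narrow the page to a block first, then apply the item regex
    if patron_block:
        m = re.search(patron_block, data, re.DOTALL)
        data = m.group(0) if m else data
    return re.findall(patron, data, re.DOTALL), data

page = '<ul class="list"><li><a href="/serie/a">Serie A</a></li><li><a href="/serie/b">Serie B</a></li></ul>'
matches, data = match(page, r'<a href="([^"]+)">([^<]+)</a>', r'<ul class="list">.*?</ul>')
for url, title in matches:
    # in the real helpers each pair becomes an Item(action=..., url=..., title=...),
    # and scrape() additionally maps the regex groups via its listGroups argument
    print(url, title)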