diff --git a/channels/serietvsubita.json b/channels/serietvsubita.json
new file mode 100644
index 00000000..4dd01090
--- /dev/null
+++ b/channels/serietvsubita.json
@@ -0,0 +1,44 @@
+{
+ "id": "serietvsubita",
+ "name": "Serie TV Sub ITA",
+ "active": false,
+ "adult": false,
+ "language": ["ita"],
+ "thumbnail": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
+ "banner": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
+ "categories": ["tvshow"],
+ "settings": [
+ {
+ "id": "channel_host",
+ "type": "text",
+ "label": "Host del canale",
+ "default": "http://serietvsubita.xyz/",
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_global_search",
+ "type": "bool",
+ "label": "Includi ricerca globale",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_series",
+ "type": "bool",
+ "label": "Includi in Novità - Serie TV",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_italiano",
+ "type": "bool",
+ "label": "Includi in Novità - Italiano",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ }
+ ]
+}
diff --git a/channels/serietvsubita.py b/channels/serietvsubita.py
new file mode 100644
index 00000000..ff2d3381
--- /dev/null
+++ b/channels/serietvsubita.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------
+# Channel for Serie Tv Sub ITA
+# Thanks to the Icarus crew
+# ----------------------------------------------------------
+import inspect
+import re
+import time
+
+import channelselector
+from channels import autoplay, support, filtertools
+from core import httptools, tmdb, scrapertools
+from core.item import Item
+from platformcode import logger, config
+
+host = config.get_setting("channel_host", 'serietvsubita')
+headers = [['Referer', host]]
+
+IDIOMAS = {'Italiano': 'IT'}
+list_language = IDIOMAS.values()
+list_servers = ['gounlimited', 'verystream', 'streamango', 'openload']
+list_quality = ['default']
+
+
+
+def mainlist(item):
+ support.log(item.channel + ' mainlist')
+ itemlist = []
+ support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
+ support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow')
+ support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow')
+ support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
+
+
+ autoplay.init(item.channel, list_servers, list_quality)
+ autoplay.show_option(item.channel, itemlist)
+
+ itemlist.append(
+ Item(channel='setting',
+ action="channel_config",
+ title=support.typo("Configurazione Canale color lime"),
+ config=item.channel,
+ folder=False,
+ thumbnail=channelselector.get_thumb('setting_0.png'))
+ )
+
+ return itemlist
+
+
+# ----------------------------------------------------------------------------------------------------------------
+def cleantitle(scrapedtitle):
+ scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
+ scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones –','')
+ year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
+ if year:
+ scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
+
+
+ return scrapedtitle.strip()
+
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def lista_serie(item):
+ support.log(item.channel + " lista_serie")
+ itemlist = []
+
+ PERPAGE = 15
+
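+ # Pagination convention: the page number rides on the URL after a '{}' marker,
+ # so the same listing page can be sliced PERPAGE entries at a time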
+ p = 1
+ if '{}' in item.url:
+ item.url, p = item.url.split('{}')
+ p = int(p)
+
+ # Download the page
+ data = httptools.downloadpage(item.url).data
+
+ # Extract the entries (assumed list markup; only the two capture groups,
+ # URL and title, are certain)
+ patron = '<li[^>]*><a href="([^"]+)"[^>]*>([^<]+)</a>'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for i, (scrapedurl, scrapedtitle) in enumerate(matches):
+ scrapedplot = ""
+ scrapedthumbnail = ""
+ if (p - 1) * PERPAGE > i: continue
+ if i >= p * PERPAGE: break
+ title = cleantitle(scrapedtitle)
+ itemlist.append(
+ Item(channel=item.channel,
+ extra=item.extra,
+ action="episodes",
+ title=title,
+ url=scrapedurl,
+ thumbnail=scrapedthumbnail,
+ fulltitle=title,
+ show=title,
+ plot=scrapedplot,
+ folder=True))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ # Pagination
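+ # (emit a synthetic "next page" item pointing at the same URL with p + 1
+ # after the '{}' marker whenever more matches remain)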
+ if len(matches) >= p * PERPAGE:
+ scrapedurl = item.url + '{}' + str(p + 1)
+ itemlist.append(
+ Item(channel=item.channel,
+ action='lista_serie',
+ contentType=item.contentType,
+ title=support.typo(config.get_localized_string(30992), 'color kod bold'),
+ url=scrapedurl,
+ args=item.args,
+ thumbnail=support.thumb()))
+
+ return itemlist
+
+# ================================================================================================================
+
+
+# ----------------------------------------------------------------------------------------------------------------
+def episodes(item):
+ support.log(item.channel + " episodes")
+ itemlist = []
+
+ data = httptools.downloadpage(item.url).data
+
+ # Entry pattern (assumed post markup: link with title attribute, then thumbnail)
+ patron = '<div class="post-thumb">\s*<a href="([^"]+)" title="([^"]+)">.*?'
+ patron += '<img src="([^"]+)"'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
+ scrapedplot = ""
+ scrapedtitle = cleantitle(scrapedtitle)
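+ # derive the bare show name by cutting the title at its season tag (S0x/S1x/S2x)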
+ title = scrapedtitle.split(" S0")[0].strip()
+ title = title.split(" S1")[0].strip()
+ title = title.split(" S2")[0].strip()
+
+ itemlist.append(
+ Item(channel=item.channel,
+ extra=item.extra,
+ action="findvideos",
+ fulltitle=scrapedtitle,
+ show=scrapedtitle,
+ title=scrapedtitle,
+ url=scrapedurl,
+ thumbnail=scrapedthumbnail,
+ plot=scrapedplot,
+ contentSerieName=title,
+ folder=True))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ # Pagination (assumed WordPress page-numbers markup: current page, then the
+ # link to the next one)
+ patron = '<span class="current">\d+</span>\s*<a href="([^"]+)">\d+</a>'
+ next_page = scrapertools.find_single_match(data, patron)
+ if next_page != "":
+ itemlist.append(
+ Item(channel=item.channel,
+ action='episodes',
+ contentType=item.contentType,
+ title=support.typo(config.get_localized_string(30992), 'color kod bold'),
+ url=next_page,
+ args=item.args,
+ thumbnail=support.thumb()))
+
+ # support.videolibrary(itemlist,item,'bold color kod')
+
+ return itemlist
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def findvideos(item):
+ support.log(item.channel + " findvideos")
+
+ data = httptools.downloadpage(item.url).data
+
+ patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ for keeplinks, id in matches:
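+ # these cookies mark the keeplinks interstitial as already visited, so the
+ # protected page serves the real hoster links directly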
+ headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
+ ['Referer', keeplinks]]
+
+ html = httptools.downloadpage(keeplinks, headers=headers).data
+ data += str(scrapertools.find_multiple_matches(html, '<a href="([^"]+)" target="_blank"'))
+
+ itemlist = support.server(item, data=data)
+
+ autoplay.start(itemlist, item)
+
+ return itemlist
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+# NOTE: reconstructed function; it mirrors lista_serie and its entry regex is
+# an assumption
+def peliculas_tv(item):
+ support.log(item.channel + " peliculas_tv")
+ itemlist = []
+
+ data = httptools.downloadpage(item.url).data
+
+ patron = '<h3[^>]*><a href="([^"]+)"[^>]*>([^<]+)</a></h3>'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedtitle in matches:
+ title = cleantitle(scrapedtitle)
+ itemlist.append(
+ Item(channel=item.channel,
+ extra=item.extra,
+ action="findvideos",
+ title=title,
+ fulltitle=title,
+ show=title,
+ url=scrapedurl,
+ folder=True))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ # Pagination (assumed WordPress page-numbers markup, as in episodes)
+ patron = '<span class="current">\d+</span>\s*<a href="([^"]+)">\d+</a>'
+ next_page = scrapertools.find_single_match(data, patron)
+ if next_page != "":
+ if item.extra == "search_tv":
+ next_page = next_page.replace('&amp;', '&')
+ itemlist.append(
+ Item(channel=item.channel,
+ action='peliculas_tv',
+ contentType=item.contentType,
+ title=support.typo(config.get_localized_string(30992), 'color kod bold'),
+ url=next_page,
+ args=item.args,
+ extra=item.extra,
+ thumbnail=support.thumb()))
+
+
+ return itemlist
+
+
+# ================================================================================================================
+
+
+
+# ----------------------------------------------------------------------------------------------------------------
+def newest(categoria):
+ logger.info('serietvsubita newest ' + categoria)
+ itemlist = []
+ item = Item()
+ item.url = host
+ item.extra = 'serie'
+ try:
+ if categoria == "series":
+ itemlist = peliculas_tv(item)
+
+ # Keep the global "newest" scan going even if this channel errors out
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("{0}".format(line))
+ return []
+
+ return itemlist
+
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def search(item, texto):
+ logger.info(item.channel + " search")
+ itemlist = []
+ item.extra = "search_tv"
+
+ item.url = host + "/?s=" + texto + "&op.x=0&op.y=0"
+
+ try:
+ return peliculas_tv(item)
+
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("%s" % line)
+ return []
+
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def list_az(item):
+ support.log(item.channel+" list_az")
+ itemlist = []
+ PERPAGE = 50
+
+ p = 1
+ if '{}' in item.url:
+ item.url, p = item.url.split('{}')
+ p = int(p)
+
+ # Download the page
+ data = httptools.downloadpage(item.url).data
+
+ # Entries (assumed markup, same as in lista_serie)
+ patron = '<li[^>]*><a href="([^"]+)"[^>]*>([^<]+)</a>'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for i, (scrapedurl, scrapedtitle) in enumerate(matches):
+ scrapedplot = ""
+ scrapedthumbnail = ""
+ if (p - 1) * PERPAGE > i: continue
+ if i >= p * PERPAGE: break
+ title = cleantitle(scrapedtitle)
+ itemlist.append(
+ Item(channel=item.channel,
+ extra=item.extra,
+ action="episodes",
+ title=title,
+ url=scrapedurl,
+ fulltitle=title,
+ show=title,
+ plot=scrapedplot,
+ folder=True))
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ # Pagination
+ if len(matches) >= p * PERPAGE:
+ scrapedurl = item.url + '{}' + str(p + 1)
+ itemlist.append(
+ Item(channel=item.channel,
+ action='list_az',
+ contentType=item.contentType,
+ title=support.typo(config.get_localized_string(30992), 'color kod bold'),
+ url=scrapedurl,
+ args=item.args,
+ extra=item.extra,
+ thumbnail=support.thumb()))
+
+ return itemlist
+
+# ================================================================================================================
diff --git a/channels/serietvu.json b/channels/serietvu.json
new file mode 100644
index 00000000..b598e9ab
--- /dev/null
+++ b/channels/serietvu.json
@@ -0,0 +1,44 @@
+{
+ "id": "serietvu",
+ "name": "SerieTVU",
+ "active": true,
+ "adult": false,
+ "language": ["ita"],
+ "thumbnail": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
+ "banner": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
+ "categories": ["tvshow"],
+ "settings": [
+ {
+ "id": "channel_host",
+ "type": "text",
+ "label": "Host del canale",
+ "default": "https://www.serietvu.club",
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_global_search",
+ "type": "bool",
+ "label": "Includi ricerca globale",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_series",
+ "type": "bool",
+ "label": "Includi in Novità - Serie TV",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_italiano",
+ "type": "bool",
+ "label": "Includi in Novità - Italiano",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ }
+ ]
+}
diff --git a/channels/serietvu.py b/channels/serietvu.py
new file mode 100644
index 00000000..225fc6dd
--- /dev/null
+++ b/channels/serietvu.py
@@ -0,0 +1,296 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------
+# Channel for SerieTVU
+# Thanks to the Icarus crew
+# ----------------------------------------------------------
+import re
+
+import channelselector
+from channels import autoplay, support, filtertools
+from core import httptools, tmdb, scrapertools
+from core.item import Item
+from platformcode import logger, config
+
+host = config.get_setting("channel_host", 'serietvu')
+headers = [['Referer', host]]
+
+IDIOMAS = {'Italiano': 'IT'}
+list_language = IDIOMAS.values()
+list_servers = ['speedvideo']
+list_quality = ['default']
+
+
+
+def mainlist(item):
+ support.log(item.channel + ' mainlist')
+ itemlist = []
+ support.menu(itemlist, 'Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
+ support.menu(itemlist, 'Novità submenu', 'latestep', "%s/ultimi-episodi" % host,'tvshow')
+ # support.menu(itemlist, 'Nuove serie color azure', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
+ support.menu(itemlist, 'Categorie', 'categorie', host,'tvshow')
+ support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
+
+
+ # autoplay.init(item.channel, list_servers, list_quality)
+ # autoplay.show_option(item.channel, itemlist)
+
+ itemlist.append(
+ Item(channel='setting',
+ action="channel_config",
+ title=support.typo("Configurazione Canale color lime"),
+ config=item.channel,
+ folder=False,
+ thumbnail=channelselector.get_thumb('setting_0.png'))
+ )
+
+ return itemlist
+
+
+# ----------------------------------------------------------------------------------------------------------------
+def cleantitle(scrapedtitle):
+ scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
+ scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones –','').replace('Flash 2014','Flash')
+ year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
+ if year:
+ scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
+
+
+ return scrapedtitle.strip()
+
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def lista_serie(item):
+ support.log(item.channel + " lista_serie")
+ itemlist = []
+
+ data = httptools.downloadpage(item.url, headers=headers).data
+
+ # Series cards (assumed markup: wrapper div, then link with lazy-loaded poster)
+ patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)">'
+ patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedimg, scrapedtitle in matches:
+ infoLabels = {}
+ year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
+ if year:
+ infoLabels['year'] = year
+ scrapedtitle = cleantitle(scrapedtitle)
+
+ itemlist.append(
+ Item(channel=item.channel,
+ action="episodios",
+ title=scrapedtitle,
+ fulltitle=scrapedtitle,
+ url=scrapedurl,
+ thumbnail=scrapedimg,
+ show=scrapedtitle,
+ infoLabels=infoLabels,
+ contentType='tvshow',
+ folder=True))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ # Pages
+ support.nextPage(itemlist,item,data,'Pagina successiva')
+
+ return itemlist
+
+# ================================================================================================================
+
+
+# ----------------------------------------------------------------------------------------------------------------
+def episodios(item):
+ support.log(item.channel + " episodios")
+ itemlist = []
+
+ data = httptools.downloadpage(item.url, headers=headers).data
+
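+ # Two-pass scrape: first collect the season numbers, then slice each
+ # season's episode block out of the same page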
+ # Season selector (assumed markup holding the season number)
+ patron = r'<option value="(\d+)">'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for value in matches:
+ # Per-season episode block (assumed markup)
+ patron = r'<div class="list[^"]*" data-season="%s">(.*?)</div>\s*</div>' % value
+ blocco = scrapertools.find_single_match(data, patron)
+
+ # Episode anchors (assumed markup; group 1 keeps the whole tag, stored as extra)
+ patron = r'(<a href="([^"]+)" data-original="([^"]+)">)[^>]+>[^>]+>([^<]+)<'
+ matches = re.compile(patron, re.DOTALL).findall(blocco)
+ for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches:
+ number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip()
+ itemlist.append(
+ Item(channel=item.channel,
+ action="findvideos",
+ title=value + "x" + number.zfill(2),
+ fulltitle=scrapedtitle,
+ contentType="episode",
+ url=scrapedurl,
+ thumbnail=scrapedimg,
+ extra=scrapedextra,
+ folder=True))
+
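+ # offer an "add to videolibrary" entry when library support is enabled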
+ if config.get_videolibrary_support() and len(itemlist) != 0:
+ itemlist.append(
+ Item(channel=item.channel,
+ title=support.typo(config.get_localized_string(30161) + ' bold color kod'),
+ thumbnail=support.thumb(),
+ url=item.url,
+ action="add_serie_to_library",
+ extra="episodios",
+ contentSerieName=item.fulltitle,
+ show=item.show))
+
+ return itemlist
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def findvideos(item):
+ support.log(item.channel + " findvideos")
+
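+ # item.url holds the embed link captured in episodios; support.server()
+ # detects the hoster straight from it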
+ itemlist = support.server(item, data=item.url)
+ itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ autoplay.start(itemlist, item)
+
+ return itemlist
+
+# ================================================================================================================
+
+
+# ----------------------------------------------------------------------------------------------------------------
+def findepisodevideo(item):
+ support.log(item.channel + " findepisodevideo")
+
+ # Download the page
+ data = httptools.downloadpage(item.url, headers=headers).data
+
+ # Grab the block for the requested season (assumed markup, as in episodios)
+ patron = r'<div class="list[^"]*" data-season="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
+ blocco = scrapertools.find_single_match(data, patron)
+
+ # Extract the requested episode (assumed markup: the data-num attribute is a
+ # guess, and group 1 must keep the whole anchor tag because matches[0][0] is
+ # handed to support.server)
+ patron = r'(<a href="([^"]+)"[^>]*data-num="%s"[^>]*>)' % item.extra[0][1].lstrip("0")
+ matches = re.compile(patron, re.DOTALL).findall(blocco)
+
+ itemlist = support.server(item, data=matches[0][0])
+ itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ autoplay.start(itemlist, item)
+
+ return itemlist
+
+
+# ================================================================================================================
+
+
+# ----------------------------------------------------------------------------------------------------------------
+def latestep(item):
+ support.log(item.channel + " latestep")
+ itemlist = []
+
+ data = httptools.downloadpage(item.url, headers=headers).data
+
+ # Latest-episode cards (assumed markup: card link plus poster, then the title
+ # and the SxxEyy info next to it)
+ patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)">'
+ patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<[^>]+>([^<]+)<'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedimg, scrapedtitle, scrapedinfo in matches:
+ infoLabels = {}
+ year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
+ if year:
+ infoLabels['year'] = year
+ scrapedtitle = cleantitle(scrapedtitle)
+
+ infoLabels['tvshowtitle'] = scrapedtitle
+
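+ # extra carries the [(season, episode)] pair that findepisodevideo uses to
+ # locate the matching block on the show page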
+ episodio = re.compile(r'(\d+)x(\d+)', re.DOTALL).findall(scrapedinfo)
+ title = "%s %s" % (scrapedtitle, scrapedinfo)
+ itemlist.append(
+ Item(channel=item.channel,
+ action="findepisodevideo",
+ title=title,
+ fulltitle=scrapedtitle,
+ url=scrapedurl,
+ extra=episodio,
+ thumbnail=scrapedimg,
+ show=scrapedtitle,
+ contentTitle=scrapedtitle,
+ contentSerieName=title,
+ infoLabels=infoLabels,
+ folder=True))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ return itemlist
+
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def newest(categoria):
+ logger.info('serietvu newest ' + categoria)
+ itemlist = []
+ item = Item()
+ try:
+ if categoria == "series":
+ item.url = host + "/ultimi-episodi"
+ item.action = "latestep"
+ itemlist = latestep(item)
+
+ if itemlist[-1].action == "latestep":
+ itemlist.pop()
+
+ # Keep the global "newest" scan going even if this channel errors out
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("{0}".format(line))
+ return []
+
+ return itemlist
+
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def search(item, texto):
+ logger.info(item.channel + " search")
+ item.url = host + "/?s=" + texto
+ try:
+ return lista_serie(item)
+ # Keep the global search going even if this channel errors out
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("%s" % line)
+ return []
+
+
+# ================================================================================================================
+
+# ----------------------------------------------------------------------------------------------------------------
+def categorie(item):
+ logger.info(item.channel +" categorie")
+ itemlist = []
+
+ data = httptools.downloadpage(item.url, headers=headers).data
+ # Category submenu (assumed markup: the "Sfoglia" nav entry followed by its
+ # submenu list of links)
+ blocco = scrapertools.find_single_match(data, r'Sfoglia</a>\s*<ul[^>]*>\s*(.*?)</ul>')
+ patron = r'<a href="([^"]+)">([^<]+)</a>'
+ matches = re.compile(patron, re.DOTALL).findall(blocco)
+
+ for scrapedurl, scrapedtitle in matches:
+ if scrapedtitle == 'Home Page' or scrapedtitle == 'Calendario Aggiornamenti':
+ continue
+ itemlist.append(
+ Item(channel=item.channel,
+ action="lista_serie",
+ title=scrapedtitle,
+ contentType="tv",
+ url="%s%s" % (host, scrapedurl),
+ thumbnail=item.thumbnail,
+ folder=True))
+
+ return itemlist
+
+# ================================================================================================================
diff --git a/servers/gounlimited.json b/servers/gounlimited.json
index 7b66129f..e45a2872 100644
--- a/servers/gounlimited.json
+++ b/servers/gounlimited.json
@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "https://gounlimited.to/embed-(.*?).html",
+ "pattern": "https://gounlimited.to/(?:embed-|)([a-z0-9]+)(?:.html|)",
"url": "https://gounlimited.to/embed-\\1.html"
}
]