Add SerieTVU and Serie Tv Sub Ita Channel (#5)

* Add SerieTVU Channel

* SerieTVU Channel
- config host
- new menu

* SerieTVU Channel - fix

* Add Serie Tv Sub Ita Channel
Fix gounlimited regex

* Serie Tv Sub Ita - remove videolibrary support

* SerieTVU - fix videolibrary tvshow

* Serie Tv Sub Ita disabled
Add Credit comment
Author: 4l3x87
Date: 2019-05-03 19:44:50 +02:00
Committed by: mac12m99
Parent: 99202645e4
Commit: 1c61348979
5 changed files with 737 additions and 1 deletion

channels/serietvsubita.json (new file, 44 lines)

@@ -0,0 +1,44 @@
{
"id": "serietvsubita",
"name": "Serie TV Sub ITA",
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
"banner": "http://serietvsubita.xyz/wp-content/uploads/2012/07/logo.jpg",
"categories": ["tvshow"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "http://serietvsubita.xyz/",
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

channels/serietvsubita.py (new file, 352 lines)

@@ -0,0 +1,352 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for Serie Tv Sub ITA
# Thanks to the Icarus crew
# ----------------------------------------------------------
import re
import time
import channelselector
from channels import autoplay, support, filtertools
from core import httptools, tmdb, scrapertools
from core.item import Item
from platformcode import logger, config
host = config.get_setting("channel_host", 'serietvsubita')
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['gounlimited','verystream','streamango','openload']
list_quality = ['default']
def mainlist(item):
support.log(item.channel + ' mainlist')
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'peliculas_tv', host,'tvshow')
support.menu(itemlist, 'Archivio A-Z submenu', 'list_az', host,'tvshow')
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
autoplay.init(item.channel, list_servers, list_quality)
autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
return itemlist
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones ', '')  # straighten curly apostrophes, strip site-specific title cruft
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
support.log(item.channel + " lista_serie")
itemlist = []
PERPAGE = 15
p = 1
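# Pagination state is piggybacked on the URL: a trailing '{}<page>' marker is
# split off here and re-appended when building the next-page item
# (list_az below uses the same convention).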
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
scrapedplot = ""
scrapedthumbnail = ""
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodes",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
action='lista_serie',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=scrapedurl,
args=item.args,
thumbnail=support.thumb()))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodes(item):
support.log(item.channel + " episodes")
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>.*?'
patron += '<p><a href="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
title = scrapedtitle.split(" S0")[0].strip()  # strip the "SxxEyy" tail from the title (covers season prefixes S0x/S1x/S2x)
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
contentSerieName=title,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=item.channel,
action='episodes',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=next_page,
args=item.args,
thumbnail=support.thumb()))
# support.videolibrary(itemlist,item,'bold color kod')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
data = httptools.downloadpage(item.url).data
patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
matches = re.compile(patron, re.DOTALL).findall(data)
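# KeepLinks bypass: pre-setting the flag[<id>] and nopopatall cookies tells the
# protector the interstitial was already accepted, so it serves the link list directly.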
for keeplinks, id in matches:
headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
['Referer', keeplinks]]
html = httptools.downloadpage(keeplinks, headers=headers).data
data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))  # '</lable>' (sic) mirrors the site's own markup
itemlist = support.server(item, data=data)
itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def peliculas_tv(item):
logger.info("icarus serietvsubita peliculas_tv")
itemlist = []
data = httptools.downloadpage(item.url).data
logger.debug(data)
patron = '<div class="post-meta">\s*<a href="([^"]+)"\s*title="([^"]+)"\s*class=".*?"></a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
if "FACEBOOK" in scrapedtitle or "RAPIDGATOR" in scrapedtitle:
continue
if scrapedtitle == "WELCOME!":
continue
scrapedthumbnail = ""
scrapedplot = ""
scrapedtitle = cleantitle(scrapedtitle)
title = scrapedtitle.split(" S0")[0].strip()
title = title.split(" S1")[0].strip()
title = title.split(" S2")[0].strip()
itemlist.append(
Item(channel=item.channel,
action="findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
contentSerieName=title,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
patron = '<strong class=\'on\'>\d+</strong>\s*<a href="([^<]+)">\d+</a>'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
if item.extra == "search_tv":
next_page = next_page.replace('&#038;', '&')
itemlist.append(
Item(channel=item.channel,
action='peliculas_tv',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=next_page,
args=item.args,
extra=item.extra,
thumbnail=support.thumb()))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info('serietvsubita newest ' + categoria)
itemlist = []
item = Item()
item.url = host
item.extra = 'serie'
try:
if categoria == "series":
itemlist = peliculas_tv(item)
# Continue the search even if this channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info(item.channel + " search")
itemlist = []
item.extra = "search_tv"
item.url = host + "/?s=" + texto + "&op.x=0&op.y=0"
try:
return peliculas_tv(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def list_az(item):
support.log(item.channel+" list_az")
itemlist = []
PERPAGE = 50
p = 1
if '{}' in item.url:
item.url, p = item.url.split('{}')
p = int(p)
# Download the page
data = httptools.downloadpage(item.url).data
# Entries
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for i, (scrapedurl, scrapedtitle) in enumerate(matches):
scrapedplot = ""
scrapedthumbnail = ""
if (p - 1) * PERPAGE > i: continue
if i >= p * PERPAGE: break
title = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
extra=item.extra,
action="episodes",
title=title,
url=scrapedurl,
fulltitle=title,
show=title,
plot=scrapedplot,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if len(matches) >= p * PERPAGE:
scrapedurl = item.url + '{}' + str(p + 1)
itemlist.append(
Item(channel=item.channel,
action='list_az',
contentType=item.contentType,
title=support.typo(config.get_localized_string(30992), 'color kod bold'),
url=scrapedurl,
args=item.args,
extra=item.extra,
thumbnail=support.thumb()))
return itemlist
# ================================================================================================================

channels/serietvu.json (new file, 44 lines)

@@ -0,0 +1,44 @@
{
"id": "serietvu",
"name": "SerieTVU",
"active": true,
"adult": false,
"language": ["ita"],
"thumbnail": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
"banner": "https://www.serietvu.club/wp-content/themes/gurarjbar/images/logo.png",
"categories": ["tvshow"],
"settings": [
{
"id": "channel_host",
"type": "text",
"label": "Host del canale",
"default": "https://www.serietvu.club",
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Includi ricerca globale",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Includi in Novità - Serie TV",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_italiano",
"type": "bool",
"label": "Includi in Novità - Italiano",
"default": true,
"enabled": true,
"visible": true
}
]
}

channels/serietvu.py (new file, 296 lines)

@@ -0,0 +1,296 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for SerieTVU
# Thanks to the Icarus crew
# ----------------------------------------------------------
import re
import channelselector
from channels import autoplay, support, filtertools
from core import httptools, tmdb, scrapertools
from core.item import Item
from platformcode import logger, config
host = config.get_setting("channel_host", 'serietvu')
headers = [['Referer', host]]
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['speedvideo']
list_quality = ['default']
def mainlist(item):
support.log(item.channel + ' mainlist')
itemlist = []
support.menu(itemlist, 'Serie TV bold', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
support.menu(itemlist, 'Novità submenu', 'latestep', "%s/ultimi-episodi" % host,'tvshow')
# support.menu(itemlist, 'Nuove serie color azure', 'lista_serie', "%s/category/serie-tv" % host,'tvshow')
support.menu(itemlist, 'Categorie', 'categorie', host,'tvshow')
support.menu(itemlist, 'Cerca', 'search', host,'tvshow')
# autoplay.init(item.channel, list_servers, list_quality)
# autoplay.show_option(item.channel, itemlist)
itemlist.append(
Item(channel='setting',
action="channel_config",
title=support.typo("Configurazione Canale color lime"),
config=item.channel,
folder=False,
thumbnail=channelselector.get_thumb('setting_0.png'))
)
return itemlist
# ----------------------------------------------------------------------------------------------------------------
def cleantitle(scrapedtitle):
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.strip())
scrapedtitle = scrapedtitle.replace('[HD]', '').replace('’', '\'').replace('Game of Thrones ', '').replace('Flash 2014', 'Flash')  # straighten curly apostrophes, strip site-specific title cruft
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
scrapedtitle = scrapedtitle.replace('(' + year + ')', '')
return scrapedtitle.strip()
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def lista_serie(item):
support.log(item.channel + " lista_serie")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedimg, scrapedtitle in matches:
infoLabels = {}
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
infoLabels['year'] = year
scrapedtitle = cleantitle(scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="episodios",
title=scrapedtitle,
fulltitle=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedimg,
show=scrapedtitle,
infoLabels=infoLabels,
contentType='tvshow',
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
support.nextPage(itemlist,item,data,'<li><a href="([^"]+)">Pagina successiva')
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def episodios(item):
support.log(item.channel + " episodios")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<option value="(\d+)"[\sselected]*>.*?</option>'
matches = re.compile(patron, re.DOTALL).findall(data)
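# The page has one <option> per season: for each season id, isolate that
# season's episode block, then scrape the individual episode anchors from it.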
for value in matches:
patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % value
blocco = scrapertools.find_single_match(data, patron)
patron = r'(<a data-id="\d+[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">)[^>]+>[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedextra, scrapedurl, scrapedimg, scrapedtitle in matches:
number = scrapertools.decodeHtmlentities(scrapedtitle.replace("Episodio", "")).strip()
itemlist.append(
Item(channel=item.channel,
action="findvideos",
title=value + "x" + number.zfill(2),
fulltitle=scrapedtitle,
contentType="episode",
url=scrapedurl,
thumbnail=scrapedimg,
extra=scrapedextra,
folder=True))
if config.get_videolibrary_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=item.channel,
title=support.typo(config.get_localized_string(30161) + ' bold color kod'),
thumbnail=support.thumb(),
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.fulltitle,
show=item.show))
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findvideos(item):
support.log(item.channel + " findvideos")
itemlist = support.server(item, data=item.url)
itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def findepisodevideo(item):
support.log(item.channel + " findepisodevideo")
# Download the page
data = httptools.downloadpage(item.url, headers=headers).data
# Grab the block for the requested season
patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
blocco = scrapertools.find_single_match(data, patron)
# Extract the episode link
patron = r'<a data-id="%s[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">' % item.extra[0][1].lstrip("0")
matches = re.compile(patron, re.DOTALL).findall(blocco)
itemlist = support.server(item, data=matches[0][0])
itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def latestep(item):
support.log(item.channel + " latestep")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<div class="item">\s*<a href="([^"]+)" data-original="([^"]+)" class="lazy inner">'
patron += r'[^>]+>[^>]+>[^>]+>[^>]+>([^<]+)<small>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedimg, scrapedtitle, scrapedinfo in matches:
infoLabels = {}
year = scrapertools.find_single_match(scrapedtitle, '\((\d{4})\)')
if year:
infoLabels['year'] = year
scrapedtitle = cleantitle(scrapedtitle)
infoLabels['tvshowtitle'] = scrapedtitle
episodio = re.compile(r'(\d+)x(\d+)', re.DOTALL).findall(scrapedinfo)
title = "%s %s" % (scrapedtitle, scrapedinfo)
itemlist.append(
Item(channel=item.channel,
action="findepisodevideo",
title=title,
fulltitle=scrapedtitle,
url=scrapedurl,
extra=episodio,
thumbnail=scrapedimg,
show=scrapedtitle,
contentTitle=scrapedtitle,
contentSerieName=title,
infoLabels=infoLabels,
folder=True))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def newest(categoria):
logger.info('serietvu newest ' + categoria)
itemlist = []
item = Item()
try:
if categoria == "series":
item.url = host + "/ultimi-episodi"
item.action = "latestep"
itemlist = latestep(item)
if itemlist[-1].action == "latestep":
itemlist.pop()
# Continue the search even if this channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def search(item, texto):
logger.info(item.channel + " search")
item.url = host + "/?s=" + texto
try:
return lista_serie(item)
# Continue the search even if this channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# ================================================================================================================
# ----------------------------------------------------------------------------------------------------------------
def categorie(item):
logger.info(item.channel +" categorie")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
blocco = scrapertools.find_single_match(data, r'<h2>Sfoglia</h2>\s*<ul>(.*?)</ul>\s*</section>')
patron = r'<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(blocco)
for scrapedurl, scrapedtitle in matches:
if scrapedtitle == 'Home Page' or scrapedtitle == 'Calendario Aggiornamenti':
continue
itemlist.append(
Item(channel=item.channel,
action="lista_serie",
title=scrapedtitle,
contentType="tv",
url="%s%s" % (host, scrapedurl),
thumbnail=item.thumbnail,
folder=True))
return itemlist
# ================================================================================================================
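Note on the latest-episodes flow: latestep() parses the (season, episode) pair out of labels like "3x07" and stows it in item.extra, and findepisodevideo() later uses that pair to select the season block and episode anchor. A minimal standalone sketch of that lookup, assuming the same SerieTVU markup (pick_episode is a hypothetical helper, not part of the channel):

import re

def pick_episode(page_html, extra):
    # extra holds the (season, episode) tuples parsed by latestep(), e.g. [('3', '07')]
    season, episode = extra[0]
    # Isolate the requested season's episode list...
    block = re.search(
        r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % season,
        page_html, re.DOTALL).group(1)
    # ...then return the data-href of the matching episode anchor
    # (leading zeros stripped, mirroring the channel code)
    return re.search(
        r'<a data-id="%s[^"]*" data-href="([^"]+)"' % episode.lstrip('0'),
        block).group(1)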

servers/gounlimited.json (modified)

@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "https://gounlimited.to/embed-(.*?).html",
"pattern": "https://gounlimited.to/(?:embed-|)([a-z0-9]+)(?:.html|)",
"url": "https://gounlimited.to/embed-\\1.html"
}
]
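
As a quick sanity check of the regex change (illustrative, not part of the commit): the broadened pattern accepts both the embed form and the bare file URL, and the url template rebuilds the canonical embed link from the captured id either way.

import re

OLD = r'https://gounlimited.to/embed-(.*?).html'
NEW = r'https://gounlimited.to/(?:embed-|)([a-z0-9]+)(?:.html|)'

for url in ('https://gounlimited.to/embed-abc123.html',
            'https://gounlimited.to/abc123'):
    m = re.search(NEW, url)
    # The old pattern required the 'embed-' prefix, so it missed bare links;
    # the new one captures the id from either form.
    print('%s -> old matched: %s, normalized: https://gounlimited.to/embed-%s.html'
          % (url, bool(re.search(OLD, url)), m.group(1)))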