Modifiche ad Alpha

This commit is contained in:
Alhaziel01
2021-08-24 17:38:44 +02:00
parent 2764c48805
commit dfeec60fe3
27 changed files with 1945 additions and 67 deletions

View File

@@ -0,0 +1,32 @@
name: Update channel domains
on:
  workflow_dispatch:
  schedule:
    - cron: '30 17 * * *'
jobs:
  update:
    runs-on: ubuntu-latest
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2
        with:
          ref: stable
      # No build matrix is defined in this job, so the old
      # "${{ matrix.python-version }}" expression expanded to an empty string;
      # name the pinned version directly instead.
      - name: Set up Python 3.7
        uses: actions/setup-python@v1
        with:
          python-version: 3.7
      - name: Install dependencies
        run: pip install requests
      - name: Update domains
        run: python tools/updateDomains.py
      - name: Commit & Push changes
        uses: actions-js/push@master
        with:
          message: "Aggiornamento domini"
          branch: "stable"
          github_token: ${{ secrets.API_TOKEN_GITHUB }}

View File

@@ -1,6 +1,6 @@
{
"direct": {
"altadefinizione01": "https://www.altadefinizione01.games",
"altadefinizione01": "https://www.altadefinizione01.plus",
"altadefinizione01_link": "https://altadefinizione01.travel",
"animealtadefinizione": "https://www.animealtadefinizione.it",
"animeforce": "https://www.animeforce.it",
@@ -9,43 +9,45 @@
"animeunity": "https://www.animeunity.it",
"animeuniverse": "https://www.animeuniverse.it",
"animeworld": "https://www.animeworld.tv",
"aniplay": "https://aniplay.it",
"casacinema": "https://www.casacinema.page",
"cb01anime": "https://www.cineblog01.red",
"cineblog01": "https://cb01.uno",
"cinemalibero": "https://cinemalibero.blog",
"cinemalibero": "https://cinemalibero.bar",
"cinetecadibologna": "http://cinestore.cinetecadibologna.it",
"discoveryplus": "https://www.discoveryplus.com",
"dreamsub": "https://dreamsub.stream",
"dsda": "https://www.dsda.press",
"eurostreaming": "https://eurostreaming.click",
"eurostreaming": "https://eurostreaming.bar",
"filmigratis": "https://filmigratis.org",
"guardaseriecam": "https://guardaserie.cam",
"guardaserieclick": "https://www.guardaserie.support",
"guardaserieicu": "https://guardaserie.agency",
"guardaserieclick": "https://www.guardaserie.builders",
"guardaserieicu": "https://guardaserie.clothing",
"hd4me": "https://hd4me.net",
"ilcorsaronero": "https://ilcorsaronero.link",
"ilgeniodellostreaming": "https://ilgeniodellostreaming.ist",
"ilgeniodellostreaming_cam": "https://ilgeniodellostreaming.shop",
"italiaserie": "https://italiaserie.cam",
"ilgeniodellostreaming": "https://ilgeniodellostreaming.wtf",
"ilgeniodellostreaming_cam": "https://ilgeniodellostreaming.city",
"italiaserie": "https://italiaserie.date",
"mediasetplay": "https://www.mediasetplay.mediaset.it",
"mondoserietv": "https://mondoserietv.club",
"paramount": "https://www.paramountnetwork.it",
"piratestreaming": "https://www.piratestreaming.shop",
"piratestreaming": "https://www.piratestreaming.design",
"polpotv": "https://roma.polpo.tv",
"raiplay": "https://www.raiplay.it",
"seriehd": "https://seriehd.cam",
"seriehd": "https://altadefinizionecommunity.casa",
"serietvonline": "https://serietvonline.art",
"serietvsubita": "http://serietvsubita.xyz",
"serietvu": "https://www.serietvu.link",
"streamingcommunity": "https://streamingcommunity.xyz",
"serietvu": "https://www.serietvu.live",
"streamingcommunity": "https://streamingcommunity.vip",
"streamtime": "https://t.me/s/StreamTime",
"tantifilm": "https://www.tantifilm.vision",
"tantifilm": "https://www.tantifilm.stream",
"tapmovie": "https://it.tapmovie.net",
"toonitalia": "https://toonitalia.co",
"vvvvid": "https://www.vvvvid.it"
},
"findhost": {
"altadefinizioneclick": "https://altadefinizione-nuovo.click",
"altadefinizionecommunity": "https://altaregistrazione.com",
"animealtadefinizione": "https://www.animealtadefinizione.it",
"filmpertutti": "https://filmpertuttiii.nuovo.live"
}

View File

@@ -0,0 +1,27 @@
{
"id": "altadefinizionecommunity",
"name": "Altadefinizione Community",
"language": ["ita", "sub-ita"],
"active": true,
"thumbnail": "altadefinizionecommunity.png",
"banner": "",
"categories": ["movie", "tvshow", "vos"],
"not_active": ["include_in_newest"],
"settings": [
{
"default": "",
"enabled": true,
"id": "username",
"label": "username",
"type": "text",
"visible": true
},
{
"default": "",
"enabled": true,
"id": "password",
"label": "password",
"type": "text",
"visible": true
}]
}

View File

@@ -0,0 +1,230 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Altadefinizione Community
from logging import debug
from core import jsontools, support
from lib.fakeMail import Gmailnator
from platformcode import config, platformtools, logger
from core import scrapertools, httptools
def findhost(url):
    # The current domain is taken from the "Accedi" (log in) link of the page.
    return support.match(url, patron=r'<a href="([^"]+)/\w+">Accedi').match

# Resolved channel URL (findhost is used as a fallback resolver).
host = config.get_channel_url(findhost)
# Registration form lives on a separate domain.
register_url = 'https://altaregistrazione.com'
# The site's AJAX endpoints require the XMLHttpRequest marker.
headers = {'Referer': host, 'x-requested-with': 'XMLHttpRequest'}
@support.menu
def mainlist(item):
    """Channel main menu; @support.menu builds the menu from these locals."""
    support.info(item)
    # Menu entry, ['url', 'action', 'args', contentType]
    film = ['/load-more-film?anno=&order=&support_webp=1&type=movie&page=1',
            ('Generi', ['', 'genres', 'genres']),
            ]
    tvshow = ['/load-more-film?type=tvshow&anno=&order=&support_webp=1&page=1',
              ('Generi', ['', 'genres', 'genres']),
              ]
    altri = [
        # ('Per Lettera', ['/lista-film', 'genres', 'letters']),
        ('Qualità', ['', 'genres', 'quality']),
        # ('Anni', ['/anno', 'genres', 'years'])
        ]
    search = ''
    return locals()
def login():
    """Log into the site with the credentials stored in the channel settings.

    Returns True when the session ends up authenticated, False when the
    credentials are rejected.
    """
    r = support.httptools.downloadpage(host, cloudscraper=True)
    # CSRF token of the login form; it must be echoed back in the POST.
    token = support.match(r.data, patron=r'name=\s*"_token"\s*value=\s*"([^"]+)', cloudscraper=True).match
    if 'id="logged"' in r.text:
        logger.info('Già loggato')
    else:
        logger.info('Login in corso')
        # Previously '_token' was sent empty even though it was scraped above,
        # which the server rejects as an invalid form submission.
        post = {'_token': token,
                'form_action': 'login',
                'email': config.get_setting('username', channel='altadefinizionecommunity'),
                'password': config.get_setting('password', channel='altadefinizionecommunity')}
        r = support.httptools.downloadpage(host + '/login', post=post, headers={'referer': host}, cloudscraper=True)
        if not r.status_code in [200, 302] or 'Email o Password non validi' in r.text:
            platformtools.dialog_ok('AltadefinizioneCommunity', 'Username/password non validi')
            return False
    return 'id="logged"' in r.text
def registerOrLogin():
    """Make sure a working account exists.

    Tries the stored credentials first; otherwise asks the user whether to
    enter their own account or attempt an automatic registration through a
    disposable Gmailnator mailbox. Returns a truthy value when logged in.
    """
    if config.get_setting('username', channel='altadefinizionecommunity') and config.get_setting('password', channel='altadefinizionecommunity'):
        if login():
            return True
    action = platformtools.dialog_yesno('AltadefinizioneCommunity',
                                        'Questo server necessita di un account, ne hai già uno oppure vuoi tentare una registrazione automatica?',
                                        yeslabel='Accedi', nolabel='Tenta registrazione', customlabel='Annulla')
    if action == 1:  # log in: open the channel settings so the user can type credentials
        from specials import setting
        from core.item import Item
        user_pre = config.get_setting('username', channel='altadefinizionecommunity')
        password_pre = config.get_setting('password', channel='altadefinizionecommunity')
        setting.channel_config(Item(config='altadefinizionecommunity'))
        user_post = config.get_setting('username', channel='altadefinizionecommunity')
        password_post = config.get_setting('password', channel='altadefinizionecommunity')
        if user_pre != user_post or password_pre != password_post:
            # Credentials changed: rerun the whole flow with the new values.
            return registerOrLogin()
        else:
            return []
    elif action == 0:  # attempt automatic registration
        import random
        import string
        logger.debug('Registrazione automatica in corso')
        mailbox = Gmailnator()
        # Random 10-character alphanumeric password.
        randPsw = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(10))
        logger.debug('email: ' + mailbox.address)
        logger.debug('pass: ' + randPsw)
        reg = platformtools.dialog_register(register_url, email=True, password=True, email_default=mailbox.address, password_default=randPsw)
        if not reg:
            return False
        regPost = httptools.downloadpage(register_url, post={'email': reg['email'], 'password': reg['password']}, cloudscraper=True)
        if regPost.url == register_url:
            # Still on the registration page: surface the server-side error.
            error = scrapertools.htmlclean(scrapertools.find_single_match(regPost.data, 'Impossibile proseguire.*?</div>'))
            error = scrapertools.unescape(scrapertools.re.sub('\n\s+', ' ', error))
            platformtools.dialog_ok('AltadefinizioneCommunity', error)
            return False
        if reg['email'] == mailbox.address:
            if "L'indirizzo email risulta già registrato" in regPost.data:
                # httptools.downloadpage(baseUrl + '/forgotPassword', post={'email': reg['email']})
                platformtools.dialog_ok('AltadefinizioneCommunity', 'Indirizzo mail già utilizzato')
                return False
            mail = mailbox.waitForMail()
            if mail:
                # Follow the verification link received in the disposable mailbox.
                checkUrl = scrapertools.find_single_match(mail.body, '<a href="([^"]+)[^>]+>Verifica').replace(r'\/', '/')
                logger.debug('CheckURL: ' + checkUrl)
                httptools.downloadpage(checkUrl, cloudscraper=True)
                config.set_setting('username', mailbox.address, channel='altadefinizionecommunity')
                config.set_setting('password', randPsw, channel='altadefinizionecommunity')
                platformtools.dialog_ok('AltadefinizioneCommunity',
                                        'Registrato automaticamente con queste credenziali:\nemail:' + mailbox.address + '\npass: ' + randPsw)
            else:
                platformtools.dialog_ok('AltadefinizioneCommunity', 'Impossibile registrarsi automaticamente')
                return False
        else:
            # The user typed a different address: they must click the link themselves.
            platformtools.dialog_ok('AltadefinizioneCommunity', 'Hai modificato la mail quindi KoD non sarà in grado di effettuare la verifica in autonomia, apri la casella ' + reg['email']
                                    + ' e clicca sul link. Premi ok quando fatto')
        logger.debug('Registrazione completata')
    else:
        return False
    return True
@support.scrape
def peliculas(item):
    """Film/TV listing; @support.scrape consumes these locals (data, patron, ...)."""
    json = {}
    action = 'check'
    # Genre or other sub-menus: convert the page URL into a load-more-film API call.
    if '/load-more-film' not in item.url and '/search' not in item.url:
        import ast
        ajax = support.match(item.url, patron='ajax_data\s*=\s*"?\s*([^;]+)', cloudscraper=True).match
        item.url = host + '/load-more-film?' + support.urlencode(ast.literal_eval(ajax)) + '&page=1'
    if not '/search' in item.url:
        # The API returns the HTML fragments in a JSON 'data' array.
        json = support.httptools.downloadpage(item.url, headers=headers, cloudscraper=True).json
        data = "\n".join(json['data'])
    else:
        disabletmdb = True
        data = support.httptools.downloadpage(item.url, headers=headers, cloudscraper=True).data
    patron = r'wrapFilm">\s*<a href="(?P<url>[^"]+)">\s*<span class="year">(?P<year>[0-9]{4})</span>\s*<span[^>]+>[^<]+</span>\s*<span class="qual">(?P<quality>[^<]+).*?<img src="(?P<thumbnail>[^"]+)[^>]+>\s*<h3>(?P<title>[^<[]+)(?:\[(?P<lang>[sSuUbBiItTaA-]+))?'
    # Pagination: bump the trailing page=N query parameter.
    if json.get('have_next'):
        def fullItemlistHook(itemlist):
            spl = item.url.split('=')
            url = '='.join(spl[:-1])
            page = str(int(spl[-1])+1)
            support.nextPage(itemlist, item, next_page='='.join((url, page)), function_or_level='peliculas')
            return itemlist
    return locals()
def search(item, texto):
    """Global-search hook: point the item at the site search and list results."""
    support.info("search ", texto)
    item.args = 'search'
    item.url = host + "/search?s={}&page=1".format(texto)
    try:
        return peliculas(item)
    except:
        # A failing channel must never abort the global search: log and
        # hand back an empty result set instead.
        import sys
        exc_parts = sys.exc_info()
        for part in exc_parts:
            support.logger.error("%s" % part)
        return []
@support.scrape
def genres(item):
    """Genre/quality menus scraped from the site navigation dropdowns."""
    support.info(item)
    data = support.httptools.downloadpage(item.url, cloudscraper=True).data
    patronMenu = r'<a href="(?P<url>[^"]+)">(?P<title>[^<]+)'
    if item.args == 'quality':
        # 'Risoluzione' is the quality dropdown heading on the page.
        patronBlock = 'Risoluzione(?P<block>.*?)</ul>'
    else:
        # Pick the Film or Serie TV dropdown depending on the content type.
        patronBlock = ('Film' if item.contentType == 'movie' else 'Serie TV') + r'<span></span></a>\s+<ul class="dropdown-menu(?P<block>.*?)active-parent-menu'
    action = 'peliculas'
    return locals()
@support.scrape
def episodios(item):
    """Episode list; season/episode are encoded 0-based in the URL path."""
    support.info(item)
    # The page HTML was already downloaded by check() and stashed on the item.
    data = item.data
    patron = r'class="playtvshow " data-href="(?P<url>[^"]+)'
    def itemHook(it):
        # Last two URL segments are 0-based season/episode indexes.
        spl = it.url.split('/')[-2:]
        it.infoLabels['season'] = int(spl[0])+1
        it.infoLabels['episode'] = int(spl[1])+1
        return it
    return locals()
def check(item):
    """Open the detail page and route to episodios (show) or findvideos (movie)."""
    # NOTE(review): returns None when the URL already contains
    # '/watch-unsubscribed' — confirm callers never pass such URLs.
    if '/watch-unsubscribed' not in item.url:
        playWindow = support.match(support.httptools.downloadpage(item.url, cloudscraper=True).data, patron='playWindow" href="([^"]+)')
        video_url = playWindow.match
        if '/tvshow' in video_url:
            # Keep the already-downloaded HTML for episodios().
            item.data = playWindow.data
            item.contentType = 'tvshow'
            return episodios(item)
        else:
            # Movies: switch to the external (unauthenticated) player page.
            item.video_url = video_url.replace('/watch-unsubscribed', '/watch-external')
            return findvideos(item)
def findvideos(item):
    """Extract the embedded player iframe and hand it to the server resolver."""
    iframe_url = support.match(item.video_url, patron='allowfullscreen[^<]+src="([^"]+)"', cloudscraper=True).match
    itemlist = [item.clone(action='play', url=iframe_url, quality='')]
    return support.server(item, itemlist=itemlist)
def play(item):
    """Resolve playback; the site's own player requires a logged-in account."""
    if host in item.url:  # intercept the proprietary server
        if registerOrLogin():
            return support.get_jwplayer_mediaurl(support.httptools.downloadpage(item.url, cloudscraper=True).data, 'Diretto')
        else:
            # Login refused/cancelled: tell the platform playback was aborted.
            platformtools.play_canceled = True
            return []
    else:
        # External hoster: let the standard server resolvers handle it.
        return [item]

37
channels/aniplay.json Normal file
View File

@@ -0,0 +1,37 @@
{
"id": "aniplay",
"name": "AniPlay",
"active": true,
"language": ["ita", "sub-ita"],
"thumbnail": "aniplay.png",
"banner": "aniplay.png",
"categories": ["anime", "vos"],
"settings": [
{
"id": "sort",
"type": "list",
"label": "Ordine di Visualizzazione",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [ "Popolarità", "Titolo", "Numero Episodi", "Data di inizio", "Data di fine", "Data di aggiunta"]
},
{
"id": "order",
"type": "bool",
"label": "Visualizza in ordine Discendente?",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "perpage",
"type": "list",
"label": "Numero di elementi per pagina",
"default": 1,
"enabled": true,
"visible": true,
"lvalues": ["10", "20", "30", "40", "50", "60", "80", "90"]
}
]
}

331
channels/aniplay.py Normal file
View File

@@ -0,0 +1,331 @@
from platformcode import config, logger, autorenumber
from core import httptools, scrapertools, support, tmdb
from inspect import stack
import sys
if sys.version_info[0] >= 3:
from concurrent import futures
else:
from concurrent_py2 import futures
host = config.get_channel_url()
# Listing preferences (see channels/aniplay.json "settings"); the index read
# from each setting must line up with the "lvalues" list declared there.
sort = ['views', 'title', 'episodeNumber', 'startDate', 'endDate', 'createdDate'][config.get_setting('sort', 'aniplay')]
order = 'asc' if config.get_setting('order', 'aniplay') else 'desc'
# The JSON setting offers 10..60 then 80, 90 (there is no "70" label); the old
# list kept a spurious 70, shifting every choice above 60 by one step.
perpage = [10, 20, 30, 40, 50, 60, 80, 90][config.get_setting('perpage', 'aniplay')]
@support.menu
def mainlist(item):
    """Channel main menu; @support.menu builds the items from these locals."""
    # Entry format: ('Label', ['url', 'action', 'args'])
    anime=['/api/anime/advanced-search',
        ('A-Z', ['/api/anime/advanced-search', 'submenu_az', '']),
        ('Anno', ['', 'submenu_year', '']),
        ('Top', ['', 'submenu_top', '']),
        ('Ultimi aggiunti', ['', 'latest_added', ''])]
    return locals()
def submenu_az(item):
    """One entry per initial letter (0-9 plus A-Z), served by find-by-char."""
    itemlist = []
    # NOTE(review): mainlist passes '' as args for the A-Z entry — confirm
    # support.menu sets item.args to 'az', otherwise this returns [].
    if item.args == 'az':
        for letter in ['0-9'] + list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
            itemlist.append(item.clone(title = support.typo(letter, 'bold'),
                                       url= host + '/api/anime/find-by-char',
                                       action= 'peliculas',
                                       variable= '&character=' + letter,
                                       thumbnail=support.thumb('az')))
    return itemlist
def submenu_year(item):
    """One entry per year, from the current year down to the oldest anime."""
    itemlist = []
    from datetime import date
    current = date.today().year
    # Earliest startDate known to the API: ask for a single result sorted
    # ascending and take its year (minus one so range() includes it).
    first = int(httptools.downloadpage('{}/api/anime/advanced-search?page=0&size=1&sort=startDate,asc&sort=id'.format(host)).json[0]['startDate'].split('-')[0]) -1
    for year in range(current, first, -1):
        itemlist.append(item.clone(title = support.typo(year, 'bold'),
                                   action= 'submenu_season',
                                   variable= year,
                                   thumbnail=support.thumb('year')))
    return itemlist
def submenu_top(item):
    """Menu with the daily / weekly / monthly most-viewed charts."""
    chart_slugs = (('Top del giorno', 'daily-top'),
                   ('Top della settimana', 'weekly-top'),
                   ('Top del mese', 'monthly-top'))
    return [item.clone(title=support.typo(label, 'bold'),
                       action='submenu_top_of',
                       variable=slug)
            for label, slug in chart_slugs]
def submenu_season(item):
    """List the seasons (winter/spring/summer/fall) available for a year."""
    itemlist = []
    seasons = {'winter':'Inverno', 'spring':'Primavera', 'summer':'Estate', 'fall':'Autunno'}
    url= '{}/api/seasonal-view?page=0&size=36&years={}'.format(host, item.variable)
    js = httptools.downloadpage(url).json[0]['seasonalAnime']
    for season in js:
        # The season field is an enum-like dotted string; keep the last part.
        s = season['season'].split('.')[-1]
        title = seasons[s]
        itemlist.append(item.clone(title=title,
                                   url = '{}/api/seasonal-view/{}-{}'.format(host, s, item.variable),
                                   thumbnail = support.thumb(s),
                                   action = 'peliculas',
                                   variable=''))
    return itemlist
def submenu_top_of(item):
    """Expand one 'top' chart (daily/weekly/monthly) into its anime entries."""
    itemlist = []
    url= '{}/api/home/{}'.format(host, item.variable)
    js = httptools.downloadpage(url).json
    for anime in js:
        fulltitle = anime['animeTitle']
        # Titles come as "Name (ITA)"; absence of a tag means subtitled.
        title = fulltitle.split('(')[0].strip()
        scrapedlang = scrapertools.find_single_match(fulltitle, r'\(([^\)]+)')
        lang = scrapedlang.upper() if scrapedlang else 'Sub-ITA'
        long_title = support.typo(title, 'bold') + support.typo(lang, '_ [] color kod')
        itemlist.append(item.clone(title=long_title,
                                   id=anime['animeId'],
                                   url = '{}/api/anime/{}'.format(host, anime['animeId']),
                                   thumbnail = get_thumbnail(anime, 'animeHorizontalImages'),
                                   action = 'episodios',
                                   variable=anime['animeId']))
    return itemlist
def search(item, texto):
    """Global-search hook: query the advanced-search API for *texto*."""
    support.info(texto)
    item.url = '{}/api/anime/advanced-search'.format(host)
    item.variable = '&query=' + texto
    try:
        return peliculas(item)
    except:
        # A failing channel must not abort the global search: log, return [].
        import sys
        exc_parts = sys.exc_info()
        for part in exc_parts:
            support.logger.error("%s" % part)
        return []
def newest(categoria):
    """Entry point used by the global 'news' section."""
    support.info(categoria)
    item = support.Item()
    try:
        if categoria == "anime":
            return latest_added(item)
    # Keep the global news listing alive on any error.
    except:
        import sys
        for line in sys.exc_info():
            support.logger.error("{0}".format(line))
    return []
def latest_added(item):
    """Most recently published episodes, one page at a time."""
    itemlist = []
    page = item.page if item.page else 0
    url= '{}/api/home/latest-episodes?page={}'.format(host, page)
    js = httptools.downloadpage(url).json
    for episode in js:
        title = episode['title']
        animeTitle, lang = get_lang(episode['animeTitle'])
        quality = 'Full HD' if episode['fullHd'] else 'HD'
        # "<ep no>. <episode title> - <anime title> [lang] [quality]"
        long_title = support.typo('{}. {}{}'.format(int(float(episode['episodeNumber'])), title + ' - ' if title else '', animeTitle), 'bold') + support.typo(lang, '_ [] color kod') + support.typo(quality, '_ [] color kod')
        image = get_thumbnail(episode, 'episodeImages')
        itemlist.append(item.clone(title=long_title,
                                   fulltitle=title,
                                   animeId = episode['animeId'],
                                   id=episode['id'],
                                   contentType = 'episode',
                                   contentTitle = title,
                                   contentSerieName = animeTitle,
                                   contentLanguage = lang,
                                   quality = quality,
                                   contentEpisodeNumber = int(float(episode['episodeNumber'])),
                                   animeUrl = '{}/api/anime/{}'.format(host, episode['animeId']),
                                   thumbnail = image,
                                   fanart = image,
                                   action = 'findvideos'))
    # No pagination when called from the global 'newest' section.
    if stack()[1][3] not in ['newest']:
        support.nextPage(itemlist, item.clone(page = page + 1))
    return itemlist
def peliculas(item):
    """Paginated anime listing from the search / find-by-char APIs."""
    logger.debug()
    itemlist = []
    page = item.page if item.page else 0
    # item.variable carries extra query fragments (letter filter, search text).
    js = httptools.downloadpage('{}?page={}&size={}{}&sort={},{}&sort=id'.format(item.url, page, perpage, item.variable, sort, order)).json
    for it in js:
        title, lang = get_lang(it['title'])
        long_title = support.typo(title, 'bold') + support.typo(lang, '_ [] color kod')
        itemlist.append(item.clone(title = long_title,
                                   fulltitle = title,
                                   show = title,
                                   contentLanguage = lang,
                                   contentType = 'movie' if it['type'] == 'Movie' else 'tvshow',
                                   contentTitle = title,
                                   contentSerieName = title if it['type'] == 'Serie' else '',
                                   action ='findvideos' if it['type'] == 'Movie' else 'episodios',
                                   plot = it['storyline'],
                                   year = it['startDate'].split('-')[0],
                                   url = '{}/api/anime/{}'.format(host, it['id']),
                                   thumbnail = get_thumbnail(it),
                                   fanart = get_thumbnail(it, 'horizontalImages')))
    autorenumber.start(itemlist)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # A full page implies there may be more results.
    if len(itemlist) == perpage:
        support.nextPage(itemlist, item.clone(page = page + 1))
    return itemlist
def episodios(item):
    """Season/episode listing for one anime.

    The API may answer with a plain episode list, a multi-season payload, or
    a single-season payload; videolibrary/renumber callers get the seasons
    expanded into episodes in parallel.
    """
    logger.debug()
    itemlist = []
    # url = '{}/api/anime/{}'.format(host, item.id)
    json = httptools.downloadpage(item.url, CF=False ).json
    if type(json) == list:
        # Already a flat episode list.
        item.show_renumber = False
        itemlist = list_episodes(item, json)
    elif json.get('seasons'):
        seasons = json['seasons']
        seasons.sort(key=lambda s: s['episodeStart'])
        for it in seasons:
            title = it['name']
            itemlist.append(item.clone(title = title,
                                       id= '{}/season/{}'.format(it['animeId'], it['id']),
                                       contentType = 'season',
                                       action = 'list_episodes',
                                       plot = json['storyline'],
                                       year = it['yearStart'],
                                       show_renumber = True))
        # If the call comes from the videolibrary or autorenumber, expand the
        # seasons into episodes (concurrently, one request per season).
        if stack()[1][3] in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
            itlist = []
            with futures.ThreadPoolExecutor() as executor:
                eplist = []
                for ep in itemlist:
                    ep.show_renumber = False
                    eplist.append(executor.submit(list_episodes, ep))
                for res in futures.as_completed(eplist):
                    if res.result():
                        itlist.extend(res.result())
            itemlist = itlist
    elif json.get('episodes'):
        itemlist = list_episodes(item, json)
    # Add the renumber option when episodes are shown interactively.
    if stack()[1][3] not in ['find_episodes'] and itemlist and itemlist[0].contentType == 'episode':
        autorenumber.start(itemlist, item)
    # Add the add-to-videolibrary menu entry.
    if stack()[1][3] not in ['add_tvshow', 'get_episodes', 'update', 'find_episodes']:
        support.videolibrary(itemlist, item)
    return itemlist
def list_episodes(item, json=None):
    """Build episode items for one anime/season; fetches the JSON if not given."""
    itemlist = []
    if not json:
        url = '{}/api/anime/{}'.format(host, item.id)
        json = httptools.downloadpage(url, CF=False ).json
    episodes = json['episodes'] if 'episodes' in json else json
    # episodeNumber may be fractional ("12.5"); sort on the integer part.
    episodes.sort(key=lambda ep: int(ep['episodeNumber'].split('.')[0]))
    for it in episodes:
        quality = 'Full HD' if it['fullHd'] else 'HD'
        # "SxEE" when a season is known, plain "EE" otherwise.
        if item.contentSeason:
            episode = '{}x{:02d}'.format(item.contentSeason, int(it['episodeNumber'].split('.')[0]))
        else:
            episode = '{:02d}'.format(int(it['episodeNumber'].split('.')[0]))
        title = support.typo('{}. {}'.format(episode, it['title']), 'bold')
        image = get_thumbnail(it, 'episodeImages')
        itemlist.append(item.clone(title = title,
                                   id= it['id'],
                                   url= 'api/episode/{}'.format(it['id']),
                                   contentType = 'episode',
                                   contentEpisodeNumber = int(it['episodeNumber'].split('.')[0]),
                                   contentSeason = item.contentSeason if item.contentSeason else '',
                                   action = 'findvideos',
                                   quality = quality,
                                   thumbnail = image,
                                   fanart= image))
    # Renumber episodes only if shown in the menu.
    if item.show_renumber:
        autorenumber.start(itemlist, item)
    return itemlist
def findvideos(item):
    """Resolve the direct stream URL of an episode (or a movie's only episode)."""
    logger.debug()
    url = '{}/api/{}/{}'.format(host, 'episode' if item.contentType == 'episode' else 'anime', item.id)
    json = httptools.downloadpage(url, CF=False ).json
    # Movies: the anime payload carries one episode; fetch its details.
    if json.get('episodes', []):
        json = httptools.downloadpage('{}/api/episode/{}'.format(host, json['episodes'][0]['id'])).json
    videourl = json['episodeVideo']
    itemlist = [item.clone(title=config.get_localized_string(30137),
                           url=videourl,
                           server='directo')]
    return support.server(item, itemlist=itemlist)
def get_thumbnail(data, prop='verticalImages', key='full'):
    """Return the image URL stored under *prop* for the requested size *key*.

    key: one of small / full / blurred / medium
    prop: one of verticalImages / animeHorizontalImages /
          animeVerticalImages / horizontalImages / episodeImages

    Returns None when *prop* is missing, empty, or its first entry is falsy;
    returns '' when the first entry lacks the requested size.
    """
    images = data.get(prop, [])
    if not images or not images[0]:
        return None
    # Keys in the payload look like imageFull / imageSmall / ...
    return images[0].get('image' + key.capitalize(), '')
def get_lang(value):
    """Split an API title like "Name (ITA)" into (title, language).

    The language defaults to 'Sub-ITA' when no parenthesised tag is present.
    The title is stripped of the trailing whitespace left before the
    parenthesis, matching how submenu_top_of handles the same field (the old
    version left titles like "Name " with a dangling space).
    """
    title = value.split('(')[0].strip() if value else ''
    scrapedlang = scrapertools.find_single_match(value, r'\(([^\)]+)')
    lang = scrapedlang.upper() if scrapedlang else 'Sub-ITA'
    return title, lang

View File

@@ -5,6 +5,6 @@
"active": true,
"thumbnail": "cb01.png",
"banner": "cb01.png",
"categories": ["tvshow", "movie", "sub, "documentary"],
"categories": ["tvshow", "movie", "sub", "documentary"],
"settings": []
}

View File

@@ -0,0 +1,369 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Canale per Mediaset Play
# ------------------------------------------------------------
from platformcode import logger, config
import uuid
import requests, sys
from core import support, jsontools
if sys.version_info[0] >= 3: from urllib.parse import urlencode, quote
else: from urllib import urlencode, quote
if sys.version_info[0] >= 3: from concurrent import futures
else: from concurrent_py2 import futures
from collections import OrderedDict
# Episodes fetched per request in episodios().
PAGINATION = 4
host = config.get_channel_url()
# theplatform SMIL query selecting the playable formats/DRM flavours.
post_url = '?assetTypes=HD,browser,widevine,geoIT|geoNo:HD,browser,geoIT|geoNo:HD,geoIT|geoNo:SD,browser,widevine,geoIT|geoNo:SD,browser,geoIT|geoNo:SD,geoIT|geoNo&auto=true&balance=true&format=smil&formats=MPEG-DASH,MPEG4,M3U&tracking=true'
# Fixed device id sent with the anonymous login.
deviceid = '61d27df7-5cbf-4419-ba06-cfd27ecd4588'
loginUrl = 'https://api-ott-prod-fe.mediaset.net/PROD/play/idm/anonymous/login/v2.0'
loginData = {"cid": deviceid, "platform": "pc", "appName": "web/mediasetplay-web/d667681"}
# Widevine licence URL template; {token} is filled after login, %s with the pid.
lic_url = 'https://widevine.entitlement.theplatform.eu/wv/web/ModularDrm/getRawWidevineLicense?releasePid=%s&account=http://access.auth.theplatform.com/data/Account/2702976343&schema=1.0&token={token}|Accept=*/*&Content-Type=&User-Agent=' + support.httptools.get_user_agent() + '|R{{SSM}}|'
# Accedo CMS endpoints for menu/section metadata.
entry = 'https://api.one.accedo.tv/content/entry/{id}?locale=it'
entries = 'https://api.one.accedo.tv/content/entries?id={id}&locale=it'
sessionUrl = "https://api.one.accedo.tv/session?appKey=59ad346f1de1c4000dfd09c5&uuid={uuid}&gid=default"
# Shared HTTP session carrying the auth headers for every API call.
current_session = requests.Session()
current_session.headers.update({'Content-Type': 'application/json', 'User-Agent': support.httptools.get_user_agent(), 'Referer': support.config.get_channel_url()})
# Anonymous login: obtain the bearer token used by all subsequent calls.
# (A leftover support.dbg() debugger breakpoint was removed from here.)
res = current_session.post(loginUrl, json=loginData, verify=False)
# requests' Response.json is a method — the old code indexed the bound
# method object (res.json['response']), which raises TypeError.
response = res.json()['response']
Token = response['beToken']
sid = response['sid']
# The Authorization scheme needs a space: "Bearer <token>".
current_session.headers.update({'authorization': 'Bearer ' + Token})
lic_url = lic_url.format(token=Token)
tracecid = response['traceCid']
cwid = response['cwId']
# Accedo session key, required by the content/entry endpoints.
res = current_session.get(sessionUrl.format(uuid=str(uuid.uuid4())), verify=False)
current_session.headers.update({'x-session': res.json()['sessionKey']})
# Maps Accedo uxReference codes to the corresponding feed-mapping names
# used by the channel's listing logic.
cdict = {'CVFILM':'filmUltimiArrivi',
         'CWFILMTOPVIEWED':'filmPiuVisti24H',
         'CWFILMCOMEDY':'filmCommedia',
         'CWFILMACTION':'filmAzioneThrillerAvventura',
         'CWFILMDRAMATIC':'filmDrammatico',
         'CWFILMSENTIMENTAL':'filmSentimentale',
         'CWFILMCLASSIC':'filmClassici',
         'personToContentFilm':'personToContentFilm',
         'CWHOMEFICTIONNOWELITE':'stagioniFictionSerieTvSezione',
         'CWFICTIONSOAP':'mostRecentSoapOpera',
         'CWFICTIONDRAMATIC':'stagioniFictionDrammatico',
         'CWFICTIONPOLICE':'stagioniFictionPoliziesco',
         'CWFICTIONCOMEDY':'stagioniFictionCommedia',
         'CWFICTIONSITCOM':'stagioniFictionSitCom',
         'CWFICTIONSENTIMENTAL':'stagioniFictionSentimentale',
         'CWFICTIONBIOGRAPHICAL':'stagioniFictionBiografico',
         'CWPROGTVPRIME':'stagioniPrimaSerata',
         'CWPROGTVDAY':'stagioniDaytime',
         'CWPROGTVTOPVIEWED':'programmiTvClip24H',
         'CWPROGTVTALENT':'stagioniReality',
         'CWPROGTVVARIETY':'stagioniVarieta',
         'CWPROGTVTALK':'stagioniTalk',
         'CWPROGTVTG':'mostRecentTg',
         'CWPROGTVSPORT':'mostRecentSport',
         'CWPROGTVMAGAZINE':'stagioniCucinaLifestyle',
         'CWDOCUMOSTRECENT':'mostRecentDocumentariFep',
         'CWDOCUTOPVIEWED':'stagioniDocumentari',
         'CWDOCUSPAZIO':'documentariSpazio',
         'CWDOCUNATURANIMALI':'documentariNatura',
         'CWDOCUSCIENZATECH':'documentariScienza',
         'CWDOCUBIOSTORIE':'documentariBioStoria',
         'CWDOCUINCHIESTE':'documentariInchiesta',
         'CWFILMDOCU':'filmDocumentario',
         'CWKIDSBOINGFORYOU':'kidsBoing',
         'CWKIDSCARTOONITO':'kidsCartoonito',
         'CWKIDSMEDIASETBRAND':'kidsMediaset',
         'CWENABLERKIDS':'stagioniKids'}
@support.menu
def mainlist(item):
    """Main menu: live stations plus the Accedo content sections."""
    # Entry format: ('Label', ['url', 'action', args, contentType])
    top = [('Dirette {bold}', ['https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-stations?sort=ShortTitle', 'live'])]
    menu = [('Fiction / Serie TV {bullet bold}', ['/fiction', 'menu', ['Tutte','all','searchStagioni', '5acfcb3c23eec6000d64a6a4'], 'tvshow']),
            ('Programmi TV{ bullet bold}', ['/programmitv', 'menu', ['Tutti','all','searchStagioni', '5acfc8011de1c4000b6ec953'], 'tvshow']),
            ('Documentari {bullet bold}', ['/documentari', 'menu', ['Tutti','all','', '5bfd17c423eec6001aec49f9'], 'undefined']),
            ('Kids {bullet bold}', ['/kids', 'menu',['Tutti','all','', '5acfcb8323eec6000d64a6b3'], 'undefined'])]
    search = ''
    return locals()
def search(item, text):
    """Global-search hook: filter the full programme feeds by *text*."""
    itemlist = []
    logger.debug(text)
    # NOTE(review): peliculas() filters on item.search but branches on
    # item.text — confirm which component sets item.text for searches.
    item.search = text
    try:
        itemlist = peliculas(item)
    # Keep the global search alive even if this channel fails.
    except:
        import sys
        for line in sys.exc_info():
            support.logger.error("%s" % line)
    return itemlist
def menu(item):
    """Expand an Accedo section id into its sub-menu entries."""
    logger.debug()
    itemlist = []
    if item.url:
        # get_from_id is defined elsewhere in this file (outside this view).
        json = get_from_id(item)
        for it in json:
            logger.debug(jsontools.dump(it))
            # Only entries carrying a uxReference are navigable sections.
            if 'uxReference' in it: itemlist.append(
                item.clone(title=support.typo(it['title'], 'bullet bold'), url= it['landingUrl'], feed = it.get('feedurlV2',''), ref=it['uxReference'], args='', action='peliculas'))
    return itemlist
def liveDict():
    """Map station title -> {'urls': [...], 'plot': now/next EPG text}."""
    livedict = OrderedDict({})
    json = current_session.get('https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-stations?sort=ShortTitle').json()['entries']
    for it in json:
        urls = []
        # Skip digital-only stations and those without tuning data.
        if it.get('tuningInstruction') and not it.get('mediasetstation$digitalOnly'):
            # Now/next EPG information for the plot text.
            guide=current_session.get('https://static3.mediasetplay.mediaset.it/apigw/nownext/' + it['callSign'] + '.json').json()['response']
            for key in it['tuningInstruction']['urn:theplatform:tv:location:any']:
                urls += key['publicUrls']
            title = it['title']
            livedict[title] = {}
            livedict[title]['urls'] = urls
            livedict[title]['plot'] = support.typo(guide['currentListing']['mediasetlisting$epgTitle'],'bold') + '\n' + guide['currentListing']['mediasetlisting$shortDescription'] + '\n' + guide['currentListing']['description'] + '\n\n' + support.typo('A Seguire:' + guide['nextListing']['mediasetlisting$epgTitle'], 'bold')
    return livedict
def live(item):
    """One playable item per live station; thumbnails set by support.thumb."""
    logger.debug()
    itemlist = []
    for key, value in liveDict().items():
        itemlist.append(item.clone(title=support.typo(key, 'bold'),
                                   fulltitle=key,
                                   show=key,
                                   contentTitle=key,
                                   forcethumb=True,
                                   urls=value['urls'],
                                   plot=value['plot'],
                                   action='play',
                                   no_return=True))
    return support.thumb(itemlist, live=True)
def peliculas(item):
    """Programme listing, optionally filtered by a search string.

    Searches run the three feed queries concurrently; results are
    de-duplicated by title and classified as movies (direct media) or
    shows (brand id -> epmenu).
    """
    logger.debug()
    itemlist = []
    titlelist = []
    contentType = ''
    if item.text:
        # Search mode: query movies, seasons and clips in parallel.
        json = []
        itlist = []
        with futures.ThreadPoolExecutor() as executor:
            for arg in ['searchMovie', 'searchStagioni', 'searchClip']:
                item.args = ['', 'search', arg]
                itlist += [executor.submit(get_programs, item)]
            for res in futures.as_completed(itlist):
                json += res.result()
    else:
        # get_programs is defined elsewhere in this file (outside this view).
        json = get_programs(item)
    for it in json:
        # De-duplicate by title and apply the search filter.
        if item.search.lower() in it['title'].lower() and it['title'] not in titlelist:
            titlelist.append(it['title'])
            if 'media' in it:
                # Direct media entries are playable movies.
                action = 'findvideos'
                contentType = 'movie'
                urls = []
                for key in it['media']:
                    urls.append(key['publicUrl'])
            else:
                # Otherwise navigate into the brand (show) menu.
                action = 'epmenu'
                contentType = 'tvshow'
                urls = it['mediasetprogram$brandId']
            if urls:
                title = it['mediasetprogram$brandTitle'] + ' - ' if 'mediasetprogram$brandTitle' in it and it['mediasetprogram$brandTitle'] != it['title'] else ''
                itemlist.append(
                    item.clone(channel=item.channel,
                               action=action,
                               title=support.typo(title + it['title'], 'bold'),
                               fulltitle=it['title'],
                               show=it['title'],
                               contentType=contentType if contentType else item.contentType,
                               contentTitle=it['title'] if 'movie' in [contentType, item.contentType] else '',
                               contentSerieName=it['title'] if 'tvshow' in [contentType, item.contentType] else '',
                               thumbnail=it['thumbnails']['image_vertical-264x396']['url'] if 'image_vertical-264x396' in it['thumbnails'] else '',
                               fanart=it['thumbnails']['image_keyframe_poster-1280x720']['url'] if 'image_keyframe_poster-1280x720' in it['thumbnails'] else '',
                               plot=it['longDescription'] if 'longDescription' in it else it['description'] if 'description' in it else '',
                               urls=urls,
                               seriesid = it.get('seriesId',''),
                               url=it['mediasetprogram$pageUrl'],
                               forcethumb=True,
                               no_return=True))
    # Feed-backed listings page in steps of 100.
    if item.feed:
        item.page = item.page + 100 if item.page else 101
        support.nextPage(itemlist, item)
    return itemlist
def epmenu(item):
    """Season / sub-brand menu for a show; auto-descends single entries."""
    logger.debug()
    itemlist = []
    if item.seriesid:
        seasons = current_session.get('https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-tv-seasons?bySeriesId=' + item.seriesid).json()['entries']
        for season in seasons:
            if 'mediasettvseason$brandId' in season and 'mediasettvseason$displaySeason' in season:
                itemlist.append(
                    item.clone(seriesid = '',
                               title=support.typo(season['mediasettvseason$displaySeason'], 'bold'),
                               urls=season['mediasettvseason$brandId']))
        itemlist = sorted(itemlist, key=lambda it: it.title, reverse=True)
        # A single season: descend straight into its sub-brands.
        if len(itemlist) == 1: return epmenu(itemlist[0])
    if not itemlist:
        # No seasons: list the brand's sub-brands. NOTE: the local name
        # `entries` shadows the module-level `entries` URL template.
        entries = current_session.get('https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-brands?byCustomValue={brandId}{' + item.urls + '}').json()['entries']
        for entry in entries:
            if 'mediasetprogram$subBrandId' in entry:
                itemlist.append(
                    item.clone(action='episodios',
                               title=support.typo(entry['description'], 'bold'),
                               url=entry['mediasetprogram$subBrandId'],
                               order=entry.get('mediasetprogram$order',0)))
        # A single sub-brand: jump straight to its episodes.
        if len(itemlist) == 1: return episodios(itemlist[0])
        itemlist = sorted(itemlist, key=lambda it: it.order)
    return itemlist
def episodios(item):
    """Paginated episode listing for a Mediaset sub-brand feed.

    Fetches one page of PAGINATION+1 programs, keeps only entries that carry
    playable media URLs, and appends a next-page item when the feed appears
    to have more results. A single episode jumps straight to findvideos.

    Fix: 'description' and 'thumbnails' are not guaranteed in every entry —
    the old code raised KeyError on entries missing them; now falls back to
    '' like the sibling listing code does.
    """
    logger.debug()
    itemlist = []
    if not item.nextIndex: item.nextIndex = 1
    url = 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs?byCustomValue={subBrandId}{'+ item.url + '}&range=' + str(item.nextIndex) + '-' + str(item.nextIndex + PAGINATION)
    entries_json = current_session.get(url).json()['entries']
    for it in entries_json:
        # Collect every public media URL; entries without media are skipped.
        urls = [media['publicUrl'] for media in it.get('media', [])]
        if urls:
            thumbs = it.get('thumbnails', {})
            itemlist.append(
                item.clone(action='findvideos',
                           title=support.typo(it['title'], 'bold'),
                           contentType='episode',
                           thumbnail=thumbs['image_vertical-264x396']['url'] if 'image_vertical-264x396' in thumbs else '',
                           fanart=thumbs['image_keyframe_poster-1280x720']['url'] if 'image_keyframe_poster-1280x720' in thumbs else '',
                           plot=it.get('longDescription', it.get('description', '')),
                           urls=urls,
                           url=it['mediasetprogram$pageUrl'],
                           year=it.get('year', ''),
                           ep=it.get('tvSeasonEpisodeNumber', 0) or 0,
                           forcethumb=True,
                           no_return=True))
    if len(itemlist) == 1: return findvideos(itemlist[0])
    if len(entries_json) >= PAGINATION:
        # Full page returned: assume more episodes and advance the range start.
        item.nextIndex += PAGINATION + 1
        support.nextPage(itemlist, item)
    return itemlist
def findvideos(item):
    """Expose the collected Mediaset Play URLs as a single 'directo' server."""
    logger.debug()
    direct = item.clone(server='directo', title='Mediaset Play', urls=item.urls, action='play')
    return support.server(item, itemlist=[direct], Download=False)
def play(item):
    """Resolve the playable MPD manifest (and Widevine license when DRM'd).

    Tries each candidate URL in order and stops at the first one that
    exposes a pid (DRM-protected stream).
    """
    logger.debug()
    for candidate in item.urls:
        page = support.match(candidate + post_url).data
        item.url = support.match(page, patron=r'<video src="([^"]+)').match
        pid = support.match(page, patron=r'pid=([^|]+)').match
        item.manifest = 'mpd'
        if not pid:
            continue
        item.drm = 'com.widevine.alpha'
        item.license = lic_url % pid
        break
    return [item]
def subBrand(json):
    """Return the program entries of a brand's most recent sub-brand."""
    logger.debug()
    brands_url = 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-brands?byCustomValue={brandId}{' + json + '}'
    # The last entry is the newest sub-brand for this brand id.
    last_sub_brand = current_session.get(brands_url).json()['entries'][-1]['mediasetprogram$subBrandId']
    programs_url = 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs?byCustomValue={subBrandId}{' + last_sub_brand + '}'
    return current_session.get(programs_url).json()['entries']
def get_from_id(item):
    """Resolve a page id into its feed entries, or {} when nothing matches."""
    logger.debug()
    payload = current_session.get(entry.format(id=item.args[3])).json()
    if 'components' in payload:
        component_ids = quote(",".join(payload["components"]))
        payload = current_session.get(entries.format(id=component_ids)).json()
        if 'entries' in payload:
            return payload['entries']
    return {}
def get_programs(item, ret=None, args=None):
    """Collect programme entries for a search, feed or catalogue listing.

    Builds the appropriate API URL from ``item`` (full-text search, direct
    feed, or catalogue listing), fetches it and accumulates the resulting
    entries into ``ret``, recursing while the response reports ``hasMore``.

    Fix: the old signature used mutable defaults (``ret=[]``, ``args={}``)
    that are mutated in place, so results and query arguments leaked across
    independent top-level calls; ``None`` sentinels restore per-call state.
    """
    if ret is None:
        ret = []
    if args is None:
        args = {}
    hasMore = False
    url = ''
    # support.dbg()
    if 'search' in item.args:
        # Full-text search endpoint.
        args['uxReference'] = item.args[2]
        args["query"] = item.text
        args['traceCid'] = tracecid
        args['cwId'] = cwid
        args['page'] = 1
        args['platform'] = 'pc'
        args['hitsPerPage'] = 500
        url = 'https://api-ott-prod-fe.mediaset.net/PROD/play/rec2/search/v1.0?' + urlencode(args)
    elif item.feed:
        # Direct feed URL, paged in 100-entry ranges.
        pag = item.page if item.page else 1
        url = '{}&range={}-{}'.format(item.feed, pag, pag + 99)
    elif not args:
        # First call of a catalogue listing: assemble the query arguments.
        if item.ref in cdict:
            args['uxReference'] = cdict[item.ref]
            args['platform'] = 'pc'
        else:
            args = {"query": "*:*"}
            if item.args[2]:
                args['categories'] = item.args[2]
        args['cwId'] = cwid
        args['traceCid'] = tracecid
        args['hitsPerPage'] = 500
        args['page'] = '0'
        args['deviceId'] = deviceid
        url = "https://api-ott-prod-fe.mediaset.net/PROD/play/rec2/cataloguelisting/v1.0?" + urlencode(args)
    if url:
        payload = current_session.get(url).json()
        if 'response' in payload:
            payload = payload['response']
        if 'hasMore' in payload:
            hasMore = payload['hasMore']
        if 'components' in payload:
            # Component ids must be fetched with a second request.
            component_ids = quote(",".join(payload["components"]))
            payload = current_session.get(entries.format(id=component_ids)).json()
        if 'entries' in payload:
            ret += payload['entries']
        if hasMore:
            # Next page, accumulating into the same list.
            args['page'] = str(int(args['page']) + 1)
            return get_programs(item, ret, args)
        return ret
    return ret

View File

@@ -177,7 +177,7 @@ def episodios(item):
for it in episodes['episodes']:
itemlist.append(
support.Item(channel=item.channel,
title=support.typo(str(episodes['number']) + 'x' + str(it['number']).zfill(2) + ' - ' + it['name'], 'bold'),
title=it['name'],
episode = it['number'],
season=episodes['number'],
thumbnail=it['images'][0]['original_url'] if 'images' in it and 'original_url' in it['images'][0] else item.thumbnail,
@@ -185,6 +185,8 @@ def episodios(item):
plot=it['plot'],
action='findvideos',
contentType='episode',
contentSeason = int(episodes['number']),
contentEpisodeNumber = int(it['number']),
contentSerieName=item.fulltitle,
url=host + '/watch/' + str(episodes['title_id']),
episodeid= '?e=' + str(it['id'])))

View File

@@ -562,12 +562,12 @@ def scrape(func):
prevthumb=item.prevthumb if item.prevthumb else item.thumbnail))
if inspect.stack()[1][3] not in ['find_episodes', 'add_tvshow']:
if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
# item.fulltitle = item.infoLabels["title"]
videolibrary(itemlist, item, function=function)
if downloadEnabled and function == 'episodios' or function == 'findvideos':
download(itemlist, item, function=function)
# if inspect.stack()[1][3] not in ['find_episodes', 'add_tvshow']:
# if addVideolibrary and (item.infoLabels["title"] or item.fulltitle):
# # item.fulltitle = item.infoLabels["title"]
# videolibrary(itemlist, item, function=function)
# if downloadEnabled and function == 'episodios' or function == 'findvideos':
# download(itemlist, item, function=function)
if 'patronGenreMenu' in args and itemlist:
itemlist = thumb(itemlist, genre=True)
@@ -1043,7 +1043,7 @@ def download(itemlist, item, typography='', function_level=1, function=''):
from_action=from_action,
contentTitle=contentTitle,
path=item.path,
thumbnail=thumb('downloads'),
thumbnail=thumb('download'),
downloadItemlist=downloadItemlist
))
if from_action == 'episodios':
@@ -1060,7 +1060,7 @@ def download(itemlist, item, typography='', function_level=1, function=''):
from_action=from_action,
contentTitle=contentTitle,
download='season',
thumbnail=thumb('downloads'),
thumbnail=thumb('download'),
downloadItemlist=downloadItemlist
))
@@ -1193,7 +1193,7 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru
info(videoitem, 'Non supportato')
return
videoitem.server = findS[2]
videoitem.title= findS[0]
videoitem.serverName= findS[0]
videoitem.url = findS[1]
srv_param = servertools.get_server_parameters(videoitem.server.lower())
else:
@@ -1201,9 +1201,10 @@ def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=Tru
if videoitem.video_urls or srv_param.get('active', False):
quality = videoitem.quality if videoitem.quality else item.quality if item.quality else ''
# videoitem = item.clone(url=videoitem.url, serverName=videoitem.serverName, server=videoitem.server, action='play')
videoitem.contentLanguage = videoitem.contentLanguage if videoitem.contentLanguage else item.contentLanguage if item.contentLanguage else 'ITA'
videoitem.serverName = videoitem.title if videoitem.server == 'directo' else servertools.get_server_parameters(videoitem.server).get('name', videoitem.server.capitalize())
videoitem.title = item.contentTitle.strip() if item.contentType == 'movie' and item.contentTitle or (config.get_localized_string(30161) in item.fulltitle) else item.fulltitle
# videoitem.title = item.contentTitle.strip() if item.contentType == 'movie' and item.contentTitle or (config.get_localized_string(30161) in item.fulltitle) else item.fulltitle
videoitem.plot = typo(videoitem.title, 'bold') + (typo(quality, '_ [] bold') if quality else '')
videoitem.channel = item.channel
videoitem.fulltitle = item.fulltitle
@@ -1362,7 +1363,6 @@ def addQualityTag(item, itemlist, data, patron):
def get_jwplayer_mediaurl(data, srvName, onlyHttp=False, dataIsBlock=False):
from core import jsontools
video_urls = []
block = scrapertools.find_single_match(data, r'sources:\s*([^\]]+\])') if not dataIsBlock else data
if block:

View File

@@ -17,7 +17,7 @@ from future.builtins import object
import ast, copy, re, time
from core import filetools, httptools, jsontools, scrapertools
from core import filetools, httptools, jsontools, scrapertools, support
from core.item import InfoLabels
from platformcode import config, logger, platformtools
import threading
@@ -965,13 +965,8 @@ class Tmdb(object):
result = result["tv_results"][0]
else:
result = result['tv_episode_results'][0]
if result.get('id'):
Mpaaurl = '{}/{}/{}/{}?api_key={}'.format(host, self.search_type, result['id'], 'release_dates' if self.search_type == 'movie' else 'content_ratings', api)
Mpaas = self.get_json(Mpaaurl).get('results',[])
for m in Mpaas:
if m.get('iso_3166_1','').lower() == 'us':
result['mpaa'] = m.get('rating', m.get('release_dates', [{}])[0].get('certification'))
break
result = self.get_mpaa(result)
self.results = [result]
self.total_results = 1
@@ -1041,6 +1036,8 @@ class Tmdb(object):
self.total_results = total_results
self.total_pages = total_pages
self.result = ResultDictDefault(self.results[index_results])
if not config.get_setting('tmdb_plus_info'):
self.result = self.get_mpaa(self.result)
return len(self.results)
else:
@@ -1785,6 +1782,16 @@ class Tmdb(object):
return ret_infoLabels
def get_mpaa(self, result):
if result.get('id'):
Mpaaurl = '{}/{}/{}/{}?api_key={}'.format(host, self.search_type, result['id'], 'release_dates' if self.search_type == 'movie' else 'content_ratings', api)
Mpaas = self.get_json(Mpaaurl).get('results',[])
for m in Mpaas:
if m.get('iso_3166_1','').lower() == 'us':
result['mpaa'] = m.get('rating', m.get('release_dates', [{}])[0].get('certification'))
break
return result
def get_season_dic(season):
ret_dic = dict()
@@ -1800,7 +1807,7 @@ def get_season_dic(season):
seasonCredits = season.get('credits', {})
seasonPosters = season.get('images',{}).get('posters',{})
seasonFanarts = season.get('images',{}).get('backdrops',{})
seasonTrailers = season.get('videos',[]).get('results',[])
seasonTrailers = season.get('videos',{}).get('results',[])
ret_dic["season_title"] = seasonTitle
ret_dic["season_plot"] = seasonPlot

View File

@@ -802,8 +802,9 @@ def add_tvshow(item, channel=None, itemlist=[]):
itemlist = getattr(channel, it.action)(it)
item.host = channel.host
if itemlist:
# support.dbg()
from platformcode.autorenumber import start, check
if not check(item):
if not check(item, itemlist):
action = item.action
item.setrenumber = True
start(item)
@@ -877,7 +878,8 @@ def get_fanart_tv(item, set='', ret={}):
d[k['season']] = o
return d
_id = item.infoLabels.get('tvdb_id', item.infoLabels.get('tmdb_id'))
_id = item.infoLabels.get('tmdb_id')
# support.dbg()
if _id:
_type = item.contentType.replace('show','').replace('movie','movies')

View File

@@ -0,0 +1,3 @@
from lib.streamingcommunity.client import Client
from lib.streamingcommunity.server import Server
__all__ = ['Client', 'Server']

View File

@@ -0,0 +1,285 @@
import base64, json, random, struct, time, sys, traceback
if sys.version_info[0] >= 3:
PY3 = True
import urllib.request as urllib
xrange = range
else:
PY3 = False
import urllib
from core import httptools, jsontools, support
from threading import Thread
import re
from lib.streamingcommunity.handler import Handler
from platformcode import logger
from lib.streamingcommunity.server import Server
class Client(object):
    """Local HTTP proxy for streamingcommunity HLS streams.

    Downloads the remote master/rendition manifests, rewrites every URL in
    them so the player requests manifests from this local server, and
    expands relative chunk names into absolute CDN URLs spread across the
    site's proxy pool. Runs an embedded :class:`Server` in a daemon thread
    and shuts itself down via a watchdog when playback stops.
    """

    def __init__(self, url, port=None, ip=None, auto_shutdown=True, wait_time=20, timeout=5, is_playing_fnc=None, video_id=None):
        # Bind to a random local port in 8000-8099 unless one was given.
        self.port = port if port else random.randint(8000, 8099)
        self.ip = ip if ip else "127.0.0.1"
        self.connected = False
        self.start_time = None
        self.last_connect = None
        self.is_playing_fnc = is_playing_fnc
        self.auto_shutdown = auto_shutdown
        self.wait_time = wait_time      # seconds to wait for a first connection
        self.timeout = timeout          # seconds of inactivity before shutdown
        self.running = False
        self.file = None
        self.files = []
        # video_id is the ID in the webpage path
        self._video_id = video_id
        # Get json_data for entire details from video page
        jsonDataStr = httptools.downloadpage('https://streamingcommunityws.com/videos/1/{}'.format(self._video_id), CF=False).data
        logger.debug(jsonDataStr)
        self._jsonData = jsontools.load(jsonDataStr)
        # Compute token and expiration time up-front; these values are
        # appended to every manifest request.
        self._token, self._expires = self.calculateToken(self._jsonData['client_ip'])
        # Start the local web server immediately.
        self._server = Server((self.ip, self.port), Handler, client=self)
        self.start()

    def start(self):
        """Start the HTTP server and the auto-shutdown watchdog thread."""
        self.start_time = time.time()
        self.running = True
        self._server.run()
        t = Thread(target=self._auto_shutdown)
        t.setDaemon(True)
        t.start()
        logger.info("SC Server Started", (self.ip, self.port))

    def _auto_shutdown(self):
        """Watchdog loop: poll once per second and stop the server when idle."""
        while self.running:
            time.sleep(1)
            if self.file and self.file.cursor:
                self.last_connect = time.time()
            if self.is_playing_fnc and self.is_playing_fnc():
                self.last_connect = time.time()
            if self.auto_shutdown:
                # shutdown because the player was closed
                if self.connected and self.last_connect and self.is_playing_fnc and not self.is_playing_fnc():
                    if time.time() - self.last_connect - 1 > self.timeout:
                        self.stop()
                # shutdown because no connection was ever made
                if (not self.file or not self.file.cursor) and self.start_time and self.wait_time and not self.connected:
                    if time.time() - self.start_time - 1 > self.wait_time:
                        self.stop()
                # shutdown after the last connection
                if (not self.file or not self.file.cursor) and self.timeout and self.connected and self.last_connect and not self.is_playing_fnc:
                    if time.time() - self.last_connect - 1 > self.timeout:
                        self.stop()

    def stop(self):
        """Stop the watchdog and the embedded HTTP server."""
        self.running = False
        self._server.stop()
        logger.info("SC Server Stopped")

    def get_manifest_url(self):
        # Remapped request path for the main manifest: it must point to the
        # local server ip:port so all further requests go through us.
        return "http://" + self.ip + ":" + str(self.port) + "/manifest.m3u8"

    def get_main_manifest_content(self):
        """Fetch the remote master manifest and point its rendition URLs here.

        Video renditions are remapped to /video/RES.m3u8 and audio ones to
        /audio/RES.m3u8 on the local server.
        """
        url = 'https://streamingcommunityws.com/master/{}?token={}&expires={}'.format(self._video_id, self._token, self._expires)
        m3u8_original = httptools.downloadpage(url, CF=False).data
        logger.debug('CLIENT: m3u8:', m3u8_original);
        r_video = re.compile(r'(\.\/video\/(\d+p)\/playlist.m3u8)', re.MULTILINE)
        r_audio = re.compile(r'(\.\/audio\/(\d+k)\/playlist.m3u8)', re.MULTILINE)
        for match in r_video.finditer(m3u8_original):
            line = match.groups()[0]
            res = match.groups()[1]
            video_url = "/video/" + res + ".m3u8"
            # logger.info('replace', match.groups(), line, res, video_url)
            m3u8_original = m3u8_original.replace(line, video_url)
        for match in r_audio.finditer(m3u8_original):
            line = match.groups()[0]
            res = match.groups()[1]
            audio_url = "/audio/" + res + ".m3u8"
            # logger.info('replace', match.groups(), line, res, audio_url)
            m3u8_original = m3u8_original.replace(line, audio_url)
        # Previous single-rendition implementation, kept for reference:
        # m_video = re.search(, m3u8_original)
        # self._video_res = m_video.group(1)
        # m_audio = re.search(r'\.\/audio\/(\d+k)\/playlist.m3u8', m3u8_original)
        # self._audio_res = m_audio.group(1)
        # video_url = "/video/" + self._video_res + ".m3u8"
        # audio_url = "/audio/" + self._audio_res + ".m3u8"
        # m3u8_original = m3u8_original.replace( m_video.group(0), video_url )
        # m3u8_original = m3u8_original.replace( m_audio.group(0), audio_url )
        return m3u8_original

    def get_video_manifest_content(self, url):
        """Fetch a video rendition manifest and rewrite its chunk URLs.

        Based on `default_start`, `default_count` and `default_domain`, each
        chunk is pointed at the remote CDN, cycling the host index from
        `default_start` up to `default_count` (simple load distribution).
        """
        m_video = re.search(r'\/video\/(\d+p)\.m3u8', url)
        video_res = m_video.groups()[0]
        logger.info('Video res: ', video_res)
        # Get the original manifest file for the video chunks.
        url = 'https://streamingcommunityws.com/master/{}?token={}&expires={}&type=video&rendition={}'.format(self._video_id, self._token, self._expires, video_res)
        original_manifest = httptools.downloadpage(url, CF=False).data
        manifest_to_parse = original_manifest
        # Remap each chunk (bare "<name>.ts" lines).
        r = re.compile(r'^(\w+\.ts)$', re.MULTILINE)
        default_start = self._jsonData["proxies"]["default_start"]
        default_count = self._jsonData["proxies"]["default_count"]
        default_domain = self._jsonData["proxies"]["default_domain"]
        storage_id = self._jsonData["storage_id"]
        folder_id = self._jsonData["folder_id"]
        for match in r.finditer(manifest_to_parse):
            # Replace every single chunk in the original manifest content.
            ts = match.groups()[0]
            # Compute the final URL pointing at the current CDN host.
            url = 'https://au-{default_start}.{default_domain}/hls/{storage_id}/{folder_id}/video/{video_res}/{ts}'.format(
                default_start=default_start,
                default_domain=default_domain,
                storage_id=storage_id,
                folder_id=folder_id,
                video_res=video_res,
                ts=ts
            )
            original_manifest = original_manifest.replace(ts, url)
            default_start = default_start + 1
            if default_start > default_count:
                default_start = 1
        # Point the encryption-key URL back at the remote streamingcommunity server.
        original_manifest = re.sub(r'"(\/.*[enc]?\.key)"', '"https://streamingcommunityws.com\\1"', original_manifest)
        return original_manifest

    def get_audio_manifest_content(self, url):
        """Fetch an audio rendition manifest and rewrite its chunk URLs.

        Same host-cycling scheme as :meth:`get_video_manifest_content`, but
        for the /audio/<bitrate>k rendition.
        """
        m_audio = re.search(r'\/audio\/(\d+k)\.m3u8', url)
        audio_res = m_audio.groups()[0]
        logger.info('Audio res: ', audio_res)
        # Get the original manifest file for the audio chunks.
        url = 'https://streamingcommunityws.com/master/{}?token={}&expires={}&type=audio&rendition={}'.format(self._video_id, self._token, self._expires, audio_res)
        original_manifest = httptools.downloadpage(url, CF=False).data
        manifest_to_parse = original_manifest
        # Remap each chunk (bare "<name>.ts" lines).
        r = re.compile(r'^(\w+\.ts)$', re.MULTILINE)
        default_start = self._jsonData["proxies"]["default_start"]
        default_count = self._jsonData["proxies"]["default_count"]
        default_domain = self._jsonData["proxies"]["default_domain"]
        storage_id = self._jsonData["storage_id"]
        folder_id = self._jsonData["folder_id"]
        for match in r.finditer(manifest_to_parse):
            # Replace every single chunk in the original manifest content.
            ts = match.groups()[0]
            # Compute the final URL pointing at the current CDN host.
            url = 'https://au-{default_start}.{default_domain}/hls/{storage_id}/{folder_id}/audio/{audio_res}/{ts}'.format(
                default_start=default_start,
                default_domain=default_domain,
                storage_id=storage_id,
                folder_id=folder_id,
                audio_res=audio_res,
                ts=ts
            )
            original_manifest = original_manifest.replace(ts, url)
            default_start = default_start + 1
            if default_start > default_count:
                default_start = 1
        # Point the encryption-key URL back at the remote streamingcommunity server.
        original_manifest = re.sub(r'"(\/.*[enc]?\.key)"', '"https://streamingcommunityws.com\\1"', original_manifest)
        return original_manifest

    def calculateToken(self, ip):
        """Compute the (token, expires) pair authorizing manifest requests.

        token  = URL-safe, unpadded base64 of md5("<expires><ip> <key>")
        expires = now + 48 hours, in unix seconds.
        """
        from time import time
        from base64 import b64encode as b64
        import hashlib
        o = 48  # token lifetime in hours
        # NOT USED: it has been computed by `jsondata` in the constructor method
        # n = support.match('https://au-1.scws-content.net/get-ip').data
        # NOTE(review): hard-coded key — presumably mirrors the site's web
        # player; verify if token validation starts failing.
        i = 'Yc8U6r8KjAKAepEA'
        t = int(time() + (3600 * o))
        l = '{}{} {}'.format(t, ip, i)
        md5 = hashlib.md5(l.encode())
        #s = '?token={}&expires={}'.format(, t)
        token = b64(md5.digest()).decode().replace('=', '').replace('+', "-").replace('\\', "_")
        expires = t
        return token, expires

View File

@@ -0,0 +1,73 @@
import time, os, re, sys
if sys.version_info[0] >= 3:
PY3 = True
from http.server import BaseHTTPRequestHandler
import urllib.request as urllib
import urllib.parse as urlparse
else:
PY3 = False
from BaseHTTPServer import BaseHTTPRequestHandler
import urlparse
import urllib
from platformcode import logger
class Handler(BaseHTTPRequestHandler):
    """Request handler for the local streamingcommunity proxy.

    Serves the rewritten master manifest at /manifest.m3u8 and the
    per-rendition manifests under /video/ and /audio/; anything else is a
    404. The owning Client is reachable as ``self.server._client``.
    """
    protocol_version = 'HTTP/1.1'

    def log_message(self, format, *args):
        # Suppress BaseHTTPRequestHandler's default per-request stderr log.
        pass

    def do_GET(self):
        """Dispatch a GET request to the matching manifest generator."""
        url = urlparse.urlparse(self.path).path
        logger.debug('HANDLER:', url)
        response = None
        # Default content-type for every manifest response.
        cType = "application/vnd.apple.mpegurl"
        if url == "/manifest.m3u8":
            response = self.server._client.get_main_manifest_content()
        elif url.startswith('/video/'):
            response = self.server._client.get_video_manifest_content(url)
        elif url.startswith('/audio/'):
            response = self.server._client.get_audio_manifest_content(url)
        elif url.endswith('enc.key'):
            # This path should NOT be used, see get_video_manifest_content
            # (key URLs are rewritten to point at the remote server).
            response = self.server._client.get_enc_key(url)
            cType = "application/octet-stream"
        if response == None:
            # Default 404 response for unknown paths.
            self.send_error(404, 'Not Found')
            logger.warn('Responding 404 for url', url)
        else:
            # OK: send the generated manifest back to the player.
            self.send_response(200)
            self.send_header("Content-Type", cType)
            self.send_header("Content-Length", str(len(response.encode('utf-8'))))
            self.end_headers()
            self.wfile.write(response.encode())
            # Force a flush just to be sure the player gets the full body.
            self.wfile.flush()
            logger.info('HANDLER flushed:', cType, str(len(response.encode('utf-8'))))
            logger.debug(response.encode('utf-8'))

View File

@@ -0,0 +1,39 @@
import sys, traceback
if sys.version_info[0] >= 3:
from http.server import HTTPServer
from socketserver import ThreadingMixIn
else:
from BaseHTTPServer import HTTPServer
from SocketServer import ThreadingMixIn
from threading import Thread
from platformcode import logger
class Server(ThreadingMixIn, HTTPServer):
    """Threaded HTTP server hosting the local manifest proxy.

    One daemon thread per request; ``_client`` is the owning Client so the
    Handler can reach its manifest generators.
    """
    daemon_threads = True
    timeout = 1

    def __init__(self, address, handler, client):
        HTTPServer.__init__(self, address, handler)
        self._client = client
        self.running = True
        self.request = None

    def stop(self):
        # NOTE(review): this only clears the flag — serve_forever keeps
        # running until process exit (daemon thread); consider calling
        # self.shutdown() here. Confirm before changing behavior.
        self.running = False

    # def serve(self):
    #     while self.running:
    #         try:
    #             self.handle_request()
    #         except:
    #             logger.error(traceback.format_exc())

    def run(self):
        # Serve in a background thread so the caller is not blocked.
        t = Thread(target=self.serve_forever, name='HTTP Server')
        t.daemon = self.daemon_threads
        t.start()

    def handle_error(self, request, client_address):
        # Ignore routine socket-teardown noise; log everything else.
        if not "socket.py" in traceback.format_exc():
            logger.error(traceback.format_exc())

View File

@@ -28,7 +28,9 @@ MANUALMODE = 'manual'
GROUP = 'info'
# helper Functions
def check(item):
def check(item, itemlist=None):
if itemlist and itemlist[0].contentSeason:
return True
logger.debug()
dict_series = load(item)
title = item.fulltitle.rstrip()

View File

@@ -0,0 +1,165 @@
# -*- coding: utf-8 -*-
import xbmc, sys, xbmcgui, os, xbmcvfs, traceback
from platformcode import config, logger
librerias = xbmc.translatePath(os.path.join(config.get_runtime_path(), 'lib'))
sys.path.insert(0, librerias)
from core.item import Item
from lib.sambatools import libsmb as samba
from core import scrapertools, support
# Module-level state shared between the context-menu callbacks below.
path = ''       # video-library path of the focused tv show (set by check_condition)
mediatype = ''  # DBTYPE of the focused list item (set by get_id)
def exists(path, silent=False, vfs=True):
    """Return True if *path* exists, trying Kodi vfs, samba or os in turn.

    :param path: file or folder path (Kodi special:// paths are translated)
    :param silent: when False, log the traceback on failure
    :param vfs: prefer xbmcvfs (handles Kodi virtual filesystem paths)
    """
    path = xbmc.translatePath(path)
    try:
        if vfs:
            result = bool(xbmcvfs.exists(path))
            if not result and not path.endswith('/') and not path.endswith('\\'):
                # xbmcvfs needs a trailing separator for folders: joining a
                # dummy component and rstripping leaves "path<sep>".
                result = bool(xbmcvfs.exists(join(path, ' ').rstrip()))
            return result
        elif path.lower().startswith("smb://"):
            return samba.exists(path)
        else:
            return os.path.exists(path)
    except:
        logger.error("ERROR when checking the path: %s" % path)
        if not silent:
            logger.error(traceback.format_exc())
        return False
def join(*paths):
    """Join path components, normalizing slashes.

    Remote paths (anything starting with a scheme like smb://) are joined
    with '/', local ones with os.sep. A leading '/' on the first component
    is preserved.
    """
    segments = [""] if paths[0].startswith("/") else []
    for piece in paths:
        if not piece:
            continue
        segments.extend(piece.replace("\\", "/").strip("/").split("/"))
    is_remote = scrapertools.find_single_match(paths[0], r'(^\w+:\/\/)')
    separator = "/" if is_remote else os.sep
    return str(separator.join(segments))
def search_paths(Id):
    """Return the KoD video-library folder of the show with Kodi id *Id*.

    Looks up every path linked to the show in Kodi's database and returns
    the first one inside the configured library that contains a tvshow.nfo;
    '' when none matches.
    """
    show_rows = execute_sql('SELECT idPath FROM tvshowlinkpath WHERE idShow LIKE "%s"' % Id)
    if len(show_rows) >= 1:
        library_root = config.get_setting('videolibrarypath')
        for row in show_rows:
            for candidate in execute_sql('SELECT strPath FROM path WHERE idPath LIKE "%s"' % row[0]):
                if library_root in candidate[0] and exists(join(candidate[0], 'tvshow.nfo')):
                    return candidate[0]
    return ''
def execute_sql(sql):
    """Run *sql* against Kodi's MyVideos database and return fetched rows.

    Locates the database file from the platform info, falling back to
    scanning special://userdata/Database for a MyVideos*.db file. Returns
    the cursor's fetched records, [] for an empty single-NULL SELECT
    result, or None when the database is missing or the query fails.

    Fixes: the fallback scan called the non-existent ``os.path.listdir``
    and the garbled ``os.path.pathoos.pathols.isfile`` — both crashed with
    AttributeError; now uses ``os.listdir`` / ``os.path.isfile``.
    """
    logger.debug()
    file_db = ""
    records = None
    # We look for the video database file according to the Kodi version.
    video_db = config.get_platform(True)['video_db']
    if video_db:
        file_db = os.path.join(xbmc.translatePath("special://userdata/Database"), video_db)
    # Alternative method to locate the database: scan the Database folder.
    if not file_db or not os.path.exists(file_db):
        file_db = ""
        db_folder = xbmc.translatePath("special://userdata/Database")
        for f in os.listdir(db_folder):
            path_f = os.path.join(db_folder, f)
            if os.path.isfile(path_f) and f.lower().startswith('myvideos') and f.lower().endswith('.db'):
                file_db = path_f
                break
    if file_db:
        logger.debug("DB file: %s" % file_db)
        conn = None
        try:
            import sqlite3
            conn = sqlite3.connect(file_db)
            cursor = conn.cursor()
            logger.debug("Running sql: %s" % sql)
            cursor.execute(sql)
            conn.commit()
            records = cursor.fetchall()
            if sql.lower().startswith("select"):
                # A single all-NULL row means "no results" for aggregates.
                if len(records) == 1 and records[0][0] is None:
                    records = []
            conn.close()
            logger.debug("Query executed. Records: %s" % len(records))
        except Exception:
            logger.error("Error executing sql query")
            if conn:
                conn.close()
    else:
        logger.debug("Database not found")
    return records
def get_id():
    """Return the Kodi DB id of the focused tv show ('' for other types).

    Also records the focused item's DBTYPE in the module-level
    ``mediatype`` for later use.
    """
    global mediatype
    mediatype = xbmc.getInfoLabel('ListItem.DBTYPE')
    if mediatype == 'tvshow':
        return xbmc.getInfoLabel('ListItem.DBID')
    if mediatype in ('season', 'episode'):
        return xbmc.getInfoLabel('ListItem.TvShowDBID')
    return ''
def check_condition():
    """Resolve (and cache in the module-level ``path``) the library folder
    of the focused show; a non-empty return enables the context menu."""
    # support.dbg()
    global path
    path = search_paths(get_id())
    return path
def get_menu_items():
    """Build context-menu entries for a tv show stored in the KoD library.

    Returns a list of (label, callback) tuples — update show, toggle
    auto-update, add/remove local episodes — or [] when the focused item is
    not in the library (check_condition found no matching path).
    """
    logger.debug('get menu item')
    if check_condition():
        items = [(config.get_localized_string(70269), update)]
        from core.videolibrarytools import read_nfo
        # NOTE(review): assumes `path` ends with a separator (Kodi DB paths
        # usually do) — confirm, otherwise the nfo path is wrong.
        nfo = path + 'tvshow.nfo'
        item = read_nfo(nfo)[1]
        if item:
            item.nfo = nfo
            item_url = item.tourl()
            # Context menu: Automatically search for new episodes or not
            if item.active and int(item.active) > 0:
                update_text = config.get_localized_string(60022)
                value = 0
            else:
                update_text = config.get_localized_string(60023)
                value = 1
            items.append((update_text, lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?{}&title={}&action=mark_tvshow_as_updatable&channel=videolibrary&active={})".format(item_url, update_text, str(value)))))
            # Toggle between linking and unlinking a local-episodes folder.
            if item.local_episodes_path == "":
                items.append((config.get_localized_string(80048), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?{}&action=add_local_episodes&channel=videolibrary&path={})".format(item_url, path))))
            else:
                items.append((config.get_localized_string(80049), lambda: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?{}&action=remove_local_episodes&channel=videolibrary&path={})".format(item_url, path))))
        return items
    else:
        return []
def update():
    """Trigger a video-library refresh of the focused tv show."""
    show_path = search_paths(get_id())
    if show_path:
        refresh = Item(action="update_tvshow", channel="videolibrary", path=show_path)
        xbmc.executebuiltin("RunPlugin(plugin://plugin.video.kod/?" + refresh.tourl() + ")")

View File

@@ -453,7 +453,9 @@ class addTvShow(object):
self.info = self.item.infoLabels
self.seasons = videolibrarydb['season'][self.item.videolibrary_id]
self.episodes = videolibrarydb['episode'][self.item.videolibrary_id]
self.imdb_id, self.tmdb_id, self.tvdb_id = self.info['code']
self.imdb_id = self.info.get('imdb_id', '')
self.tmdb_id = self.info.get('tmdb_id', '')
self.tvdb_id = self.info.get('tvdb_id', '')
self.exist, self.idShow = self.get_idShow()
self.idSeasons = self.get_idSeasons()
self.idEpisodes = self.get_idEpisodes()

View File

@@ -351,8 +351,6 @@ def render_items(itemlist, parent_item):
if item.category == "":
item.category = parent_item.category
if not item.title:
item.title = item.contentTitle
# If there is no action or it is findvideos / play, folder = False because no listing will be returned
if item.action in ['play', '']:
item.folder = False
@@ -366,7 +364,9 @@ def render_items(itemlist, parent_item):
title = item.title
episode = ''
if (parent_item.channel not in ['videolibrary'] or item.server) and title[:1] not in ['[', '']:
if title[:1] not in ['[', '']:
if item.contentSerieName: title = item.contentSerieName
elif item.contentTitle: title = item.contentTitle
if type(item.contentSeason) == int and type(item.contentEpisodeNumber) == int and not item.onlyep:
episode = '{}x{:02d}'.format(item.contentSeason, item.contentEpisodeNumber)
elif type(item.contentEpisodeNumber) == int:
@@ -416,15 +416,15 @@ def render_items(itemlist, parent_item):
context_commands = def_context_commands
listitem.addContextMenuItems(context_commands)
return item, item_url, listitem
# from core.support import dbg;dbg()
r_list = [set_item(i, item, parent_item) for i, item in enumerate(itemlist)]
# r_list = []
# r_list = [set_item(i, item, parent_item) for i, item in enumerate(itemlist)]
r_list = []
with futures.ThreadPoolExecutor() as executor:
searchList = [executor.submit(set_item, i, item, parent_item) for i, item in enumerate(itemlist)]
for res in futures.as_completed(searchList):
r_list.append(res.result())
r_list.sort(key=lambda it: it[0].itemlistPosition)
# with futures.ThreadPoolExecutor() as executor:
# searchList = [executor.submit(set_item, i, item, parent_item) for i, item in enumerate(itemlist)]
# for res in futures.as_completed(searchList):
# r_list.append(res.result())
# r_list.sort(key=lambda it: it[0].itemlistPosition)
for item, item_url, listitem in r_list:
dirItems.append(('%s?%s' % (sys.argv[0], item_url), listitem, item.folder, len(r_list)))
@@ -685,12 +685,12 @@ def set_context_commands(item, item_url, parent_item, **kwargs):
if item.channel != "videolibrary" and item.videolibrary != False:
# Add Series to the video library
if item.action in ["episodios", "get_episodios", "get_seasons"] and item.contentSerieName:
context_commands.append((config.get_localized_string(60352), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_serie_to_library&from_action=' + item.action)))
context_commands.append((config.get_localized_string(60352), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_serie_to_library&from_action={}&contentChannel=videolibrary'.format(item.action))))
# Add Movie to Video Library
elif item.action in ["detail", "findvideos"] and item.contentType == 'movie' and item.contentTitle:
context_commands.append((config.get_localized_string(60353), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_movie_to_library&from_action=' + item.action)))
context_commands.append((config.get_localized_string(60353), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_movie_to_library&from_action={}&contentChannel=videolibrary'.format(item.action))))
elif item.action in ['check'] and item.contentTitle or item.contentSerieName:
context_commands.append((config.get_localized_string(30161), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_to_library&from_action=' + item.action)))
context_commands.append((config.get_localized_string(30161), "RunPlugin(%s?%s&%s)" % (sys.argv[0], item_url, 'action=add_to_library&from_action={}&contentChannel=videolibrary'.format(item.action))))
if not item.local and item.channel not in ["downloads", "filmontv", "search"] and item.server != 'torrent' and parent_item.action != 'mainlist' and config.get_setting('downloadenabled'):
# Download movie
@@ -1794,10 +1794,10 @@ def get_played_time(item):
result = None
try:
if item.contentType == 'movie':
result = db['viewed'].get(ID)
elif S and E:
if S and E:
result = db['viewed'].get(ID, {}).get(str(S)+'x'+str(E))
else:
result = db['viewed'].get(ID)
if result:
played_time = result

25
servers/maxstream.json Normal file
View File

@@ -0,0 +1,25 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https?://maxstream.video/(?:e/|embed-|cast/)?([a-z0-9]+)",
"url": "https://maxstream.video/cast/\\1"
}
]
},
"free": true,
"id": "maxstream",
"name": "MaxStream",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "@70708",
"type": "bool",
"visible": true
}
]
}

65
servers/maxstream.py Normal file
View File

@@ -0,0 +1,65 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector MaxStream
# --------------------------------------------------------
from core import httptools, scrapertools, support
from platformcode import logger, config
import ast, sys
if sys.version_info[0] >= 3:
import urllib.parse as urlparse
else:
import urlparse
def test_video_exists(page_url):
    """Fetch the MaxStream page and report (exists, error_message).

    Stores the downloaded HTML in the module-global ``data`` because
    get_video_url() re-uses it instead of fetching the page again.
    """
    logger.debug("(page_url='%s')" % page_url)
    global data
    data = httptools.downloadpage(page_url).data
    if "file was deleted" not in data:
        return True, ""
    return False, config.get_localized_string(70449) % "MaxStream"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract the direct video URLs from the MaxStream page.

    Relies on the module-global ``data`` populated by test_video_exists().
    The page hides its JS in the last <script> tag as a list of shifted
    character codes; we decode it, follow the AJAX URL it contains, and
    scrape the final mp4 URL from the returned "cast" page.

    Returns a list of [label, url] pairs (possibly empty on failure).
    """
    logger.debug("url=" + page_url)
    video_urls = []
    url_video = ''
    # Locate the LAST <script>...</script> pair: that is where the
    # obfuscated payload lives.
    lastIndexStart = data.rfind('<script>')
    lastIndexEnd = data.rfind('</script>')
    script = data[ (lastIndexStart + len('<script>')):lastIndexEnd ]
    # match[0] = the "[n1, n2, ...]" char-code array, match[1] = the
    # integer offset subtracted from each code (parseInt(value) - N).
    match = scrapertools.find_single_match(script, r'(\[[^\]]+\])[^\{]*\{[^\(]+\(parseInt\(value\)\s?-\s?([0-9]+)')
    if match:
        char_codes = ast.literal_eval(match[0])
        # De-obfuscate: shift every char code back by the offset.
        hidden_js = "".join([chr(c - int(match[1])) for c in char_codes])
        # The decoded JS performs a jQuery $.get(...) — grab its URL.
        newurl = scrapertools.find_single_match(hidden_js, r'\$.get\(\'([^\']+)')
        # The endpoint only answers AJAX requests with the right Referer.
        castpage = httptools.downloadpage(newurl, headers={'x-requested-with': 'XMLHttpRequest', 'Referer': page_url }).data
        url_video = scrapertools.find_single_match(castpage, r"cc\.cast\('(http[s]?.[^']+)'")
    else:
        logger.debug('Something wrong: no url found before that :(')
    if url_video:
        import random, string
        parse = urlparse.urlparse(url_video)
        video_urls.append(['mp4 [MaxStream]', url_video])
        try:
            # NOTE(review): the HLS edge appears to ignore the three
            # 19-char tokens in the ",token," slots, so random values are
            # used — TODO confirm this keeps working server-side.
            r1 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
            r2 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
            r3 = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(19))
            video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},{},{},{},.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1], r1, r2, r3)])
            # video_urls.append(['m3u8 [MaxStream]', '{}://{}/hls/{},wpsc2hllm5g5fkjvslq,4jcc2hllm5gzykkkgha,fmca2hllm5jtpb7cj5q,.urlset/master.m3u8'.format(parse.scheme, parse.netloc, parse.path.split('/')[1])])
        except:
            logger.debug('Something wrong: Impossible get HLS stream')
    return video_urls

View File

@@ -0,0 +1,15 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
]
},
"free": true,
"id": "streamingcommunityws",
"name": "StreamingCommunityWS",
"premium": [
],
"settings": [
]
}

View File

@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
# Py2/Py3 compatibility shim: on Python 3 alias the Py2-only builtins.
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import json
import random
from core import httptools, support, scrapertools
from platformcode import platformtools, logger
from lib.streamingcommunity import Client as SCClient
files = None
def test_video_exists(page_url):
    """Always report the video as available.

    Here ``page_url`` is the StreamingCommunity {VIDEO_ID} (e.g. '5957'),
    not a full URL; no existence check is performed for this server.
    """
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the playable manifest URL for a StreamingCommunity video id.

    ``page_url`` is the numeric video id; the client is kept in the
    module-global ``c`` so other code can reach it while playing.
    """
    global c
    c = SCClient("", video_id=page_url, is_playing_fnc=platformtools.is_playing)
    manifest_url = c.get_manifest_url()
    extension = scrapertools.get_filename_from_url(manifest_url)[-4:]
    return [[extension + " [Streaming Community]", manifest_url]]

17
servers/streamon.json Normal file
View File

@@ -0,0 +1,17 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [{
"pattern": "streamon\\.to/d/(\\w+)",
"url": "https://streamon.to/d/\\1"
}]
},
"free": true,
"id": "streamon",
"name": "Streamon",
"premium": [
],
"settings": [
]
}

124
servers/streamon.py Normal file
View File

@@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
from core import httptools, scrapertools, config
import base64
import math
import re
files = None
def test_video_exists(page_url):
    """Download the Streamon page and report (exists, error_message).

    The HTML is cached in the module-global ``htmldata`` because
    get_video_url() parses it again without re-downloading.
    """
    global htmldata
    htmldata = httptools.downloadpage(page_url).data
    if 'Oops! video not found' not in htmldata:
        return True, ""
    return False, config.get_localized_string(70449) % "Streamon"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Extract the direct media URL hidden in the Streamon player page.

    Relies on the module-global ``htmldata`` populated by
    test_video_exists(). Two packed JS payloads are decoded with
    eval_fn(): the site's tabber.js reveals which variable holds the
    obfuscated URL and which two substrings to strip from it; the page's
    own inline script reveals the variable's value. The cleaned value is
    base64 — decoding it yields the media URL.

    Returns a list of [label, url] pairs.
    """
    tabbler = httptools.downloadpage('https://streamon.to/assets/js/tabber.js').data
    # The packer is invoked as }(arg0, arg1, ...) — capture its arguments.
    params_tabber = scrapertools.find_single_match(tabbler, r'\}\(([^\)]+)')
    params_tabber_decoder = params_tabber.split(',')
    decoded_tabler = eval_fn(
        params_tabber_decoder[0].replace('"', ''),
        int(params_tabber_decoder[1]),
        params_tabber_decoder[2].replace('"', ''),
        int(params_tabber_decoder[3]),
        int(params_tabber_decoder[4]),
        int(params_tabber_decoder[5])
    )
    # decoder[0] = 12-letter variable name, decoder[1]/decoder[2] = the two
    # junk substrings that .replace() removes from its value.
    decoder = scrapertools.find_single_match(decoded_tabler, r'var res = ([a-z]{12})\.replace\("([^"]+)[^\.]+\.replace\("([^"]+)')
    # Same packer trick inside the player page itself.
    params_from_page = scrapertools.find_single_match(htmldata, '<script\s+?type=[\'|"].*?[\'|"]>\s?var.*?\}\((.*?)\)\)<\/script>')
    params_from_page_decoder = params_from_page.split(',')
    first_decoder_fn = eval_fn(
        params_from_page_decoder[0].replace('"', ''),
        int(params_from_page_decoder[1]),
        params_from_page_decoder[2].replace('"', ''),
        int(params_from_page_decoder[3]),
        int(params_from_page_decoder[4]),
        int(params_from_page_decoder[5])
    )
    variable_value = scrapertools.find_single_match(first_decoder_fn, 'var {}="([^"]+)"'.format(decoder[0]))
    # Strip the two junk substrings, then base64-decode to get the URL.
    res = variable_value.replace(decoder[1], "")
    res2 = res.replace(decoder[2], "")
    media_url = base64.b64decode( res2 ).decode('ascii')
    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [Streamon]", media_url])
    return video_urls
"""
" I don't know what following lines do. Maybe neither God knows...
" but they seem to be working :)
"""
def loop_reduce(lst, h, e):
    """Simulate JS Array.reduce: interpret *lst* as base-*e* digits.

    lst: digit characters, least-significant first.
    h:   digit alphabet (index of a char in h is its numeric value).
    e:   the radix.
    Characters not found in *h* are skipped, matching the JS original.
    Returns the accumulated integer value.
    """
    acc = 0
    for index, val in enumerate(lst):
        digit = h.find(val)
        if digit > -1:
            # Integer power instead of int(math.pow(...)): math.pow is a
            # float and silently loses precision above 2**53.
            acc += digit * e ** index
    return acc


def decrypt_string(d, e, f):
    """Re-encode the digit string *d* from base *e* to base *f*.

    d: digits in base *e*, most-significant first, drawn from the
       alphabet below sliced to length *e*.
    e: source radix.  f: target radix.
    Returns the base-*f* representation ('' when the value is 0).
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/'
    src = alphabet[0:e]
    dst = alphabet[0:f]
    value = loop_reduce(list(d)[::-1], src, e)
    out = ''
    # Pure integer arithmetic: the original used float division
    # ((j - j % f) / f), which corrupts values above 2**53.
    while value > 0:
        out = dst[value % f] + out
        value //= f
    return out or ''


def eval_fn(h, u, n, t, e, r):
    """Unpack a JS "packer" payload into an ASCII string.

    h: the packed payload; tokens are separated by the delimiter n[e].
    n: token alphabet — each char n[j] in a token stands for digit j.
    t: offset subtracted from every decoded char code.
    e: delimiter index into *n* AND the radix of the decoded digits.
    u, r: unused (r was immediately overwritten in the original); kept
    for signature compatibility with the scraped packer call.
    """
    delimiter = n[e]
    # Hoist the per-character substitution patterns out of the loops:
    # the original recompiled each regex for every token.
    # NOTE(review): n[j] is used as a raw regex, so a metacharacter in
    # the alphabet would over-match — original behavior preserved.
    substitutions = [(re.compile(n[j]), str(j)) for j in range(len(n))]
    out = ""
    i = 0
    while i < len(h):
        token = ''
        # Accumulate one token up to (not including) the delimiter.
        while h[i] != delimiter:
            token += h[i]
            i += 1
        for pattern, digit in substitutions:
            token = pattern.sub(digit, token)
        # Token is now base-e digits; convert to base 10, shift by t.
        out += chr(int(decrypt_string(token, e, 10)) - t)
        i += 1  # skip the delimiter
    return out

View File

@@ -2,10 +2,11 @@
# --------------------------------------------------------
# Conector streamtape By Alfa development Group
# --------------------------------------------------------
from core import httptools, scrapertools
from core import httptools
from platformcode import logger, config
from core.support import match
import sys
from lib import js2py
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
@@ -27,17 +28,10 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.debug("url=" + page_url)
# from core .support import dbg;dbg()
video_urls = []
possible_url = ''
find_url = match(data, patron=r'''innerHTML = ["']([^"]+)["'](?:\s*\+\s*['"]([^"']+))?''').match
for m in find_url:
possible_url += m
if not possible_url:
possible_url = match(data, patron=r"innerHTML\\'\]=\\'([^']+)").match
find_url = match(data, patron=r'innerHTML = ([^;]+)').match
possible_url = js2py.eval_js(find_url)
url = "https:" + possible_url
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
video_urls.append(['MP4 [Streamtape]', url])
return video_urls
return video_urls