Streamingcommunity (#309)

* added folder for new server

* WIP: streamingcommunity and animeunity

* streaming community for animeunity

* httpserver for streaming from streamingcommunity ws

* fixes for episodes and TV shows

* log and code cleanup

* fixed multi-stream playback for streamingcommunity; use 'serve_forever' to avoid an infinite loop

* added debug and info logging; minor fixes
Commit 964cc80cce (parent 8e020bb605) by fatshotty, 2021-06-16 17:59:53 +02:00, committed by GitHub
9 changed files with 740 additions and 227 deletions
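At a glance, playback now goes through a small local HTTP proxy that serves rewritten StreamingCommunity HLS manifests to Kodi. A hedged sketch of the flow, condensed from the diffs below; resolve() is an illustrative helper, not part of the commit, while Client, Handler and platformtools.is_playing are the names used in the new files:

from lib.streamingcommunity import Client
from platformcode import platformtools

def resolve(scws_id):
    # Client fetches the video page JSON, computes token/expires and starts the
    # local Server (random port in 8000-8099) with Handler as request handler
    client = Client("", video_id=scws_id, is_playing_fnc=platformtools.is_playing)
    # Kodi then plays http://127.0.0.1:<port>/manifest.m3u8; Handler proxies
    # /manifest.m3u8, /video/<res>.m3u8 and /audio/<res>.m3u8 through the Client
    return client.get_manifest_url()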

@@ -1,222 +1,289 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for AnimeUnity
# ------------------------------------------------------------
from lib.requests.sessions import session
import requests, json, copy, inspect
from core import support
from platformcode import autorenumber
host = support.config.get_channel_url()
response = support.httptools.downloadpage(host + '/archivio')
csrf_token = support.match(response.data, patron='name="csrf-token" content="([^"]+)"').match
headers = {'content-type': 'application/json;charset=UTF-8',
'x-csrf-token': csrf_token,
'Cookie' : '; '.join([x.name + '=' + x.value for x in response.cookies])}
@support.menu
def mainlist(item):
top = [('Ultimi Episodi', ['', 'news'])]
menu = [('Anime {bullet bold}',['', 'menu', {}, 'tvshow']),
('Film {submenu}',['', 'menu', {'type': 'Movie'}]),
('TV {submenu}',['', 'menu', {'type': 'TV'}, 'tvshow']),
('OVA {submenu} {tv}',['', 'menu', {'type': 'OVA'}, 'tvshow']),
('ONA {submenu} {tv}',['', 'menu', {'type': 'ONA'}, 'tvshow']),
('Special {submenu} {tv}',['', 'menu', {'type': 'Special'}, 'tvshow'])]
search =''
return locals()
def menu(item):
item.action = 'peliculas'
ITA = copy.copy(item.args)
ITA['title'] = '(ita)'
InCorso = copy.copy(item.args)
InCorso['status'] = 'In Corso'
Terminato = copy.copy(item.args)
Terminato['status'] = 'Terminato'
itemlist = [item.clone(title=support.typo('Tutti','bold')),
item.clone(title=support.typo('ITA','bold'), args=ITA),
item.clone(title=support.typo('Genere','bold'), action='genres'),
item.clone(title=support.typo('Anno','bold'), action='years')]
if item.contentType == 'tvshow':
itemlist += [item.clone(title=support.typo('In Corso','bold'), args=InCorso),
item.clone(title=support.typo('Terminato','bold'), args=Terminato)]
itemlist +=[item.clone(title=support.typo('Cerca...','bold'), action='search', thumbnail=support.thumb('search'))]
return itemlist
def genres(item):
support.info()
# support.dbg()
itemlist = []
genres = json.loads(support.match(response.text, patron='genres="([^"]+)').match.replace('&quot;', '"'))
for genre in genres:
item.args['genres'] = [genre]
itemlist.append(item.clone(title=support.typo(genre['name'],'bold'), action='peliculas'))
return support.thumb(itemlist)
def years(item):
support.info()
itemlist = []
from datetime import datetime
current_year = datetime.today().year
oldest_year = int(support.match(response.text, patron='anime_oldest_date="([^"]+)').match)
for year in list(reversed(range(oldest_year, current_year + 1))):
item.args['year']=year
itemlist.append(item.clone(title=support.typo(year,'bold'), action='peliculas'))
return itemlist
def search(item, text):
support.info('search', item)
if not item.args:
item.args = {'title':text}
else:
item.args['title'] = text
item.search = text
try:
return peliculas(item)
# Continue the search in case of an error
except:
import sys
for line in sys.exc_info():
support.info('search log:', line)
return []
def newest(categoria):
support.info(categoria)
itemlist = []
item = support.Item()
item.url = host
try:
itemlist = news(item)
if itemlist[-1].action == 'news':
itemlist.pop()
# Continue the search in case of an error
except:
import sys
for line in sys.exc_info():
support.info(line)
return []
return itemlist
def news(item):
support.info()
item.contentType = 'episode'
itemlist = []
import cloudscraper
session = cloudscraper.create_scraper()
fullJs = json.loads(support.match(session.get(item.url).text, headers=headers, patron=r'items-json="([^"]+)"', debug=True).match.replace('&quot;', '"'))
js = fullJs['data']
for it in js:
itemlist.append(
item.clone(title= support.typo(it['anime']['title'] + ' - EP. ' + it['number'], 'bold'),
fulltitle=it['anime']['title'],
thumbnail=it['anime']['imageurl'],
forcethumb = True,
video_url=it['link'],
plot=it['anime']['plot'],
action='findvideos')
)
if 'next_page_url' in fullJs:
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),thumbnail=support.thumb(), url=fullJs['next_page_url']))
return itemlist
def peliculas(item):
support.info()
itemlist = []
page = item.page if item.page else 0
item.args['offset'] = page * 30
order = support.config.get_setting('order', item.channel)
if order:
order_list = [ "Standard", "Lista A-Z", "Lista Z-A", "Popolarità", "Valutazione" ]
item.args['order'] = order_list[order]
payload = json.dumps(item.args)
records = requests.post(host + '/archivio/get-animes', headers=headers, data=payload).json()['records']
for it in records:
lang = support.match(it['title'], patron=r'\(([Ii][Tt][Aa])\)').match
title = support.re.sub(r'\s*\([^\)]+\)', '', it['title'])
if 'ita' in lang.lower(): language = 'ITA'
else: language = 'Sub-ITA'
itm = item.clone(title=support.typo(title,'bold') + support.typo(language,'_ [] color kod') + (support.typo(it['title_eng'],'_ ()') if it['title_eng'] else ''))
itm.contentLanguage = language
itm.type = it['type']
itm.thumbnail = it['imageurl']
itm.plot = it['plot']
itm.url = item.url
if it['episodes_count'] == 1:
itm.contentType = 'movie'
itm.fulltitle = itm.show = itm.contentTitle = title
itm.contentSerieName = ''
itm.action = 'findvideos'
itm.video_url = it['episodes'][0]['link']
else:
itm.contentType = 'tvshow'
itm.contentTitle = ''
itm.fulltitle = itm.show = itm.contentSerieName = title
itm.action = 'episodios'
itm.episodes = it['episodes'] if 'episodes' in it else it['link']
itm.video_url = item.url
itemlist.append(itm)
autorenumber.start(itemlist)
if len(itemlist) >= 30:
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), thumbnail=support.thumb(), page=page + 1))
return itemlist
def episodios(item):
support.info()
itemlist = []
title = 'Parte ' if item.type.lower() == 'movie' else 'Episodio '
for it in item.episodes:
itemlist.append(
item.clone(title=support.typo(title + it['number'], 'bold'),
episode = it['number'],
fulltitle=item.title,
show=item.title,
contentTitle='',
contentSerieName=item.contentSerieName,
thumbnail=item.thumbnail,
plot=item.plot,
action='findvideos',
contentType='episode',
video_url=it['link']))
if inspect.stack()[1][3] not in ['find_episodes']:
autorenumber.start(itemlist, item)
support.videolibrary(itemlist, item)
support.download(itemlist, item)
return itemlist
def findvideos(item):
support.info()
if not 'vvvvid' in item.video_url:
return support.server(item,itemlist=[item.clone(title=support.config.get_localized_string(30137), url=item.video_url, server='directo', action='play')])
else:
return support.server(item, item.video_url)
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Channel for AnimeUnity
# ------------------------------------------------------------
import cloudscraper, json, copy, inspect
from core import jsontools, support, httptools, filetools
from platformcode import autorenumber, logger
import re
import xbmc
session = cloudscraper.create_scraper()
host = support.config.get_channel_url()
response = session.get(host + '/archivio')
csrf_token = support.match(response.text, patron='name="csrf-token" content="([^"]+)"').match
headers = {'content-type': 'application/json;charset=UTF-8',
'x-csrf-token': csrf_token,
'Cookie' : '; '.join([x.name + '=' + x.value for x in response.cookies])}
@support.menu
def mainlist(item):
top = [('Ultimi Episodi', ['', 'news'])]
menu = [('Anime {bullet bold}',['', 'menu', {}, 'tvshow']),
('Film {submenu}',['', 'menu', {'type': 'Movie'}]),
('TV {submenu}',['', 'menu', {'type': 'TV'}, 'tvshow']),
('OVA {submenu} {tv}',['', 'menu', {'type': 'OVA'}, 'tvshow']),
('ONA {submenu} {tv}',['', 'menu', {'type': 'ONA'}, 'tvshow']),
('Special {submenu} {tv}',['', 'menu', {'type': 'Special'}, 'tvshow'])]
search =''
return locals()
def menu(item):
item.action = 'peliculas'
ITA = copy.copy(item.args)
ITA['title'] = '(ita)'
InCorso = copy.copy(item.args)
InCorso['status'] = 'In Corso'
Terminato = copy.copy(item.args)
Terminato['status'] = 'Terminato'
itemlist = [item.clone(title=support.typo('Tutti','bold')),
item.clone(title=support.typo('ITA','bold'), args=ITA),
item.clone(title=support.typo('Genere','bold'), action='genres'),
item.clone(title=support.typo('Anno','bold'), action='years')]
if item.contentType == 'tvshow':
itemlist += [item.clone(title=support.typo('In Corso','bold'), args=InCorso),
item.clone(title=support.typo('Terminato','bold'), args=Terminato)]
itemlist +=[item.clone(title=support.typo('Cerca...','bold'), action='search', thumbnail=support.thumb('search'))]
return itemlist
def genres(item):
support.info()
# support.dbg()
itemlist = []
genres = json.loads(support.match(response.text, patron='genres="([^"]+)').match.replace('&quot;', '"'))
for genre in genres:
item.args['genres'] = [genre]
itemlist.append(item.clone(title=support.typo(genre['name'],'bold'), action='peliculas'))
return support.thumb(itemlist)
def years(item):
support.info()
itemlist = []
from datetime import datetime
current_year = datetime.today().year
oldest_year = int(support.match(response.text, patron='anime_oldest_date="([^"]+)').match)
for year in list(reversed(range(oldest_year, current_year + 1))):
item.args['year']=year
itemlist.append(item.clone(title=support.typo(year,'bold'), action='peliculas'))
return itemlist
def search(item, text):
support.info('search', item)
if not item.args:
item.args = {'title':text}
else:
item.args['title'] = text
item.search = text
try:
return peliculas(item)
# Continue the search in case of an error
except:
import sys
for line in sys.exc_info():
support.info('search log:', line)
return []
def newest(categoria):
support.info(categoria)
itemlist = []
item = support.Item()
item.url = host
try:
itemlist = news(item)
if itemlist[-1].action == 'news':
itemlist.pop()
# Continue the search in case of an error
except:
import sys
for line in sys.exc_info():
support.info(line)
return []
return itemlist
def news(item):
support.info()
item.contentType = 'episode'
itemlist = []
import cloudscraper
session = cloudscraper.create_scraper()
fullJs = json.loads(support.match(session.get(item.url).text, headers=headers, patron=r'items-json="([^"]+)"').match.replace('&quot;', '"'))
js = fullJs['data']
for it in js:
itemlist.append(
item.clone(title= support.typo(it['anime']['title'] + ' - EP. ' + it['number'], 'bold'),
fulltitle=it['anime']['title'],
thumbnail=it['anime']['imageurl'],
forcethumb = True,
video_url=it['scws_id'],
plot=it['anime']['plot'],
action='findvideos')
)
if 'next_page_url' in fullJs:
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'),thumbnail=support.thumb(), url=fullJs['next_page_url']))
return itemlist
def peliculas(item):
support.info()
itemlist = []
page = item.page if item.page else 0
item.args['offset'] = page * 30
order = support.config.get_setting('order', item.channel)
if order:
order_list = [ "Standard", "Lista A-Z", "Lista Z-A", "Popolarità", "Valutazione" ]
item.args['order'] = order_list[order]
payload = json.dumps(item.args)
records = session.post(host + '/archivio/get-animes', headers=headers, data=payload).json()['records']
for it in records:
logger.debug(jsontools.dump(it))
lang = support.match(it['title'], patron=r'\(([Ii][Tt][Aa])\)').match
title = support.re.sub(r'\s*\([^\)]+\)', '', it['title'])
if 'ita' in lang.lower(): language = 'ITA'
else: language = 'Sub-ITA'
itm = item.clone(title=support.typo(title,'bold') + support.typo(language,'_ [] color kod') + (support.typo(it['title_eng'],'_ ()') if it['title_eng'] else ''))
itm.contentLanguage = language
itm.type = it['type']
itm.thumbnail = it['imageurl']
itm.plot = it['plot']
itm.url = item.url
if it['episodes_count'] == 1:
itm.contentType = 'movie'
itm.fulltitle = itm.show = itm.contentTitle = title
itm.contentSerieName = ''
itm.action = 'findvideos'
itm.video_url = it['episodes'][0]['scws_id']
else:
itm.contentType = 'tvshow'
itm.contentTitle = ''
itm.fulltitle = itm.show = itm.contentSerieName = title
itm.action = 'episodios'
itm.episodes = it['episodes'] if 'episodes' in it else it['scws_id']
itm.video_url = item.url
itemlist.append(itm)
autorenumber.start(itemlist)
if len(itemlist) >= 30:
itemlist.append(item.clone(title=support.typo(support.config.get_localized_string(30992), 'color kod bold'), thumbnail=support.thumb(), page=page + 1))
return itemlist
def episodios(item):
support.info()
itemlist = []
title = 'Parte ' if item.type.lower() == 'movie' else 'Episodio '
for it in item.episodes:
itemlist.append(
item.clone(title=support.typo(title + it['number'], 'bold'),
episode = it['number'],
fulltitle=item.title,
show=item.title,
contentTitle='',
contentSerieName=item.contentSerieName,
thumbnail=item.thumbnail,
plot=item.plot,
action='findvideos',
contentType='episode',
video_url=it['scws_id']))
if inspect.stack()[1][3] not in ['find_episodes']:
autorenumber.start(itemlist, item)
support.videolibrary(itemlist, item)
support.download(itemlist, item)
return itemlist
def findvideos(item):
# def calculateToken():
# from time import time
# from base64 import b64encode as b64
# import hashlib
# o = 48
# n = support.match('https://au-1.scws-content.net/get-ip').data
# i = 'Yc8U6r8KjAKAepEA'
# t = int(time() + (3600 * o))
# l = '{}{} {}'.format(t, n, i)
# md5 = hashlib.md5(l.encode())
# s = '?token={}&expires={}'.format(b64(md5.digest()).decode().replace('=', '').replace('+', "-").replace('\\', "_"), t)
# return s
# token = calculateToken()
# url = 'https://streamingcommunityws.com/master/{}{}'.format(item.video_url, token)
# # support.dbg()
# m3u8_original = httptools.downloadpage(url, CF=False).data
# m_video = re.search(r'\.\/video\/(\d+p)\/playlist.m3u8', m3u8_original)
# video_res = m_video.group(1)
# m_audio = re.search(r'\.\/audio\/(\d+k)\/playlist.m3u8', m3u8_original)
# audio_res = m_audio.group(1)
# # https://streamingcommunityws.com/master/5957?type=video&rendition=480p&token=wQLowWskEnbLfOfXXWWPGA&expires=1623437317
# video_url = 'https://streamingcommunityws.com/master/{}{}&type=video&rendition={}'.format(item.video_url, token, video_res)
# audio_url = 'https://streamingcommunityws.com/master/{}{}&type=audio&rendition={}'.format(item.video_url, token, audio_res)
# m3u8_original = m3u8_original.replace( m_video.group(0), video_url )
# m3u8_original = m3u8_original.replace( m_audio.group(0), audio_url )
# file_path = 'special://temp/animeunity.m3u8'
# filetools.write(xbmc.translatePath(file_path), m3u8_original, 'w')
# return support.server(item, itemlist=[item.clone(title=support.config.get_localized_string(30137), url=file_path, manifest = 'hls', server='directo', action='play')])
# item.url=item.video_url
directLink = False
if item.video_url is None:
if item.extra == "tvshow":
epnum = item.episode
logger.info('it is an episode', epnum)
episode = None
for ep in item.episodes:
if ep["number"] == epnum:
episode = ep
break
if episode is None:
logger.warn('cannot find episode')
else:
item.url = episode["link"]
directLink = True
if directLink:
logger.info('try direct link')
return support.server(item, itemlist=[item.clone(title=support.config.get_localized_string(30137), url=item.url, server='directo', action='play')])
else:
return support.server(item, itemlist=[item.clone(title=support.config.get_localized_string(30137), url=str(item.video_url), manifest = 'hls', server='streamingcommunityws', action='play')])

@@ -23,7 +23,7 @@ import re
from core import filetools
from core import httptools
from core import jsontools
from core import jsontools, support
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
@@ -784,4 +784,4 @@ def translate_server_name(name):
if '@' in name:
return config.get_localized_string(int(name.replace('@','')))
else:
return name
return name

@@ -0,0 +1,3 @@
from lib.streamingcommunity.client import Client
from lib.streamingcommunity.server import Server
__all__ = ['Client', 'Server']

@@ -0,0 +1,285 @@
import base64, json, random, struct, time, sys, traceback
if sys.version_info[0] >= 3:
PY3 = True
import urllib.request as urllib
xrange = range
else:
PY3 = False
import urllib
from core import httptools, jsontools, support
from threading import Thread
import re
from lib.streamingcommunity.handler import Handler
from platformcode import logger
from lib.streamingcommunity.server import Server
class Client(object):
def __init__(self, url, port=None, ip=None, auto_shutdown=True, wait_time=20, timeout=5, is_playing_fnc=None, video_id=None):
self.port = port if port else random.randint(8000,8099)
self.ip = ip if ip else "127.0.0.1"
self.connected = False
self.start_time = None
self.last_connect = None
self.is_playing_fnc = is_playing_fnc
self.auto_shutdown = auto_shutdown
self.wait_time = wait_time
self.timeout = timeout
self.running = False
self.file = None
self.files = []
# video_id is the ID in the webpage path
self._video_id = video_id
# Fetch the JSON with the full video details from the video page
jsonDataStr = httptools.downloadpage('https://streamingcommunityws.com/videos/1/{}'.format(self._video_id), CF=False ).data
logger.debug( jsonDataStr )
self._jsonData = jsontools.load( jsonDataStr )
# compute the token and the expiration time
# these values are used for the manifest requests
self._token, self._expires = self.calculateToken( self._jsonData['client_ip'] )
# Starting web server
self._server = Server((self.ip, self.port), Handler, client=self)
self.start()
def start(self):
"""
" Starting client and server in a separated thread
"""
self.start_time = time.time()
self.running = True
self._server.run()
t= Thread(target=self._auto_shutdown)
t.setDaemon(True)
t.start()
logger.info("SC Server Started", (self.ip, self.port))
def _auto_shutdown(self):
while self.running:
time.sleep(1)
if self.file and self.file.cursor:
self.last_connect = time.time()
if self.is_playing_fnc and self.is_playing_fnc():
self.last_connect = time.time()
if self.auto_shutdown:
# shutdown because the player was closed
if self.connected and self.last_connect and self.is_playing_fnc and not self.is_playing_fnc():
if time.time() - self.last_connect - 1 > self.timeout:
self.stop()
# shutdown because no connection was ever made
if (not self.file or not self.file.cursor) and self.start_time and self.wait_time and not self.connected:
if time.time() - self.start_time - 1 > self.wait_time:
self.stop()
# shutdown after the last connection
if (not self.file or not self.file.cursor) and self.timeout and self.connected and self.last_connect and not self.is_playing_fnc:
if time.time() - self.last_connect - 1 > self.timeout:
self.stop()
def stop(self):
self.running = False
self._server.stop()
logger.info("SC Server Stopped")
def get_manifest_url(self):
# remap request path for main manifest
# it must point to local server ip:port
return "http://" + self.ip + ":" + str(self.port) + "/manifest.m3u8"
def get_main_manifest_content(self):
# get the master manifest listing the video/audio renditions
# every URL in it must be remapped so the local server catches all chunk requests
url = 'https://streamingcommunityws.com/master/{}?token={}&expires={}'.format(self._video_id, self._token, self._expires)
m3u8_original = httptools.downloadpage(url, CF=False).data
logger.debug('CLIENT: m3u8:', m3u8_original)
# remap the video/audio manifest URLs
# they must point to local server:
# /video/RES.m3u8
# /audio/RES.m3u8
r_video = re.compile(r'(\.\/video\/(\d+p)\/playlist.m3u8)', re.MULTILINE)
r_audio = re.compile(r'(\.\/audio\/(\d+k)\/playlist.m3u8)', re.MULTILINE)
for match in r_video.finditer(m3u8_original):
line = match.groups()[0]
res = match.groups()[1]
video_url = "/video/" + res + ".m3u8"
# logger.info('replace', match.groups(), line, res, video_url)
m3u8_original = m3u8_original.replace( line, video_url )
for match in r_audio.finditer(m3u8_original):
line = match.groups()[0]
res = match.groups()[1]
audio_url = "/audio/" + res + ".m3u8"
# logger.info('replace', match.groups(), line, res, audio_url)
m3u8_original = m3u8_original.replace( line, audio_url )
# m_video = re.search(, m3u8_original)
# self._video_res = m_video.group(1)
# m_audio = re.search(r'\.\/audio\/(\d+k)\/playlist.m3u8', m3u8_original)
# self._audio_res = m_audio.group(1)
# video_url = "/video/" + self._video_res + ".m3u8"
# audio_url = "/audio/" + self._audio_res + ".m3u8"
# m3u8_original = m3u8_original.replace( m_video.group(0), video_url )
# m3u8_original = m3u8_original.replace( m_audio.group(0), audio_url )
return m3u8_original
def get_video_manifest_content(self, url):
"""
" Based on `default_start`, `default_count` and `default_domain`
" this method remap each video chunks url in order to make them point to
" the remote domain switching from `default_start` to `default_count` values
"""
m_video = re.search( r'\/video\/(\d+p)\.m3u8', url)
video_res = m_video.groups()[0]
logger.info('Video res: ', video_res)
# get the original manifest file for video chunks
url = 'https://streamingcommunityws.com/master/{}?token={}&expires={}&type=video&rendition={}'.format(self._video_id, self._token, self._expires, video_res)
original_manifest = httptools.downloadpage(url, CF=False).data
manifest_to_parse = original_manifest
# remap each chunk
r = re.compile(r'^(\w+\.ts)$', re.MULTILINE)
default_start = self._jsonData[ "proxies" ]["default_start"]
default_count = self._jsonData[ "proxies" ]["default_count"]
default_domain = self._jsonData[ "proxies" ]["default_domain"]
storage_id = self._jsonData[ "storage_id" ]
folder_id = self._jsonData[ "folder_id" ]
for match in r.finditer(manifest_to_parse):
# take each chunk filename and replace it in the original manifest content
ts = match.groups()[0]
# compute final url pointing to given domain
url = 'https://au-{default_start}.{default_domain}/hls/{storage_id}/{folder_id}/video/{video_res}/{ts}'.format(
default_start = default_start,
default_domain = default_domain,
storage_id = storage_id,
folder_id = folder_id,
video_res = video_res,
ts = ts
)
original_manifest = original_manifest.replace( ts, url )
default_start = default_start + 1
if default_start > default_count:
default_start = 1
# rewrite the encryption key URL so that it points to the remote streamingcommunity server
original_manifest = re.sub(r'"(\/.*[enc]?\.key)"', '"https://streamingcommunityws.com\\1"', original_manifest)
return original_manifest
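# Worked example of the rewrite above (all values are placeholders, not from a real response):
#   manifest line : 000123.ts
#   rewritten as  : https://au-1.<default_domain>/hls/<storage_id>/<folder_id>/video/1080p/000123.ts
# The au-<N> host index starts at default_start and wraps back to 1 once it exceeds
# default_count, so consecutive chunks are spread across the available proxy hosts.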
def get_audio_manifest_content(self, url):
"""
" Based on `default_start`, `default_count` and `default_domain`
" this method remap each video chunks url in order to make them point to
" the remote domain switching from `default_start` to `default_count` values
"""
m_audio = re.search( r'\/audio\/(\d+k)\.m3u8', url)
audio_res = m_audio.groups()[0]
logger.info('Audio res: ', audio_res)
# get the original manifest file for audio chunks
url = 'https://streamingcommunityws.com/master/{}?token={}&expires={}&type=audio&rendition={}'.format(self._video_id, self._token, self._expires, audio_res)
original_manifest = httptools.downloadpage(url, CF=False).data
manifest_to_parse = original_manifest
# remap each chunk
r = re.compile(r'^(\w+\.ts)$', re.MULTILINE)
default_start = self._jsonData[ "proxies" ]["default_start"]
default_count = self._jsonData[ "proxies" ]["default_count"]
default_domain = self._jsonData[ "proxies" ]["default_domain"]
storage_id = self._jsonData[ "storage_id" ]
folder_id = self._jsonData[ "folder_id" ]
for match in r.finditer(manifest_to_parse):
# take each chunk filename and replace it in the original manifest content
ts = match.groups()[0]
# compute final url pointing to given domain
url = 'https://au-{default_start}.{default_domain}/hls/{storage_id}/{folder_id}/audio/{audio_res}/{ts}'.format(
default_start = default_start,
default_domain = default_domain,
storage_id = storage_id,
folder_id = folder_id,
audio_res = audio_res,
ts = ts
)
original_manifest = original_manifest.replace( ts, url )
default_start = default_start + 1
if default_start > default_count:
default_start = 1
# rewrite the encryption key URL so that it points to the remote streamingcommunity server
original_manifest = re.sub(r'"(\/.*[enc]?\.key)"', '"https://streamingcommunityws.com\\1"', original_manifest)
return original_manifest
def calculateToken(self, ip):
"""
" Compute the `token` and the `expires` values in order to perform each next requests
"""
from time import time
from base64 import b64encode as b64
import hashlib
o = 48
# NOT USED: the client IP is now taken from the `client_ip` field of the page JSON loaded in the constructor
# n = support.match('https://au-1.scws-content.net/get-ip').data
i = 'Yc8U6r8KjAKAepEA'
t = int(time() + (3600 * o))
l = '{}{} {}'.format(t, ip, i)
md5 = hashlib.md5(l.encode())
#s = '?token={}&expires={}'.format(, t)
token = b64( md5.digest() ).decode().replace( '=', '' ).replace( '+', "-" ).replace( '\\', "_" )
expires = t
return token, expires
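For reference, a standalone sketch of the same token arithmetic with a placeholder IP; sc_token() is an illustrative name, while the secret string, the 48-hour window and the character replacements are copied from calculateToken() above (in the addon the real IP comes from the page JSON's client_ip field):

import hashlib
import time
from base64 import b64encode

def sc_token(client_ip, secret='Yc8U6r8KjAKAepEA', hours=48):
    # md5 over "<expires><ip> <secret>", base64-encoded, then the same replacements as above
    expires = int(time.time() + (3600 * hours))
    digest = hashlib.md5('{}{} {}'.format(expires, client_ip, secret).encode()).digest()
    token = b64encode(digest).decode().replace('=', '').replace('+', '-').replace('\\', '_')
    return token, expires

token, expires = sc_token('203.0.113.7')  # placeholder IP
# appended to every manifest request, e.g.
# https://streamingcommunityws.com/master/<video_id>?token=<token>&expires=<expires>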

@@ -0,0 +1,73 @@
import time, os, re, sys
if sys.version_info[0] >= 3:
PY3 = True
from http.server import BaseHTTPRequestHandler
import urllib.request as urllib
import urllib.parse as urlparse
else:
PY3 = False
from BaseHTTPServer import BaseHTTPRequestHandler
import urlparse
import urllib
from platformcode import logger
class Handler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def log_message(self, format, *args):
pass
def do_GET(self):
"""
" Got request
" We are going to handle the request path in order to proxy each manifest
"""
url = urlparse.urlparse(self.path).path
logger.debug('HANDLER:', url)
response = None
# Default content-type for each manifest
cType = "application/vnd.apple.mpegurl"
if url == "/manifest.m3u8":
response = self.server._client.get_main_manifest_content()
elif url.startswith('/video/'):
response = self.server._client.get_video_manifest_content(url)
elif url.startswith('/audio/'):
response = self.server._client.get_audio_manifest_content(url)
elif url.endswith('enc.key'):
# This path should NOT be used, see get_video_manifest_content function
response = self.server._client.get_enc_key( url )
cType = "application/octet-stream"
if response is None:
# Default 404 response
self.send_error(404, 'Not Found')
logger.warn('Responding 404 for url', url)
else:
# build the OK response and send it to the client
self.send_response(200)
self.send_header("Content-Type", cType )
self.send_header("Content-Length", str( len(response.encode('utf-8')) ) )
self.end_headers()
self.wfile.write( response.encode() )
# force flush just to be sure
self.wfile.flush()
logger.info('HANDLER flushed:', cType , str( len(response.encode('utf-8')) ) )
logger.debug( response.encode('utf-8') )

@@ -0,0 +1,39 @@
import sys, traceback
if sys.version_info[0] >= 3:
from http.server import HTTPServer
from socketserver import ThreadingMixIn
else:
from BaseHTTPServer import HTTPServer
from SocketServer import ThreadingMixIn
from threading import Thread
from platformcode import logger
class Server(ThreadingMixIn, HTTPServer):
daemon_threads = True
timeout = 1
def __init__(self, address, handler, client):
HTTPServer.__init__(self,address,handler)
self._client = client
self.running=True
self.request = None
def stop(self):
self.running=False
# def serve(self):
# while self.running:
# try:
# self.handle_request()
# except:
# logger.error(traceback.format_exc())
def run(self):
t=Thread(target=self.serve_forever, name='HTTP Server')
t.daemon=self.daemon_threads
t.start()
def handle_error(self, request, client_address):
if not "socket.py" in traceback.format_exc():
logger.error(traceback.format_exc())
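The 'serve_forever' note in the commit message refers to this file: instead of the commented-out handle_request() loop, the server now runs the stdlib serve_forever() on a daemon thread. A minimal standalone sketch of the pattern, assuming Python 3; DemoServer, the port and the bare BaseHTTPRequestHandler are placeholders:

from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from threading import Thread

class DemoServer(ThreadingMixIn, HTTPServer):
    daemon_threads = True

server = DemoServer(('127.0.0.1', 8099), BaseHTTPRequestHandler)
t = Thread(target=server.serve_forever, name='HTTP Server')
t.daemon = True  # the thread dies with the host process, like Server.run() above
t.start()
# serve_forever() keeps handling requests until server.shutdown() is called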

@@ -0,0 +1,15 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
]
},
"free": true,
"id": "streamingcommunityws",
"name": "StreamingCommunityWS",
"premium": [
],
"settings": [
]
}

servers/streamingcommunityws.py (new executable file)

@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import json
import random
from core import httptools, support, scrapertools
from platformcode import platformtools, logger
from lib.streamingcommunity import Client as SCClient
files = None
def test_video_exists(page_url):
# page_url is the {VIDEO_ID}, e.g. 5957
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
video_urls = []
global c
c = SCClient("",video_id=page_url, is_playing_fnc=platformtools.is_playing)
media_url = c.get_manifest_url()
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [Streaming Community]", media_url])
return video_urls
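For context, a hedged example of what this connector returns once the proxy is running; the port is random in the 8000-8099 range, so the value shown is only illustrative:

urls = get_video_url('5957')  # '5957' is the sample video id from the comment above
# urls -> [['m3u8 [Streaming Community]', 'http://127.0.0.1:8042/manifest.m3u8']]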

@@ -311,8 +311,9 @@ def servers_favorites(item):
orden = config.get_setting("favorites_servers_list", server=server)
if orden > 0:
dict_values[orden] = len(server_names) - 1
if orden is not None:
if orden > 0:
dict_values[orden] = len(server_names) - 1
for x in range(1, 12):
control = {'id': x,
@@ -1214,4 +1215,4 @@ def call_browser(item):
short = urllib.urlopen(
'https://u.nu/api.php?action=shorturl&format=simple&url=' + item.url).read()
platformtools.dialog_ok(config.get_localized_string(20000),
config.get_localized_string(70740) % short)
config.get_localized_string(70740) % short)